From e771aa3f85bcffd939e1e5d4f0f9a95623d829a7 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Tue, 18 Apr 2023 17:16:56 -0400 Subject: [PATCH 01/38] Make thresholds apply to integrations rather than files --- .../common_monitors/dark_monitor.py | 141 ++++++++---------- 1 file changed, 66 insertions(+), 75 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 26d433ed1..8af72af3d 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -402,11 +402,11 @@ def exclude_existing_badpix(self, badpix, pixel_type): new_pixels_y : list List of y coordinates of new bad pixels """ - + if len(badpix[0]) == 0: logging.warning("\tNo new {} pixels to check.".format(pixel_type)) return ([], []) - + logging.info("\tChecking {} potential new {} pixels".format(len(badpix[0]), pixel_type)) if pixel_type not in ['hot', 'dead', 'noisy']: @@ -440,7 +440,7 @@ def exclude_existing_badpix(self, badpix, pixel_type): if len(np.intersect1d(ind_x[0], ind_y[0])) == 0: new_pixels_x.append(x) new_pixels_y.append(y) - + logging.info("\t\tKeeping {} {} pixels".format(len(new_pixels_x), pixel_type)) # pixel = (x, y) # if pixel not in already_found: @@ -810,7 +810,7 @@ def process(self, file_list): # Add new noisy pixels to the database logging.info('\tFound {} new noisy pixels'.format(len(new_noisy_pixels[0]))) self.add_bad_pix(new_noisy_pixels, 'noisy', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time) - + logging.info("Creating Mean Slope Image {}".format(slope_image)) # Create png file of mean slope image. Add bad pixels only for full frame apertures self.create_mean_slope_figure(slope_image, len(slope_files), hotxy=new_hot_pix, deadxy=new_dead_pix, @@ -937,11 +937,11 @@ def run(self): # If the aperture is not listed in the threshold file, we need # a default if not np.any(match): - file_count_threshold = 30 + integration_count_threshold = 30 logging.warning(('\tAperture {} is not present in the threshold file. Continuing ' 'with the default threshold of 30 files.'.format(aperture))) else: - file_count_threshold = limits['Threshold'][match][0] + integration_count_threshold = limits['Threshold'][match][0] self.aperture = aperture # We need a separate search for each readout pattern @@ -967,80 +967,71 @@ def run(self): logging.info('\tAperture: {}, Readpattern: {}, new entries: {}'.format(self.aperture, self.readpatt, len(new_entries))) - # Check to see if there are enough new files to meet the - # monitor's signal-to-noise requirements - if len(new_entries) >= file_count_threshold: - logging.info('\tMAST query has returned sufficient new dark files for {}, {}, {} to run the dark monitor.' - .format(self.instrument, self.aperture, self.readpatt)) - - # Get full paths to the files - new_filenames = [] - for file_entry in new_entries: - try: - new_filenames.append(filesystem_path(file_entry['filename'])) - except FileNotFoundError: - logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.' - .format(file_entry['filename'])) - - # In some (unusual) cases, there are files in MAST with the correct aperture name - # but incorrect array sizes. 
Make sure that the new files all have the expected - # aperture size - temp_filenames = [] - bad_size_filenames = [] - expected_ap = Siaf(instrument)[aperture] - expected_xsize = expected_ap.XSciSize - expected_ysize = expected_ap.YSciSize - for new_file in new_filenames: - with fits.open(new_file) as hdulist: - xsize = hdulist[0].header['SUBSIZE1'] - ysize = hdulist[0].header['SUBSIZE2'] - if xsize == expected_xsize and ysize == expected_ysize: - temp_filenames.append(new_file) - else: - bad_size_filenames.append(new_file) - if len(temp_filenames) != len(new_filenames): - logging.info('\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: ') - for badfile in bad_size_filenames: - logging.info('\t\t{}'.format(badfile)) - new_filenames = deepcopy(temp_filenames) - - # If it turns out that the monitor doesn't find enough - # of the files returned by the MAST query to meet the threshold, - # then the monitor will not be run - if len(new_filenames) < file_count_threshold: - logging.info(("\tFilesystem search for the files identified by MAST has returned {} files. " - "This is less than the required minimum number of files ({}) necessary to run " - "the monitor. Quitting.").format(len(new_filenames), file_count_threshold)) - monitor_run = False + # Get full paths to the files + new_filenames = [] + for file_entry in new_entries: + try: + new_filenames.append(filesystem_path(file_entry['filename'])) + except FileNotFoundError: + logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.' + .format(file_entry['filename'])) + + # Generate a count of the total number of integrations across the files. This number will + # be compared to the threshold value to determine if the monitor is run. + # Also, in some (unusual) cases, there are files in MAST with the correct aperture name + # but incorrect array sizes. Make sure that the new files all have the expected + # aperture size + total_integrations = 0 + temp_filenames = [] + bad_size_filenames = [] + expected_ap = Siaf(instrument)[aperture] + expected_xsize = expected_ap.XSciSize + expected_ysize = expected_ap.YSciSize + for new_file in new_filenames: + with fits.open(new_file) as hdulist: + xsize = hdulist[0].header['SUBSIZE1'] + ysize = hdulist[0].header['SUBSIZE2'] + nints = hdulist[0].header['NINTS'] + if xsize == expected_xsize and ysize == expected_ysize: + temp_filenames.append(new_file) + total_integrations += int(nints) else: - logging.info(("\tFilesystem search for the files identified by MAST has returned {} files.") - .format(len(new_filenames))) - monitor_run = True - - if monitor_run: - # Set up directories for the copied data - ensure_dir_exists(os.path.join(self.output_dir, 'data')) - self.data_dir = os.path.join(self.output_dir, - 'data/{}_{}'.format(self.instrument.lower(), - self.aperture.lower())) - ensure_dir_exists(self.data_dir) + bad_size_filenames.append(new_file) + if len(temp_filenames) != len(new_filenames): + logging.info('\tSome files returned by MAST have unexpected aperture sizes. 
These files will be ignored: ') + for badfile in bad_size_filenames: + logging.info('\t\t{}'.format(badfile)) + new_filenames = deepcopy(temp_filenames) + + # Check to see if there are enough new integrations to meet the + # monitor's signal-to-noise requirements + logging.info((f'\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, ' + f'{self.readpatt} has found {total_integrations} in {len(new_filenames)} files.')) + if total_integrations >= integration_count_threshold: + logging.info(f'\tThis meets the threshold of {integration_count_threshold}.') + monitor_run = True + else: + logging.info(f'\tThis is below the threshold of {integration_count_threshold}. Monitor not run.') + monitor_run = False - # Copy files from filesystem - dark_files, not_copied = copy_files(new_filenames, self.data_dir) + if monitor_run: + # Set up directories for the copied data + ensure_dir_exists(os.path.join(self.output_dir, 'data')) + self.data_dir = os.path.join(self.output_dir, + 'data/{}_{}'.format(self.instrument.lower(), + self.aperture.lower())) + ensure_dir_exists(self.data_dir) - logging.info('\tNew_filenames: {}'.format(new_filenames)) - logging.info('\tData dir: {}'.format(self.data_dir)) - logging.info('\tCopied to working dir: {}'.format(dark_files)) - logging.info('\tNot copied: {}'.format(not_copied)) + # Copy files from filesystem + dark_files, not_copied = copy_files(new_filenames, self.data_dir) - # Run the dark monitor - self.process(dark_files) + logging.info('\tNew_filenames: {}'.format(new_filenames)) + logging.info('\tData dir: {}'.format(self.data_dir)) + logging.info('\tCopied to working dir: {}'.format(dark_files)) + logging.info('\tNot copied: {}'.format(not_copied)) - else: - logging.info(('\tDark monitor skipped. MAST query has returned {} new dark files for ' - '{}, {}, {}. {} new files are required to run dark current monitor.') - .format(len(new_entries), instrument, aperture, self.readpatt, file_count_threshold)) - monitor_run = False + # Run the dark monitor + self.process(dark_files) # Update the query history new_entry = {'instrument': instrument, From 8f13466bf24f6973610eb30e087c62c2855c6e0e Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 19 Apr 2023 12:15:43 -0400 Subject: [PATCH 02/38] add notes --- jwql/instrument_monitors/common_monitors/dark_monitor.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 8af72af3d..a8320bc3c 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -735,6 +735,13 @@ def process(self, file_list): try: + + + do we bother switching to use rateints files where available, so that we can create a sigma-clipped mean + slope rather than the basic mean that goes into the rate file? If we are confident in the jump flagging + then it seems like a straight mean might be ok? My concern with rateints files is that the pipeline + might not output them in all cases? 
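+
+            As a rough sketch of the sigma-clipped option (illustrative names only,
+            not an existing helper), given a rateints cube of shape (n_ints, ny, nx):
+
+                import numpy as np
+                from astropy.stats import sigma_clip
+
+                def sigma_clipped_mean_image(int_stack, sigma=3):
+                    # Mask outliers along the integration axis, then average
+                    clipped = sigma_clip(int_stack, sigma=sigma, axis=0)
+                    return np.ma.mean(clipped, axis=0).filled(np.nan)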
+ # Read in all slope images and place into a list slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files) From 10cabf2389eed30b6a0058b1d0e16ed7e79068dc Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Fri, 21 Apr 2023 16:59:34 -0400 Subject: [PATCH 03/38] Begin implementing a splitting function to equalize number of integrations per run --- .../common_monitors/dark_monitor.py | 85 ++++++++++++++++--- 1 file changed, 72 insertions(+), 13 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index a8320bc3c..36d014546 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -989,6 +989,7 @@ def run(self): # but incorrect array sizes. Make sure that the new files all have the expected # aperture size total_integrations = 0 + integrations = [] temp_filenames = [] bad_size_filenames = [] expected_ap = Siaf(instrument)[aperture] @@ -1002,6 +1003,7 @@ def run(self): if xsize == expected_xsize and ysize == expected_ysize: temp_filenames.append(new_file) total_integrations += int(nints) + integrations.append(int(nints)) else: bad_size_filenames.append(new_file) if len(temp_filenames) != len(new_filenames): @@ -1015,13 +1017,9 @@ def run(self): logging.info((f'\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, ' f'{self.readpatt} has found {total_integrations} in {len(new_filenames)} files.')) if total_integrations >= integration_count_threshold: - logging.info(f'\tThis meets the threshold of {integration_count_threshold}.') + logging.info(f'\tThis meets the threshold of {integration_count_threshold} integrations.') monitor_run = True - else: - logging.info(f'\tThis is below the threshold of {integration_count_threshold}. Monitor not run.') - monitor_run = False - if monitor_run: # Set up directories for the copied data ensure_dir_exists(os.path.join(self.output_dir, 'data')) self.data_dir = os.path.join(self.output_dir, @@ -1029,16 +1027,57 @@ def run(self): self.aperture.lower())) ensure_dir_exists(self.data_dir) - # Copy files from filesystem - dark_files, not_copied = copy_files(new_filenames, self.data_dir) + # Split the list of good files into sub-lists based on the integration + # threshold. The monitor will then be run on each sub-list independently, + # in order to produce results with roughly the same signal-to-noise. This + # also prevents the monitor running on a huge chunk of files in the case + # where it hasn't been run in a while and data have piled up in the meantime. + self.split_files_into_equal_lists(new_filenames, integrations) + The function above needs to create self.new_file_lists, which is a + list of file lists. It also needs to keep track of the starting and + ending time associated with each sub-list, so that the database can + be updated appropriately + + what about the case where the final few files are not enough to trigger + another run of the monitor? do we leave them for next time? or bundle them + into the final sub-list? Probably the latter. 
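+                        One possible shape for that bundling behavior (a sketch only;
+                        split_files_into_equal_lists is still a stub, and these names
+                        are illustrative):
+
+                            def split_with_remainder(files, nints, threshold):
+                                # Greedily fill batches until each reaches the threshold
+                                batches, current, total = [], [], 0
+                                for fname, n in zip(files, nints):
+                                    current.append(fname)
+                                    total += n
+                                    if total >= threshold:
+                                        batches.append(current)
+                                        current, total = [], 0
+                                # Bundle leftover files into the final sub-list rather
+                                # than leaving them for a future run
+                                if current:
+                                    if batches:
+                                        batches[-1].extend(current)
+                                    else:
+                                        batches.append(current)
+                                return batches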
+ + # Run the monitor once on each list + for new_file_list in self.new_file_lists: + # Copy files from filesystem + dark_files, not_copied = copy_files(new_file_list, self.data_dir) + + logging.info('\tNew_filenames: {}'.format(new_file_list)) + logging.info('\tData dir: {}'.format(self.data_dir)) + logging.info('\tCopied to working dir: {}'.format(dark_files)) + logging.info('\tNot copied: {}'.format(not_copied)) + + # Run the dark monitor + self.process(dark_files) + + # Update the query history + new_entry = {'instrument': instrument, + 'aperture': aperture, + 'readpattern': self.readpatt, + 'start_time_mjd': self.query_start, + 'end_time_mjd': self.query_end, + 'files_found': len(new_entries), + 'run_monitor': monitor_run, + 'entry_date': datetime.datetime.now()} + with engine.begin() as connection: + connection.execute( + self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') + + + + + else: + logging.info(f'\tThis is below the threshold of {integration_count_threshold} integrations. Monitor not run.') + monitor_run = False + - logging.info('\tNew_filenames: {}'.format(new_filenames)) - logging.info('\tData dir: {}'.format(self.data_dir)) - logging.info('\tCopied to working dir: {}'.format(dark_files)) - logging.info('\tNot copied: {}'.format(not_copied)) - # Run the dark monitor - self.process(dark_files) # Update the query history new_entry = {'instrument': instrument, @@ -1128,6 +1167,26 @@ def shift_to_full_frame(self, coords): return (x, y) + def split_files_into_equal_lists(self, files, integration_list, threshold): + """Given a list of filenames and a list of the number of integrations + within each, split the files into sub-lists, where the files in each + list have a total number of integrations that is just over the given + threshold value + + Parameters + ---------- + files : list + List of filenames + + integration_list : list + List of integers describing how many integrations are in each file + + threshold : int + Threshold number of integrations needed to trigger a run of the + dark monitor + """ + pass + def stats_by_amp(self, image, amps): """Calculate statistics in the input image for each amplifier as well as the full image From e237a23bae2a3b3804db0431f6c7d98d42de06a7 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Tue, 25 Apr 2023 14:56:20 -0400 Subject: [PATCH 04/38] subdivide files for monitor by epoch and num of integrations --- .../common_monitors/dark_monitor.py | 196 ++++++++++++++---- jwql/utils/constants.py | 12 ++ 2 files changed, 171 insertions(+), 37 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 36d014546..6d307c566 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -990,6 +990,8 @@ def run(self): # aperture size total_integrations = 0 integrations = [] + starting_times = [] + ending_times = [] temp_filenames = [] bad_size_filenames = [] expected_ap = Siaf(instrument)[aperture] @@ -1004,6 +1006,8 @@ def run(self): temp_filenames.append(new_file) total_integrations += int(nints) integrations.append(int(nints)) + starting_times.append(hdulist[0].header['EXPSTART']) + ending_times.append(hdulist[0].header['EXPEND']) else: bad_size_filenames.append(new_file) if len(temp_filenames) != len(new_filenames): @@ -1032,18 +1036,10 @@ def run(self): # in order to produce results with roughly the same signal-to-noise. 
This
                        # also prevents the monitor running on a huge chunk of files in the case
                        # where it hasn't been run in a while and data have piled up in the meantime.
                        self.split_files_into_sub_lists(new_filenames, integrations, starting_times, ending_times, integration_count_threshold)

                        # Run the monitor once on each list
                        for new_file_list, batch_start_time, batch_end_time in zip(self.file_batches, self.start_time_batches, self.end_time_batches):
                            # Copy files from filesystem
                            dark_files, not_copied = copy_files(new_file_list, self.data_dir)

                            logging.info('\tNew_filenames: {}'.format(new_file_list))
                            logging.info('\tData dir: {}'.format(self.data_dir))
                            logging.info('\tCopied to working dir: {}'.format(dark_files))
                            logging.info('\tNot copied: {}'.format(not_copied))

                            # Run the dark monitor
                            self.process(dark_files)

                            # Update the query history once for each group of files
                            new_entry = {'instrument': instrument,
                                         'aperture': aperture,
                                         'readpattern': self.readpatt,
                                         'start_time_mjd': batch_start_time,
                                         'end_time_mjd': batch_end_time,
                                         'files_found': len(dark_files),
                                         'run_monitor': monitor_run,
                                         'entry_date': datetime.datetime.now()}
                            with engine.begin() as connection:
                                connection.execute(
                                    self.query_table.__table__.insert(), new_entry)
                            logging.info('\tUpdated the query history table')
                    else:
                        logging.info(f'\tThis is below the threshold of {integration_count_threshold} integrations. Monitor not run.')
                        monitor_run = False
-

-

            # Update the query history
            new_entry = {'instrument': instrument,
                         'aperture': aperture,
                         'readpattern': self.readpatt,
                         'start_time_mjd': self.query_start,
                         'end_time_mjd': self.query_end,
                         'files_found': len(new_entries),
                         'run_monitor': monitor_run,
                         'entry_date': datetime.datetime.now()}
            with engine.begin() as connection:
                connection.execute(
                    self.query_table.__table__.insert(), new_entry)
            logging.info('\tUpdated the query history table')

        logging.info('Dark Monitor completed successfully.')

    def split_files_into_sub_lists(self, files, integration_list, start_times, end_times, threshold):
        """Given a list of filenames and a list of the number of integrations
        within each, split the files into sub-lists, where the files in each
        list have a total number of integrations that is just over the given
        threshold value.

        Dark calibration plans per instrument:
        NIRCam - for full frame, takes only 2 integrations (150 groups) once per ~30-50 days.
for subarrays, takes 5-10 integrations once per 30-50 days
        NIRISS - full frame - 2 exps of 5 ints within each 2 week period. No requirement for
                 the 2 exps to be taken at the same time though. Could be separated
                 by almost 2 weeks, and be closer to the darks from the previous or
                 following 2 week period.
                 subarrays - 30 ints in each month-long span
        MIRI - 2 ints every 2 hours-5 days for a while, then 2 ints every 14-21 days
        NIRSpec - full frame 5-6 integrations spread over each month
                  subarray - 12 ints spread over each 2 month period
        FGS - N/A

        Parameters
        ----------
        files : list
            List of filenames

        integration_list : list
            List of integers describing how many integrations are in each file

        start_times : list
            List of MJD dates corresponding to the exposure start time of each file in ``files``

        end_times : list
            List of MJD dates corresponding to the exposure end time of each file in ``files``

        threshold : int
            Threshold number of integrations needed to trigger a run of the
            dark monitor
        """

        # Not grouping together data across multiple epochs is probably more
        # important than the number of integrations....

        # Include a final delta_t value that is the time between the last file and
        # the current time. If that value is less than...something...then we assume
        # we are in the middle of an epoch of the cal program. In that case, we will
        # skip running the monitor on the final batch, as defined below. We can save that
        # for a future run, where the final delta_t is long enough that we can assume
        # that epoch of the cal program has completed.

        # Eventual return parameters
        self.file_batches = []
        self.start_time_batches = []
        self.end_time_batches = []
        self.integration_batches = []

        # Add the current time onto the end of start_times
        start_times = np.append(start_times, Time.now().mjd)

        # Get the delta t between each pair of files. Insert 0 as the initial
        # delta_t, to make the coding easier
        delta_t = start_times[1:] - start_times[0:-1]  # units are days
        delta_t = np.insert(delta_t, 0, 0)

        # Divide up the list such that you don't cross large delta t values. We want to measure
        # dark current during each "epoch" within a calibration proposal
        dividers = np.where(delta_t >= DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME[self.instrument])[0]

        # Add dividers at the beginning index to make the coding easier
        dividers = np.insert(dividers, 0, 0)

        # If no epoch boundaries are found, then add a divider at the end, and the entire
        # set of files will be treated as a single batch
        if len(dividers) == 1:
            dividers = np.insert(dividers, len(dividers), len(files))

        # Within each batch, divide up the integrations into multiple batches if the total
        # number of integrations is above 2*threshold
        for i in range(len(dividers) - 1):
            batch_ints = integration_list[dividers[i]:dividers[i+1]]
            batch_files = files[dividers[i]:dividers[i+1]]
            batch_start_times = start_times[dividers[i]:dividers[i+1]]
            batch_end_times = end_times[dividers[i]:dividers[i+1]]
            batch_int_sum = np.sum(batch_ints)

            # Calculate how many subgroups to break up the batch into,
            # based on the threshold, and under the assumption that we
            # don't want to skip running on any of the files.
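            # For example (hypothetical numbers): with threshold = 30 and
            # batch_ints = [10, 12, 11, 9, 13], batch_int_sum is 55, so
            # n_subgroups = int(55 / 30) = 1 and the batch is kept whole. A batch
            # summing to 75 would give n_subgroups = 2, with the first split made
            # after the file whose cumulative integration count first reaches 30.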
            n_subgroups = int(batch_int_sum / threshold)

            if n_subgroups == 0:
                # Here, we are in a batch where the total number of integrations
                # is less than the threshold (but the batch was identified due to
                # the gaps in time before and after the batch.) In this case, we'll
                # run the monitor with fewer than the threshold number of integrations
                self.file_batches.append(batch_files)
                self.start_time_batches.append(batch_start_times)
                self.end_time_batches.append(batch_end_times)
                self.integration_batches.append(batch_ints)
            elif n_subgroups == 1:
                # Here there are not enough integrations to split the batch into
                # more than one subgroup
                self.file_batches.append(batch_files)
                self.start_time_batches.append(batch_start_times)
                self.end_time_batches.append(batch_end_times)
                self.integration_batches.append(batch_ints)

            elif n_subgroups > 1:
                # Here there are enough integrations to break the batch up
                # into more than one subgroup. We can't split within a file,
                # so we split after the file that gets the total number of
                # integrations above the threshold.

                # Calculate the total number of integrations up to each file
                batch_int_sums = np.array([np.sum(batch_ints[0:jj]) for jj in range(1, len(batch_ints))])

                base = 0
                startidx = 0
                endidx = 0
                complete = False
                for batchnum in range(n_subgroups):
                    endidx = np.where(batch_int_sums >= (base + threshold))[0]

                    # Check if we reach the end of the file list
                    if len(endidx) == 0:
                        endidx = len(batch_ints)
                        complete = True
                    else:
                        endidx = endidx[0]

                    subgroup_ints = batch_ints[startidx: endidx]
                    subgroup_files = batch_files[startidx: endidx]
                    subgroup_start_times = batch_start_times[startidx: endidx]
                    subgroup_end_times = batch_end_times[startidx: endidx]
                    subgroup_int_sum = np.sum(subgroup_ints)

                    # Add to output lists
                    self.file_batches.append(subgroup_files)
                    self.start_time_batches.append(subgroup_start_times)
                    self.end_time_batches.append(subgroup_end_times)
                    self.integration_batches.append(subgroup_ints)

                    if not complete:
                        startidx = deepcopy(endidx)
                        base = batch_int_sums[endidx - 1]
                    else:
                        # If we reach the end of the list before the expected number of
                        # subgroups, then we quit.
                        break

    def stats_by_amp(self, image, amps):
        """Calculate statistics in the input image for each amplifier as
        well as the full image

diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py
index 0e0a9cd69..b9eb705a8 100644
--- a/jwql/utils/constants.py
+++ b/jwql/utils/constants.py
@@ -159,6 +159,18 @@
 # Types of potential bad pixels identified by the dark current monitor
 DARK_MONITOR_BADPIX_TYPES = ['hot', 'dead', 'noisy']
 
+# Minimum amount of time, in days, between epochs of dark current observations. If the
+# dark monitor sees this much time, or longer, between two dark current files, it assumes
+# that the two files are part of separate epochs. This means the monitor will run separately
+# on these files, rather than bundling them together into a batch, where they would have
+# been combined into a mean dark rate
+DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME = {'nircam': 10.,
+                                             'niriss': 10.,
+                                             'miri': 10.,
+                                             'nirspec': 10.,
+                                             'fgs': 10.
+                                             }
+
 # Maximum number of potential new bad pixels to overplot on the dark monitor
 # mean dark image plot. 
Too many overplotted points starts to obscure the image # itself, and are most likely not really new bad pixels From 1f26d3a9006f9a5c2f17edf592a9fa67ffe95fc2 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Tue, 25 Apr 2023 16:55:39 -0400 Subject: [PATCH 05/38] Add number of skipped integs to dark threshold file --- .../common_monitors/dark_monitor.py | 33 +- .../dark_monitor_file_thresholds.txt | 1248 ++++++++--------- jwql/instrument_monitors/pipeline_tools.py | 15 +- jwql/utils/constants.py | 2 +- 4 files changed, 666 insertions(+), 632 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 6d307c566..252773b79 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -701,10 +701,15 @@ def process(self, file_list): logging.info('\tWorking on file: {}'.format(filename)) + + need to deal with rateints files here + rate_file = filename.replace("dark", "rate") rate_file_name = os.path.basename(rate_file) local_rate_file = os.path.join(self.data_dir, rate_file_name) + + if os.path.isfile(local_rate_file): logging.info("\t\tFile {} exists, skipping pipeline".format(local_rate_file)) slope_files.append(local_rate_file) @@ -712,9 +717,17 @@ def process(self, file_list): logging.info("\t\tAdding {} to calibration set".format(filename)) pipeline_files.append(filename) - outputs = run_parallel_pipeline(pipeline_files, "dark", "rate", self.instrument) + # For MIRI, save the rateints files. For other instruments save the rate files. + if self.instrument == 'miri': + output_suffix = 'rateints' + else: + output_suffix = 'rate' + + # For other instruments, just save the rate files + outputs = run_parallel_pipeline(pipeline_files, "dark", [output_suffix], self.instrument) + for filename in file_list: - processed_file = filename.replace("_dark", "_rate") + processed_file = filename.replace("_dark", f"_{output_suffix}") if processed_file not in slope_files and os.path.isfile(processed_file): slope_files.append(processed_file) os.remove(filename) @@ -742,8 +755,16 @@ def process(self, file_list): then it seems like a straight mean might be ok? My concern with rateints files is that the pipeline might not output them in all cases? - # Read in all slope images and place into a list - slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files) + + for MIRI, we want rateints files, and we want to throw out the first int of each one before creating mean slope images + + + + + + # Read in all slope images and create a stack of ints (from rateints files) + # or mean ints (from rate files) + slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files, skipped_initial_ints=) # Calculate a mean slope image from the inputs slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3) @@ -1165,12 +1186,16 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ Dark calibration plans per instrument: NIRCam - for full frame, takes only 2 integrations (150 groups) once per ~30-50 days. for subarrays, takes 5-10 integrations once per 30-50 days + team response - NIRISS - full frame - 2 exps of 5 ints within each 2 week period. No requirement for the 2 exps to be taken at the same time though. Could be separated by almost 2 weeks, and be closer to the darks from the previous or following 2 week period. 
subarrays - 30 ints in each month-long span MIRI - 2 ints every 2 hours-5 days for a while, then 2 ints every 14-21 days + team response - monitor should run on each exp separately. It should also throw out + the first integration of each exp. + NIRSpec - full frame 5-6 integrations spread over each month subarray - 12 ints spread over each 2 month period FGS - N/A diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt index d423bbbdb..bf6bdc34a 100644 --- a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt +++ b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt @@ -1,624 +1,624 @@ -Instrument Aperture Threshold -nircam NRCA1_FULL_OSS 10 -nircam NRCA2_FULL_OSS 10 -nircam NRCA3_FULL_OSS 10 -nircam NRCA4_FULL_OSS 10 -nircam NRCA5_FULL_OSS 10 -nircam NRCB1_FULL_OSS 10 -nircam NRCB2_FULL_OSS 10 -nircam NRCB3_FULL_OSS 10 -nircam NRCB4_FULL_OSS 10 -nircam NRCB5_FULL_OSS 10 -nircam NRCALL_FULL 10 -nircam NRCAS_FULL 10 -nircam NRCA1_FULL 10 -nircam NRCA2_FULL 10 -nircam NRCA3_FULL 10 -nircam NRCA4_FULL 10 -nircam NRCA5_FULL 10 -nircam NRCBS_FULL 10 -nircam NRCB1_FULL 10 -nircam NRCB2_FULL 10 -nircam NRCB3_FULL 10 -nircam NRCB4_FULL 10 -nircam NRCB5_FULL 10 -nircam NRCB1_FULLP 10 -nircam NRCB5_FULLP 10 -nircam NRCA1_SUB160 30 -nircam NRCA2_SUB160 30 -nircam NRCA3_SUB160 30 -nircam NRCA4_SUB160 30 -nircam NRCA5_SUB160 30 -nircam NRCB1_SUB160 30 -nircam NRCB2_SUB160 30 -nircam NRCB3_SUB160 30 -nircam NRCB4_SUB160 30 -nircam NRCB5_SUB160 30 -nircam NRCA1_SUB320 30 -nircam NRCA2_SUB320 30 -nircam NRCA3_SUB320 30 -nircam NRCA4_SUB320 30 -nircam NRCA5_SUB320 30 -nircam NRCB1_SUB320 30 -nircam NRCB2_SUB320 30 -nircam NRCB3_SUB320 30 -nircam NRCB4_SUB320 30 -nircam NRCB5_SUB320 30 -nircam NRCA1_SUB640 30 -nircam NRCA2_SUB640 30 -nircam NRCA3_SUB640 30 -nircam NRCA4_SUB640 30 -nircam NRCA5_SUB640 30 -nircam NRCB1_SUB640 30 -nircam NRCB2_SUB640 30 -nircam NRCB3_SUB640 30 -nircam NRCB4_SUB640 30 -nircam NRCB5_SUB640 30 -nircam NRCA5_GRISM256_F322W2 30 -nircam NRCA5_GRISM128_F322W2 30 -nircam NRCA5_GRISM64_F322W2 30 -nircam NRCA5_GRISM256_F277W 30 -nircam NRCA5_GRISM128_F277W 30 -nircam NRCA5_GRISM64_F277W 30 -nircam NRCA5_GRISM256_F356W 30 -nircam NRCA5_GRISM128_F356W 30 -nircam NRCA5_GRISM64_F356W 30 -nircam NRCA5_GRISM256_F444W 30 -nircam NRCA5_GRISM128_F444W 30 -nircam NRCA5_GRISM64_F444W 30 -nircam NRCA5_GRISM_F322W2 30 -nircam NRCA5_GRISM_F277W 30 -nircam NRCA5_GRISM_F356W 30 -nircam NRCA5_GRISM_F444W 30 -nircam NRCA1_GRISMTS 30 -nircam NRCA1_GRISMTS256 30 -nircam NRCA1_GRISMTS128 30 -nircam NRCA1_GRISMTS64 30 -nircam NRCA3_GRISMTS 30 -nircam NRCA3_GRISMTS256 30 -nircam NRCA3_GRISMTS128 30 -nircam NRCA3_GRISMTS64 30 -nircam NRCA5_TAGRISMTS32 30 -nircam NRCA5_TAGRISMTS32_F405N 30 -nircam NRCA5_TAGRISMTS_SCI_F322W2 30 -nircam NRCA5_TAGRISMTS_SCI_F444W 30 -nircam NRCA3_DHSPIL 30 -nircam NRCA3_DHSPIL_SUB96 30 -nircam NRCA3_DHSPIL_WEDGES 30 -nircam NRCB4_DHSPIL 30 -nircam NRCB4_DHSPIL_SUB96 30 -nircam NRCB4_DHSPIL_WEDGES 30 -nircam NRCA3_FP1 30 -nircam NRCA3_FP1_SUB8 30 -nircam NRCA3_FP1_SUB64 30 -nircam NRCA3_FP2MIMF 30 -nircam NRCA1_FP3MIMF 30 -nircam NRCA2_FP4MIMF 30 -nircam NRCA4_FP5MIMF 30 -nircam NRCB4_FP1 30 -nircam NRCB4_FP1_SUB8 30 -nircam NRCB4_FP1_SUB64 30 -nircam NRCB4_FP2MIMF 30 -nircam NRCB2_FP3MIMF 30 -nircam NRCB1_FP4MIMF 30 -nircam NRCB3_FP5MIMF 30 -nircam NRCA3_SUB64P 30 -nircam NRCA3_SUB160P 30 -nircam NRCA3_SUB400P 30 -nircam 
NRCA5_SUB64P 30 -nircam NRCA5_SUB160P 30 -nircam NRCA5_SUB400P 30 -nircam NRCB1_SUB64P 30 -nircam NRCB1_SUB160P 30 -nircam NRCB1_SUB400P 30 -nircam NRCB5_SUB64P 30 -nircam NRCB5_SUB160P 30 -nircam NRCB5_SUB400P 30 -nircam NRCB5_TAPSIMG32 30 -nircam NRCA5_GRISMC_WFSS 30 -nircam NRCA5_GRISMR_WFSS 30 -nircam NRCALL_GRISMC_WFSS 30 -nircam NRCALL_GRISMR_WFSS 30 -nircam NRCB5_GRISMC_WFSS 30 -nircam NRCB5_GRISMR_WFSS 30 -nircam NRCA2_MASK210R 30 -nircam NRCA5_MASK335R 30 -nircam NRCA5_MASK430R 30 -nircam NRCA4_MASKSWB 30 -nircam NRCA5_MASKLWB 30 -nircam NRCA2_TAMASK210R 30 -nircam NRCA5_TAMASK335R 30 -nircam NRCA5_TAMASK430R 30 -nircam NRCA4_TAMASKSWB 30 -nircam NRCA5_TAMASKLWB 30 -nircam NRCA5_TAMASKLWBL 30 -nircam NRCA4_TAMASKSWBS 30 -nircam NRCB1_MASK210R 30 -nircam NRCB5_MASK335R 30 -nircam NRCB5_MASK430R 30 -nircam NRCB3_MASKSWB 30 -nircam NRCB5_MASKLWB 30 -nircam NRCB1_TAMASK210R 30 -nircam NRCB5_TAMASK335R 30 -nircam NRCB5_TAMASK430R 30 -nircam NRCB3_TAMASKSWB 30 -nircam NRCB5_TAMASKLWB 30 -nircam NRCB5_TAMASKLWBL 30 -nircam NRCB3_TAMASKSWBS 30 -nircam NRCA2_FSTAMASK210R 30 -nircam NRCA4_FSTAMASKSWB 30 -nircam NRCA5_FSTAMASKLWB 30 -nircam NRCA5_FSTAMASK335R 30 -nircam NRCA5_FSTAMASK430R 30 -nircam NRCA4_MASKSWB_F182M 30 -nircam NRCA4_MASKSWB_F187N 30 -nircam NRCA4_MASKSWB_F210M 30 -nircam NRCA4_MASKSWB_F212N 30 -nircam NRCA4_MASKSWB_F200W 30 -nircam NRCA4_MASKSWB_NARROW 30 -nircam NRCA5_MASKLWB_F250M 30 -nircam NRCA5_MASKLWB_F300M 30 -nircam NRCA5_MASKLWB_F277W 30 -nircam NRCA5_MASKLWB_F335M 30 -nircam NRCA5_MASKLWB_F360M 30 -nircam NRCA5_MASKLWB_F356W 30 -nircam NRCA5_MASKLWB_F410M 30 -nircam NRCA5_MASKLWB_F430M 30 -nircam NRCA5_MASKLWB_F460M 30 -nircam NRCA5_MASKLWB_F480M 30 -nircam NRCA5_MASKLWB_F444W 30 -nircam NRCA5_MASKLWB_NARROW 30 -nircam NRCA2_FULL_MASK210R 10 -nircam NRCA5_FULL_MASK335R 10 -nircam NRCA5_FULL_MASK430R 10 -nircam NRCA4_FULL_MASKSWB 10 -nircam NRCA4_FULL_MASKSWB_F182M 10 -nircam NRCA4_FULL_MASKSWB_F187N 10 -nircam NRCA4_FULL_MASKSWB_F210M 10 -nircam NRCA4_FULL_MASKSWB_F212N 10 -nircam NRCA4_FULL_MASKSWB_F200W 10 -nircam NRCA5_FULL_MASKLWB 10 -nircam NRCA5_FULL_MASKLWB_F250M 10 -nircam NRCA5_FULL_MASKLWB_F300M 10 -nircam NRCA5_FULL_MASKLWB_F277W 10 -nircam NRCA5_FULL_MASKLWB_F335M 10 -nircam NRCA5_FULL_MASKLWB_F360M 10 -nircam NRCA5_FULL_MASKLWB_F356W 10 -nircam NRCA5_FULL_MASKLWB_F410M 10 -nircam NRCA5_FULL_MASKLWB_F430M 10 -nircam NRCA5_FULL_MASKLWB_F460M 10 -nircam NRCA5_FULL_MASKLWB_F480M 10 -nircam NRCA5_FULL_MASKLWB_F444W 10 -nircam NRCA2_FULL_WEDGE_RND 10 -nircam NRCA4_FULL_WEDGE_BAR 10 -nircam NRCA5_FULL_WEDGE_RND 10 -nircam NRCA5_FULL_WEDGE_BAR 10 -nircam NRCA2_FULL_TAMASK210R 10 -nircam NRCA5_FULL_TAMASK335R 10 -nircam NRCA5_FULL_TAMASK430R 10 -nircam NRCA4_FULL_TAMASKSWB 10 -nircam NRCA5_FULL_TAMASKLWB 10 -nircam NRCA5_FULL_TAMASKLWBL 10 -nircam NRCA4_FULL_TAMASKSWBS 10 -nircam NRCA2_FULL_FSTAMASK210R 10 -nircam NRCA4_FULL_FSTAMASKSWB 10 -nircam NRCA5_FULL_FSTAMASKLWB 10 -nircam NRCA5_FULL_FSTAMASK335R 10 -nircam NRCA5_FULL_FSTAMASK430R 10 -niriss NIS_CEN_OSS 10 -niriss NIS_CEN 10 -niriss NIS_AMI1 30 -niriss NIS_AMI2 30 -niriss NIS_AMI3 30 -niriss NIS_AMI4 30 -niriss NIS_AMITA 30 -niriss NIS_SOSSTA 30 -niriss NIS_WFSS_OFFSET 30 -niriss NIS_WFSS64 30 -niriss NIS_WFSS64R 30 -niriss NIS_WFSS64R3 30 -niriss NIS_WFSS64C 30 -niriss NIS_WFSS64C3 30 -niriss NIS_WFSS128 30 -niriss NIS_WFSS128R 30 -niriss NIS_WFSS128R3 30 -niriss NIS_WFSS128C 30 -niriss NIS_WFSS128C3 30 -niriss NIS_SUB64 30 -niriss NIS_SUB128 30 -niriss NIS_SUB256 30 -niriss NIS_SUBAMPCAL 30 -niriss 
NIS_SUBSTRIP96 30 -niriss NIS_SUBSTRIP256 30 -niriss NIS_FP1MIMF 30 -niriss NIS_FP2MIMF 30 -niriss NIS_FP3MIMF 30 -niriss NIS_FP4MIMF 30 -niriss NIS_FP5MIMF 30 -niriss NIS_AMIFULL 10 -niriss NIS_SOSSFULL 10 -niriss NIS_WFSS 10 -miri MIRIM_FULL_OSS 10 -miri MIRIM_FULL 10 -miri MIRIM_ILLUM 30 -miri MIRIM_BRIGHTSKY 30 -miri MIRIM_SUB256 30 -miri MIRIM_SUB128 30 -miri MIRIM_SUB64 30 -miri MIRIM_SLITLESSPRISM 30 -miri MIRIM_SLITLESSUPPER 30 -miri MIRIM_SLITLESSLOWER 30 -miri MIRIM_MASK1065 30 -miri MIRIM_MASK1140 30 -miri MIRIM_MASK1550 30 -miri MIRIM_MASKLYOT 30 -miri MIRIM_TAMRS 30 -miri MIRIM_TALRS 30 -miri MIRIM_TABLOCK 30 -miri MIRIM_TALYOT_UL 30 -miri MIRIM_TALYOT_UR 30 -miri MIRIM_TALYOT_LL 30 -miri MIRIM_TALYOT_LR 30 -miri MIRIM_TALYOT_CUL 30 -miri MIRIM_TALYOT_CUR 30 -miri MIRIM_TALYOT_CLL 30 -miri MIRIM_TALYOT_CLR 30 -miri MIRIM_TA1550_UL 30 -miri MIRIM_TA1550_UR 30 -miri MIRIM_TA1550_LL 30 -miri MIRIM_TA1550_LR 30 -miri MIRIM_TA1550_CUL 30 -miri MIRIM_TA1550_CUR 30 -miri MIRIM_TA1550_CLL 30 -miri MIRIM_TA1550_CLR 30 -miri MIRIM_TA1140_UL 30 -miri MIRIM_TA1140_UR 30 -miri MIRIM_TA1140_LL 30 -miri MIRIM_TA1140_LR 30 -miri MIRIM_TA1140_CUL 30 -miri MIRIM_TA1140_CUR 30 -miri MIRIM_TA1140_CLL 30 -miri MIRIM_TA1140_CLR 30 -miri MIRIM_TA1065_UL 30 -miri MIRIM_TA1065_UR 30 -miri MIRIM_TA1065_LL 30 -miri MIRIM_TA1065_LR 30 -miri MIRIM_TA1065_CUL 30 -miri MIRIM_TA1065_CUR 30 -miri MIRIM_TA1065_CLL 30 -miri MIRIM_TA1065_CLR 30 -miri MIRIM_TAFULL 10 -miri MIRIM_TAILLUM 30 -miri MIRIM_TABRIGHTSKY 30 -miri MIRIM_TASUB256 30 -miri MIRIM_TASUB128 30 -miri MIRIM_TASUB64 30 -miri MIRIM_TASLITLESSPRISM 30 -miri MIRIM_CORON1065 30 -miri MIRIM_CORON1140 30 -miri MIRIM_CORON1550 30 -miri MIRIM_CORONLYOT 30 -miri MIRIM_KNIFE 30 -miri MIRIM_FP1MIMF 30 -miri MIRIM_FP2MIMF 30 -miri MIRIM_FP3MIMF 30 -miri MIRIM_FP4MIMF 30 -miri MIRIM_FP5MIMF 30 -miri MIRIM_SLIT 30 -miri MIRIFU_CHANNEL1A 30 -miri MIRIFU_1ASLICE01 30 -miri MIRIFU_1ASLICE02 30 -miri MIRIFU_1ASLICE03 30 -miri MIRIFU_1ASLICE04 30 -miri MIRIFU_1ASLICE05 30 -miri MIRIFU_1ASLICE06 30 -miri MIRIFU_1ASLICE07 30 -miri MIRIFU_1ASLICE08 30 -miri MIRIFU_1ASLICE09 30 -miri MIRIFU_1ASLICE10 30 -miri MIRIFU_1ASLICE11 30 -miri MIRIFU_1ASLICE12 30 -miri MIRIFU_1ASLICE13 30 -miri MIRIFU_1ASLICE14 30 -miri MIRIFU_1ASLICE15 30 -miri MIRIFU_1ASLICE16 30 -miri MIRIFU_1ASLICE17 30 -miri MIRIFU_1ASLICE18 30 -miri MIRIFU_1ASLICE19 30 -miri MIRIFU_1ASLICE20 30 -miri MIRIFU_1ASLICE21 30 -miri MIRIFU_CHANNEL1B 30 -miri MIRIFU_1BSLICE01 30 -miri MIRIFU_1BSLICE02 30 -miri MIRIFU_1BSLICE03 30 -miri MIRIFU_1BSLICE04 30 -miri MIRIFU_1BSLICE05 30 -miri MIRIFU_1BSLICE06 30 -miri MIRIFU_1BSLICE07 30 -miri MIRIFU_1BSLICE08 30 -miri MIRIFU_1BSLICE09 30 -miri MIRIFU_1BSLICE10 30 -miri MIRIFU_1BSLICE11 30 -miri MIRIFU_1BSLICE12 30 -miri MIRIFU_1BSLICE13 30 -miri MIRIFU_1BSLICE14 30 -miri MIRIFU_1BSLICE15 30 -miri MIRIFU_1BSLICE16 30 -miri MIRIFU_1BSLICE17 30 -miri MIRIFU_1BSLICE18 30 -miri MIRIFU_1BSLICE19 30 -miri MIRIFU_1BSLICE20 30 -miri MIRIFU_1BSLICE21 30 -miri MIRIFU_CHANNEL1C 30 -miri MIRIFU_1CSLICE01 30 -miri MIRIFU_1CSLICE02 30 -miri MIRIFU_1CSLICE03 30 -miri MIRIFU_1CSLICE04 30 -miri MIRIFU_1CSLICE05 30 -miri MIRIFU_1CSLICE06 30 -miri MIRIFU_1CSLICE07 30 -miri MIRIFU_1CSLICE08 30 -miri MIRIFU_1CSLICE09 30 -miri MIRIFU_1CSLICE10 30 -miri MIRIFU_1CSLICE11 30 -miri MIRIFU_1CSLICE12 30 -miri MIRIFU_1CSLICE13 30 -miri MIRIFU_1CSLICE14 30 -miri MIRIFU_1CSLICE15 30 -miri MIRIFU_1CSLICE16 30 -miri MIRIFU_1CSLICE17 30 -miri MIRIFU_1CSLICE18 30 -miri MIRIFU_1CSLICE19 30 -miri 
MIRIFU_1CSLICE20 30 -miri MIRIFU_1CSLICE21 30 -miri MIRIFU_CHANNEL2A 30 -miri MIRIFU_2ASLICE01 30 -miri MIRIFU_2ASLICE02 30 -miri MIRIFU_2ASLICE03 30 -miri MIRIFU_2ASLICE04 30 -miri MIRIFU_2ASLICE05 30 -miri MIRIFU_2ASLICE06 30 -miri MIRIFU_2ASLICE07 30 -miri MIRIFU_2ASLICE08 30 -miri MIRIFU_2ASLICE09 30 -miri MIRIFU_2ASLICE10 30 -miri MIRIFU_2ASLICE11 30 -miri MIRIFU_2ASLICE12 30 -miri MIRIFU_2ASLICE13 30 -miri MIRIFU_2ASLICE14 30 -miri MIRIFU_2ASLICE15 30 -miri MIRIFU_2ASLICE16 30 -miri MIRIFU_2ASLICE17 30 -miri MIRIFU_CHANNEL2B 30 -miri MIRIFU_2BSLICE01 30 -miri MIRIFU_2BSLICE02 30 -miri MIRIFU_2BSLICE03 30 -miri MIRIFU_2BSLICE04 30 -miri MIRIFU_2BSLICE05 30 -miri MIRIFU_2BSLICE06 30 -miri MIRIFU_2BSLICE07 30 -miri MIRIFU_2BSLICE08 30 -miri MIRIFU_2BSLICE09 30 -miri MIRIFU_2BSLICE10 30 -miri MIRIFU_2BSLICE11 30 -miri MIRIFU_2BSLICE12 30 -miri MIRIFU_2BSLICE13 30 -miri MIRIFU_2BSLICE14 30 -miri MIRIFU_2BSLICE15 30 -miri MIRIFU_2BSLICE16 30 -miri MIRIFU_2BSLICE17 30 -miri MIRIFU_CHANNEL2C 30 -miri MIRIFU_2CSLICE01 30 -miri MIRIFU_2CSLICE02 30 -miri MIRIFU_2CSLICE03 30 -miri MIRIFU_2CSLICE04 30 -miri MIRIFU_2CSLICE05 30 -miri MIRIFU_2CSLICE06 30 -miri MIRIFU_2CSLICE07 30 -miri MIRIFU_2CSLICE08 30 -miri MIRIFU_2CSLICE09 30 -miri MIRIFU_2CSLICE10 30 -miri MIRIFU_2CSLICE11 30 -miri MIRIFU_2CSLICE12 30 -miri MIRIFU_2CSLICE13 30 -miri MIRIFU_2CSLICE14 30 -miri MIRIFU_2CSLICE15 30 -miri MIRIFU_2CSLICE16 30 -miri MIRIFU_2CSLICE17 30 -miri MIRIFU_CHANNEL3A 30 -miri MIRIFU_3ASLICE01 30 -miri MIRIFU_3ASLICE02 30 -miri MIRIFU_3ASLICE03 30 -miri MIRIFU_3ASLICE04 30 -miri MIRIFU_3ASLICE05 30 -miri MIRIFU_3ASLICE06 30 -miri MIRIFU_3ASLICE07 30 -miri MIRIFU_3ASLICE08 30 -miri MIRIFU_3ASLICE09 30 -miri MIRIFU_3ASLICE10 30 -miri MIRIFU_3ASLICE11 30 -miri MIRIFU_3ASLICE12 30 -miri MIRIFU_3ASLICE13 30 -miri MIRIFU_3ASLICE14 30 -miri MIRIFU_3ASLICE15 30 -miri MIRIFU_3ASLICE16 30 -miri MIRIFU_CHANNEL3B 30 -miri MIRIFU_3BSLICE01 30 -miri MIRIFU_3BSLICE02 30 -miri MIRIFU_3BSLICE03 30 -miri MIRIFU_3BSLICE04 30 -miri MIRIFU_3BSLICE05 30 -miri MIRIFU_3BSLICE06 30 -miri MIRIFU_3BSLICE07 30 -miri MIRIFU_3BSLICE08 30 -miri MIRIFU_3BSLICE09 30 -miri MIRIFU_3BSLICE10 30 -miri MIRIFU_3BSLICE11 30 -miri MIRIFU_3BSLICE12 30 -miri MIRIFU_3BSLICE13 30 -miri MIRIFU_3BSLICE14 30 -miri MIRIFU_3BSLICE15 30 -miri MIRIFU_3BSLICE16 30 -miri MIRIFU_CHANNEL3C 30 -miri MIRIFU_3CSLICE01 30 -miri MIRIFU_3CSLICE02 30 -miri MIRIFU_3CSLICE03 30 -miri MIRIFU_3CSLICE04 30 -miri MIRIFU_3CSLICE05 30 -miri MIRIFU_3CSLICE06 30 -miri MIRIFU_3CSLICE07 30 -miri MIRIFU_3CSLICE08 30 -miri MIRIFU_3CSLICE09 30 -miri MIRIFU_3CSLICE10 30 -miri MIRIFU_3CSLICE11 30 -miri MIRIFU_3CSLICE12 30 -miri MIRIFU_3CSLICE13 30 -miri MIRIFU_3CSLICE14 30 -miri MIRIFU_3CSLICE15 30 -miri MIRIFU_3CSLICE16 30 -miri MIRIFU_CHANNEL4A 30 -miri MIRIFU_4ASLICE01 30 -miri MIRIFU_4ASLICE02 30 -miri MIRIFU_4ASLICE03 30 -miri MIRIFU_4ASLICE04 30 -miri MIRIFU_4ASLICE05 30 -miri MIRIFU_4ASLICE06 30 -miri MIRIFU_4ASLICE07 30 -miri MIRIFU_4ASLICE08 30 -miri MIRIFU_4ASLICE09 30 -miri MIRIFU_4ASLICE10 30 -miri MIRIFU_4ASLICE11 30 -miri MIRIFU_4ASLICE12 30 -miri MIRIFU_CHANNEL4B 30 -miri MIRIFU_4BSLICE01 30 -miri MIRIFU_4BSLICE02 30 -miri MIRIFU_4BSLICE03 30 -miri MIRIFU_4BSLICE04 30 -miri MIRIFU_4BSLICE05 30 -miri MIRIFU_4BSLICE06 30 -miri MIRIFU_4BSLICE07 30 -miri MIRIFU_4BSLICE08 30 -miri MIRIFU_4BSLICE09 30 -miri MIRIFU_4BSLICE10 30 -miri MIRIFU_4BSLICE11 30 -miri MIRIFU_4BSLICE12 30 -miri MIRIFU_CHANNEL4C 30 -miri MIRIFU_4CSLICE01 30 -miri MIRIFU_4CSLICE02 30 -miri MIRIFU_4CSLICE03 
30 -miri MIRIFU_4CSLICE04 30 -miri MIRIFU_4CSLICE05 30 -miri MIRIFU_4CSLICE06 30 -miri MIRIFU_4CSLICE07 30 -miri MIRIFU_4CSLICE08 30 -miri MIRIFU_4CSLICE09 30 -miri MIRIFU_4CSLICE10 30 -miri MIRIFU_4CSLICE11 30 -miri MIRIFU_4CSLICE12 30 -nirspec NRS1_FULL_OSS 10 -nirspec NRS1_FULL 10 -nirspec NRS2_FULL_OSS 10 -nirspec NRS2_FULL 10 -nirspec NRS_S200A1_SLIT 30 -nirspec NRS_S200A2_SLIT 30 -nirspec NRS_S400A1_SLIT 30 -nirspec NRS_S1600A1_SLIT 30 -nirspec NRS_S200B1_SLIT 30 -nirspec NRS_FULL_IFU 10 -nirspec NRS_IFU_SLICE00 30 -nirspec NRS_IFU_SLICE01 30 -nirspec NRS_IFU_SLICE02 30 -nirspec NRS_IFU_SLICE03 30 -nirspec NRS_IFU_SLICE04 30 -nirspec NRS_IFU_SLICE05 30 -nirspec NRS_IFU_SLICE06 30 -nirspec NRS_IFU_SLICE07 30 -nirspec NRS_IFU_SLICE08 30 -nirspec NRS_IFU_SLICE09 30 -nirspec NRS_IFU_SLICE10 30 -nirspec NRS_IFU_SLICE11 30 -nirspec NRS_IFU_SLICE12 30 -nirspec NRS_IFU_SLICE13 30 -nirspec NRS_IFU_SLICE14 30 -nirspec NRS_IFU_SLICE15 30 -nirspec NRS_IFU_SLICE16 30 -nirspec NRS_IFU_SLICE17 30 -nirspec NRS_IFU_SLICE18 30 -nirspec NRS_IFU_SLICE19 30 -nirspec NRS_IFU_SLICE20 30 -nirspec NRS_IFU_SLICE21 30 -nirspec NRS_IFU_SLICE22 30 -nirspec NRS_IFU_SLICE23 30 -nirspec NRS_IFU_SLICE24 30 -nirspec NRS_IFU_SLICE25 30 -nirspec NRS_IFU_SLICE26 30 -nirspec NRS_IFU_SLICE27 30 -nirspec NRS_IFU_SLICE28 30 -nirspec NRS_IFU_SLICE29 30 -nirspec NRS_FULL_MSA 10 -nirspec NRS_FULL_MSA1 10 -nirspec NRS_FULL_MSA2 10 -nirspec NRS_FULL_MSA3 10 -nirspec NRS_FULL_MSA4 10 -nirspec NRS_VIGNETTED_MSA 30 -nirspec NRS_VIGNETTED_MSA1 30 -nirspec NRS_VIGNETTED_MSA2 30 -nirspec NRS_VIGNETTED_MSA3 30 -nirspec NRS_VIGNETTED_MSA4 30 -nirspec NRS_FIELD1_MSA4 30 -nirspec NRS_FIELD2_MSA4 30 -nirspec NRS1_FP1MIMF 30 -nirspec NRS1_FP2MIMF 30 -nirspec NRS1_FP3MIMF 30 -nirspec NRS2_FP4MIMF 30 -nirspec NRS2_FP5MIMF 30 -nirspec CLEAR_GWA_OTE 30 -nirspec F110W_GWA_OTE 30 -nirspec F140X_GWA_OTE 30 -nirspec NRS_SKY_OTEIP 30 -nirspec NRS_CLEAR_OTEIP_MSA_L0 30 -nirspec NRS_CLEAR_OTEIP_MSA_L1 30 -nirspec NRS_F070LP_OTEIP_MSA_L0 30 -nirspec NRS_F070LP_OTEIP_MSA_L1 30 -nirspec NRS_F100LP_OTEIP_MSA_L0 30 -nirspec NRS_F100LP_OTEIP_MSA_L1 30 -nirspec NRS_F170LP_OTEIP_MSA_L0 30 -nirspec NRS_F170LP_OTEIP_MSA_L1 30 -nirspec NRS_F290LP_OTEIP_MSA_L0 30 -nirspec NRS_F290LP_OTEIP_MSA_L1 30 -nirspec NRS_F110W_OTEIP_MSA_L0 30 -nirspec NRS_F110W_OTEIP_MSA_L1 30 -nirspec NRS_F140X_OTEIP_MSA_L0 30 -nirspec NRS_F140X_OTEIP_MSA_L1 30 -fgs FGS1_FULL_OSS 10 -fgs FGS1_FULL 10 -fgs FGS2_FULL_OSS 10 -fgs FGS2_FULL 10 -fgs FGS1_SUB128LL 30 -fgs FGS1_SUB128DIAG 30 -fgs FGS1_SUB128CNTR 30 -fgs FGS1_SUB32LL 30 -fgs FGS1_SUB32DIAG 30 -fgs FGS1_SUB32CNTR 30 -fgs FGS1_SUB8LL 30 -fgs FGS1_SUB8DIAG 30 -fgs FGS1_SUB8CNTR 30 -fgs FGS2_SUB128LL 30 -fgs FGS2_SUB128DIAG 30 -fgs FGS2_SUB128CNTR 30 -fgs FGS2_SUB32LL 30 -fgs FGS2_SUB32DIAG 30 -fgs FGS2_SUB32CNTR 30 -fgs FGS2_SUB8LL 30 -fgs FGS2_SUB8DIAG 30 -fgs FGS2_SUB8CNTR 30 -fgs FGS1_FP1MIMF 30 -fgs FGS1_FP2MIMF 30 -fgs FGS1_FP3MIMF 30 -fgs FGS1_FP4MIMF 30 -fgs FGS1_FP5MIMF 30 -fgs FGS2_FP1MIMF 30 -fgs FGS2_FP2MIMF 30 -fgs FGS2_FP3MIMF 30 -fgs FGS2_FP4MIMF 30 -fgs FGS2_FP5MIMF 30 \ No newline at end of file +Instrument Aperture Threshold N_skipped_integs +nircam NRCA1_FULL_OSS 10 0 +nircam NRCA2_FULL_OSS 10 0 +nircam NRCA3_FULL_OSS 10 0 +nircam NRCA4_FULL_OSS 10 0 +nircam NRCA5_FULL_OSS 10 0 +nircam NRCB1_FULL_OSS 10 0 +nircam NRCB2_FULL_OSS 10 0 +nircam NRCB3_FULL_OSS 10 0 +nircam NRCB4_FULL_OSS 10 0 +nircam NRCB5_FULL_OSS 10 0 +nircam NRCALL_FULL 10 0 +nircam NRCAS_FULL 10 0 +nircam NRCA1_FULL 10 0 +nircam NRCA2_FULL 10 0 
+nircam NRCA3_FULL 10 0 +nircam NRCA4_FULL 10 0 +nircam NRCA5_FULL 10 0 +nircam NRCBS_FULL 10 0 +nircam NRCB1_FULL 10 0 +nircam NRCB2_FULL 10 0 +nircam NRCB3_FULL 10 0 +nircam NRCB4_FULL 10 0 +nircam NRCB5_FULL 10 0 +nircam NRCB1_FULLP 10 0 +nircam NRCB5_FULLP 10 0 +nircam NRCA1_SUB160 30 0 +nircam NRCA2_SUB160 30 0 +nircam NRCA3_SUB160 30 0 +nircam NRCA4_SUB160 30 0 +nircam NRCA5_SUB160 30 0 +nircam NRCB1_SUB160 30 0 +nircam NRCB2_SUB160 30 0 +nircam NRCB3_SUB160 30 0 +nircam NRCB4_SUB160 30 0 +nircam NRCB5_SUB160 30 0 +nircam NRCA1_SUB320 30 0 +nircam NRCA2_SUB320 30 0 +nircam NRCA3_SUB320 30 0 +nircam NRCA4_SUB320 30 0 +nircam NRCA5_SUB320 30 0 +nircam NRCB1_SUB320 30 0 +nircam NRCB2_SUB320 30 0 +nircam NRCB3_SUB320 30 0 +nircam NRCB4_SUB320 30 0 +nircam NRCB5_SUB320 30 0 +nircam NRCA1_SUB640 30 0 +nircam NRCA2_SUB640 30 0 +nircam NRCA3_SUB640 30 0 +nircam NRCA4_SUB640 30 0 +nircam NRCA5_SUB640 30 0 +nircam NRCB1_SUB640 30 0 +nircam NRCB2_SUB640 30 0 +nircam NRCB3_SUB640 30 0 +nircam NRCB4_SUB640 30 0 +nircam NRCB5_SUB640 30 0 +nircam NRCA5_GRISM256_F322W2 30 0 +nircam NRCA5_GRISM128_F322W2 30 0 +nircam NRCA5_GRISM64_F322W2 30 0 +nircam NRCA5_GRISM256_F277W 30 0 +nircam NRCA5_GRISM128_F277W 30 0 +nircam NRCA5_GRISM64_F277W 30 0 +nircam NRCA5_GRISM256_F356W 30 0 +nircam NRCA5_GRISM128_F356W 30 0 +nircam NRCA5_GRISM64_F356W 30 0 +nircam NRCA5_GRISM256_F444W 30 0 +nircam NRCA5_GRISM128_F444W 30 0 +nircam NRCA5_GRISM64_F444W 30 0 +nircam NRCA5_GRISM_F322W2 30 0 +nircam NRCA5_GRISM_F277W 30 0 +nircam NRCA5_GRISM_F356W 30 0 +nircam NRCA5_GRISM_F444W 30 0 +nircam NRCA1_GRISMTS 30 0 +nircam NRCA1_GRISMTS256 30 0 +nircam NRCA1_GRISMTS128 30 0 +nircam NRCA1_GRISMTS64 30 0 +nircam NRCA3_GRISMTS 30 0 +nircam NRCA3_GRISMTS256 30 0 +nircam NRCA3_GRISMTS128 30 0 +nircam NRCA3_GRISMTS64 30 0 +nircam NRCA5_TAGRISMTS32 30 0 +nircam NRCA5_TAGRISMTS32_F405N 30 0 +nircam NRCA5_TAGRISMTS_SCI_F322W2 30 0 +nircam NRCA5_TAGRISMTS_SCI_F444W 30 0 +nircam NRCA3_DHSPIL 30 0 +nircam NRCA3_DHSPIL_SUB96 30 0 +nircam NRCA3_DHSPIL_WEDGES 30 0 +nircam NRCB4_DHSPIL 30 0 +nircam NRCB4_DHSPIL_SUB96 30 0 +nircam NRCB4_DHSPIL_WEDGES 30 0 +nircam NRCA3_FP1 30 0 +nircam NRCA3_FP1_SUB8 30 0 +nircam NRCA3_FP1_SUB64 30 0 +nircam NRCA3_FP2MIMF 30 0 +nircam NRCA1_FP3MIMF 30 0 +nircam NRCA2_FP4MIMF 30 0 +nircam NRCA4_FP5MIMF 30 0 +nircam NRCB4_FP1 30 0 +nircam NRCB4_FP1_SUB8 30 0 +nircam NRCB4_FP1_SUB64 30 0 +nircam NRCB4_FP2MIMF 30 0 +nircam NRCB2_FP3MIMF 30 0 +nircam NRCB1_FP4MIMF 30 0 +nircam NRCB3_FP5MIMF 30 0 +nircam NRCA3_SUB64P 30 0 +nircam NRCA3_SUB160P 30 0 +nircam NRCA3_SUB400P 30 0 +nircam NRCA5_SUB64P 30 0 +nircam NRCA5_SUB160P 30 0 +nircam NRCA5_SUB400P 30 0 +nircam NRCB1_SUB64P 30 0 +nircam NRCB1_SUB160P 30 0 +nircam NRCB1_SUB400P 30 0 +nircam NRCB5_SUB64P 30 0 +nircam NRCB5_SUB160P 30 0 +nircam NRCB5_SUB400P 30 0 +nircam NRCB5_TAPSIMG32 30 0 +nircam NRCA5_GRISMC_WFSS 30 0 +nircam NRCA5_GRISMR_WFSS 30 0 +nircam NRCALL_GRISMC_WFSS 30 0 +nircam NRCALL_GRISMR_WFSS 30 0 +nircam NRCB5_GRISMC_WFSS 30 0 +nircam NRCB5_GRISMR_WFSS 30 0 +nircam NRCA2_MASK210R 30 0 +nircam NRCA5_MASK335R 30 0 +nircam NRCA5_MASK430R 30 0 +nircam NRCA4_MASKSWB 30 0 +nircam NRCA5_MASKLWB 30 0 +nircam NRCA2_TAMASK210R 30 0 +nircam NRCA5_TAMASK335R 30 0 +nircam NRCA5_TAMASK430R 30 0 +nircam NRCA4_TAMASKSWB 30 0 +nircam NRCA5_TAMASKLWB 30 0 +nircam NRCA5_TAMASKLWBL 30 0 +nircam NRCA4_TAMASKSWBS 30 0 +nircam NRCB1_MASK210R 30 0 +nircam NRCB5_MASK335R 30 0 +nircam NRCB5_MASK430R 30 0 +nircam NRCB3_MASKSWB 30 0 +nircam NRCB5_MASKLWB 30 0 +nircam 
NRCB1_TAMASK210R 30 0 +nircam NRCB5_TAMASK335R 30 0 +nircam NRCB5_TAMASK430R 30 0 +nircam NRCB3_TAMASKSWB 30 0 +nircam NRCB5_TAMASKLWB 30 0 +nircam NRCB5_TAMASKLWBL 30 0 +nircam NRCB3_TAMASKSWBS 30 0 +nircam NRCA2_FSTAMASK210R 30 0 +nircam NRCA4_FSTAMASKSWB 30 0 +nircam NRCA5_FSTAMASKLWB 30 0 +nircam NRCA5_FSTAMASK335R 30 0 +nircam NRCA5_FSTAMASK430R 30 0 +nircam NRCA4_MASKSWB_F182M 30 0 +nircam NRCA4_MASKSWB_F187N 30 0 +nircam NRCA4_MASKSWB_F210M 30 0 +nircam NRCA4_MASKSWB_F212N 30 0 +nircam NRCA4_MASKSWB_F200W 30 0 +nircam NRCA4_MASKSWB_NARROW 30 0 +nircam NRCA5_MASKLWB_F250M 30 0 +nircam NRCA5_MASKLWB_F300M 30 0 +nircam NRCA5_MASKLWB_F277W 30 0 +nircam NRCA5_MASKLWB_F335M 30 0 +nircam NRCA5_MASKLWB_F360M 30 0 +nircam NRCA5_MASKLWB_F356W 30 0 +nircam NRCA5_MASKLWB_F410M 30 0 +nircam NRCA5_MASKLWB_F430M 30 0 +nircam NRCA5_MASKLWB_F460M 30 0 +nircam NRCA5_MASKLWB_F480M 30 0 +nircam NRCA5_MASKLWB_F444W 30 0 +nircam NRCA5_MASKLWB_NARROW 30 0 +nircam NRCA2_FULL_MASK210R 10 0 +nircam NRCA5_FULL_MASK335R 10 0 +nircam NRCA5_FULL_MASK430R 10 0 +nircam NRCA4_FULL_MASKSWB 10 0 +nircam NRCA4_FULL_MASKSWB_F182M 10 0 +nircam NRCA4_FULL_MASKSWB_F187N 10 0 +nircam NRCA4_FULL_MASKSWB_F210M 10 0 +nircam NRCA4_FULL_MASKSWB_F212N 10 0 +nircam NRCA4_FULL_MASKSWB_F200W 10 0 +nircam NRCA5_FULL_MASKLWB 10 0 +nircam NRCA5_FULL_MASKLWB_F250M 10 0 +nircam NRCA5_FULL_MASKLWB_F300M 10 0 +nircam NRCA5_FULL_MASKLWB_F277W 10 0 +nircam NRCA5_FULL_MASKLWB_F335M 10 0 +nircam NRCA5_FULL_MASKLWB_F360M 10 0 +nircam NRCA5_FULL_MASKLWB_F356W 10 0 +nircam NRCA5_FULL_MASKLWB_F410M 10 0 +nircam NRCA5_FULL_MASKLWB_F430M 10 0 +nircam NRCA5_FULL_MASKLWB_F460M 10 0 +nircam NRCA5_FULL_MASKLWB_F480M 10 0 +nircam NRCA5_FULL_MASKLWB_F444W 10 0 +nircam NRCA2_FULL_WEDGE_RND 10 0 +nircam NRCA4_FULL_WEDGE_BAR 10 0 +nircam NRCA5_FULL_WEDGE_RND 10 0 +nircam NRCA5_FULL_WEDGE_BAR 10 0 +nircam NRCA2_FULL_TAMASK210R 10 0 +nircam NRCA5_FULL_TAMASK335R 10 0 +nircam NRCA5_FULL_TAMASK430R 10 0 +nircam NRCA4_FULL_TAMASKSWB 10 0 +nircam NRCA5_FULL_TAMASKLWB 10 0 +nircam NRCA5_FULL_TAMASKLWBL 10 0 +nircam NRCA4_FULL_TAMASKSWBS 10 0 +nircam NRCA2_FULL_FSTAMASK210R 10 0 +nircam NRCA4_FULL_FSTAMASKSWB 10 0 +nircam NRCA5_FULL_FSTAMASKLWB 10 0 +nircam NRCA5_FULL_FSTAMASK335R 10 0 +nircam NRCA5_FULL_FSTAMASK430R 10 0 +niriss NIS_CEN_OSS 10 0 +niriss NIS_CEN 10 0 +niriss NIS_AMI1 30 0 +niriss NIS_AMI2 30 0 +niriss NIS_AMI3 30 0 +niriss NIS_AMI4 30 0 +niriss NIS_AMITA 30 0 +niriss NIS_SOSSTA 30 0 +niriss NIS_WFSS_OFFSET 30 0 +niriss NIS_WFSS64 30 0 +niriss NIS_WFSS64R 30 0 +niriss NIS_WFSS64R3 30 0 +niriss NIS_WFSS64C 30 0 +niriss NIS_WFSS64C3 30 0 +niriss NIS_WFSS128 30 0 +niriss NIS_WFSS128R 30 0 +niriss NIS_WFSS128R3 30 0 +niriss NIS_WFSS128C 30 0 +niriss NIS_WFSS128C3 30 0 +niriss NIS_SUB64 30 0 +niriss NIS_SUB128 30 0 +niriss NIS_SUB256 30 0 +niriss NIS_SUBAMPCAL 30 0 +niriss NIS_SUBSTRIP96 30 0 +niriss NIS_SUBSTRIP256 30 0 +niriss NIS_FP1MIMF 30 0 +niriss NIS_FP2MIMF 30 0 +niriss NIS_FP3MIMF 30 0 +niriss NIS_FP4MIMF 30 0 +niriss NIS_FP5MIMF 30 0 +niriss NIS_AMIFULL 10 0 +niriss NIS_SOSSFULL 10 0 +niriss NIS_WFSS 10 0 +miri MIRIM_FULL_OSS 1 0 +miri MIRIM_FULL 1 0 +miri MIRIM_ILLUM 30 0 +miri MIRIM_BRIGHTSKY 30 0 +miri MIRIM_SUB256 30 0 +miri MIRIM_SUB128 30 0 +miri MIRIM_SUB64 30 0 +miri MIRIM_SLITLESSPRISM 30 0 +miri MIRIM_SLITLESSUPPER 30 0 +miri MIRIM_SLITLESSLOWER 30 0 +miri MIRIM_MASK1065 30 0 +miri MIRIM_MASK1140 30 0 +miri MIRIM_MASK1550 30 0 +miri MIRIM_MASKLYOT 30 0 +miri MIRIM_TAMRS 30 0 +miri MIRIM_TALRS 30 0 +miri MIRIM_TABLOCK 30 0 +miri 
MIRIM_TALYOT_UL 30 0 +miri MIRIM_TALYOT_UR 30 0 +miri MIRIM_TALYOT_LL 30 0 +miri MIRIM_TALYOT_LR 30 0 +miri MIRIM_TALYOT_CUL 30 0 +miri MIRIM_TALYOT_CUR 30 0 +miri MIRIM_TALYOT_CLL 30 0 +miri MIRIM_TALYOT_CLR 30 0 +miri MIRIM_TA1550_UL 30 0 +miri MIRIM_TA1550_UR 30 0 +miri MIRIM_TA1550_LL 30 0 +miri MIRIM_TA1550_LR 30 0 +miri MIRIM_TA1550_CUL 30 0 +miri MIRIM_TA1550_CUR 30 0 +miri MIRIM_TA1550_CLL 30 0 +miri MIRIM_TA1550_CLR 30 0 +miri MIRIM_TA1140_UL 30 0 +miri MIRIM_TA1140_UR 30 0 +miri MIRIM_TA1140_LL 30 0 +miri MIRIM_TA1140_LR 30 0 +miri MIRIM_TA1140_CUL 30 0 +miri MIRIM_TA1140_CUR 30 0 +miri MIRIM_TA1140_CLL 30 0 +miri MIRIM_TA1140_CLR 30 0 +miri MIRIM_TA1065_UL 30 0 +miri MIRIM_TA1065_UR 30 0 +miri MIRIM_TA1065_LL 30 0 +miri MIRIM_TA1065_LR 30 0 +miri MIRIM_TA1065_CUL 30 0 +miri MIRIM_TA1065_CUR 30 0 +miri MIRIM_TA1065_CLL 30 0 +miri MIRIM_TA1065_CLR 30 0 +miri MIRIM_TAFULL 10 0 +miri MIRIM_TAILLUM 30 0 +miri MIRIM_TABRIGHTSKY 30 0 +miri MIRIM_TASUB256 30 0 +miri MIRIM_TASUB128 30 0 +miri MIRIM_TASUB64 30 0 +miri MIRIM_TASLITLESSPRISM 30 0 +miri MIRIM_CORON1065 30 0 +miri MIRIM_CORON1140 30 0 +miri MIRIM_CORON1550 30 0 +miri MIRIM_CORONLYOT 30 0 +miri MIRIM_KNIFE 30 0 +miri MIRIM_FP1MIMF 30 0 +miri MIRIM_FP2MIMF 30 0 +miri MIRIM_FP3MIMF 30 0 +miri MIRIM_FP4MIMF 30 0 +miri MIRIM_FP5MIMF 30 0 +miri MIRIM_SLIT 30 0 +miri MIRIFU_CHANNEL1A 30 0 +miri MIRIFU_1ASLICE01 30 0 +miri MIRIFU_1ASLICE02 30 0 +miri MIRIFU_1ASLICE03 30 0 +miri MIRIFU_1ASLICE04 30 0 +miri MIRIFU_1ASLICE05 30 0 +miri MIRIFU_1ASLICE06 30 0 +miri MIRIFU_1ASLICE07 30 0 +miri MIRIFU_1ASLICE08 30 0 +miri MIRIFU_1ASLICE09 30 0 +miri MIRIFU_1ASLICE10 30 0 +miri MIRIFU_1ASLICE11 30 0 +miri MIRIFU_1ASLICE12 30 0 +miri MIRIFU_1ASLICE13 30 0 +miri MIRIFU_1ASLICE14 30 0 +miri MIRIFU_1ASLICE15 30 0 +miri MIRIFU_1ASLICE16 30 0 +miri MIRIFU_1ASLICE17 30 0 +miri MIRIFU_1ASLICE18 30 0 +miri MIRIFU_1ASLICE19 30 0 +miri MIRIFU_1ASLICE20 30 0 +miri MIRIFU_1ASLICE21 30 0 +miri MIRIFU_CHANNEL1B 30 0 +miri MIRIFU_1BSLICE01 30 0 +miri MIRIFU_1BSLICE02 30 0 +miri MIRIFU_1BSLICE03 30 0 +miri MIRIFU_1BSLICE04 30 0 +miri MIRIFU_1BSLICE05 30 0 +miri MIRIFU_1BSLICE06 30 0 +miri MIRIFU_1BSLICE07 30 0 +miri MIRIFU_1BSLICE08 30 0 +miri MIRIFU_1BSLICE09 30 0 +miri MIRIFU_1BSLICE10 30 0 +miri MIRIFU_1BSLICE11 30 0 +miri MIRIFU_1BSLICE12 30 0 +miri MIRIFU_1BSLICE13 30 0 +miri MIRIFU_1BSLICE14 30 0 +miri MIRIFU_1BSLICE15 30 0 +miri MIRIFU_1BSLICE16 30 0 +miri MIRIFU_1BSLICE17 30 0 +miri MIRIFU_1BSLICE18 30 0 +miri MIRIFU_1BSLICE19 30 0 +miri MIRIFU_1BSLICE20 30 0 +miri MIRIFU_1BSLICE21 30 0 +miri MIRIFU_CHANNEL1C 30 0 +miri MIRIFU_1CSLICE01 30 0 +miri MIRIFU_1CSLICE02 30 0 +miri MIRIFU_1CSLICE03 30 0 +miri MIRIFU_1CSLICE04 30 0 +miri MIRIFU_1CSLICE05 30 0 +miri MIRIFU_1CSLICE06 30 0 +miri MIRIFU_1CSLICE07 30 0 +miri MIRIFU_1CSLICE08 30 0 +miri MIRIFU_1CSLICE09 30 0 +miri MIRIFU_1CSLICE10 30 0 +miri MIRIFU_1CSLICE11 30 0 +miri MIRIFU_1CSLICE12 30 0 +miri MIRIFU_1CSLICE13 30 0 +miri MIRIFU_1CSLICE14 30 0 +miri MIRIFU_1CSLICE15 30 0 +miri MIRIFU_1CSLICE16 30 0 +miri MIRIFU_1CSLICE17 30 0 +miri MIRIFU_1CSLICE18 30 0 +miri MIRIFU_1CSLICE19 30 0 +miri MIRIFU_1CSLICE20 30 0 +miri MIRIFU_1CSLICE21 30 0 +miri MIRIFU_CHANNEL2A 30 0 +miri MIRIFU_2ASLICE01 30 0 +miri MIRIFU_2ASLICE02 30 0 +miri MIRIFU_2ASLICE03 30 0 +miri MIRIFU_2ASLICE04 30 0 +miri MIRIFU_2ASLICE05 30 0 +miri MIRIFU_2ASLICE06 30 0 +miri MIRIFU_2ASLICE07 30 0 +miri MIRIFU_2ASLICE08 30 0 +miri MIRIFU_2ASLICE09 30 0 +miri MIRIFU_2ASLICE10 30 0 +miri MIRIFU_2ASLICE11 30 0 +miri MIRIFU_2ASLICE12 30 0 
+miri MIRIFU_2ASLICE13 30 0
+miri MIRIFU_2ASLICE14 30 0
+miri MIRIFU_2ASLICE15 30 0
+miri MIRIFU_2ASLICE16 30 0
+miri MIRIFU_2ASLICE17 30 0
+miri MIRIFU_CHANNEL2B 30 0
+miri MIRIFU_2BSLICE01 30 0
+miri MIRIFU_2BSLICE02 30 0
+miri MIRIFU_2BSLICE03 30 0
+miri MIRIFU_2BSLICE04 30 0
+miri MIRIFU_2BSLICE05 30 0
+miri MIRIFU_2BSLICE06 30 0
+miri MIRIFU_2BSLICE07 30 0
+miri MIRIFU_2BSLICE08 30 0
+miri MIRIFU_2BSLICE09 30 0
+miri MIRIFU_2BSLICE10 30 0
+miri MIRIFU_2BSLICE11 30 0
+miri MIRIFU_2BSLICE12 30 0
+miri MIRIFU_2BSLICE13 30 0
+miri MIRIFU_2BSLICE14 30 0
+miri MIRIFU_2BSLICE15 30 0
+miri MIRIFU_2BSLICE16 30 0
+miri MIRIFU_2BSLICE17 30 0
+miri MIRIFU_CHANNEL2C 30 0
+miri MIRIFU_2CSLICE01 30 0
+miri MIRIFU_2CSLICE02 30 0
+miri MIRIFU_2CSLICE03 30 0
+miri MIRIFU_2CSLICE04 30 0
+miri MIRIFU_2CSLICE05 30 0
+miri MIRIFU_2CSLICE06 30 0
+miri MIRIFU_2CSLICE07 30 0
+miri MIRIFU_2CSLICE08 30 0
+miri MIRIFU_2CSLICE09 30 0
+miri MIRIFU_2CSLICE10 30 0
+miri MIRIFU_2CSLICE11 30 0
+miri MIRIFU_2CSLICE12 30 0
+miri MIRIFU_2CSLICE13 30 0
+miri MIRIFU_2CSLICE14 30 0
+miri MIRIFU_2CSLICE15 30 0
+miri MIRIFU_2CSLICE16 30 0
+miri MIRIFU_2CSLICE17 30 0
+miri MIRIFU_CHANNEL3A 30 0
+miri MIRIFU_3ASLICE01 30 0
+miri MIRIFU_3ASLICE02 30 0
+miri MIRIFU_3ASLICE03 30 0
+miri MIRIFU_3ASLICE04 30 0
+miri MIRIFU_3ASLICE05 30 0
+miri MIRIFU_3ASLICE06 30 0
+miri MIRIFU_3ASLICE07 30 0
+miri MIRIFU_3ASLICE08 30 0
+miri MIRIFU_3ASLICE09 30 0
+miri MIRIFU_3ASLICE10 30 0
+miri MIRIFU_3ASLICE11 30 0
+miri MIRIFU_3ASLICE12 30 0
+miri MIRIFU_3ASLICE13 30 0
+miri MIRIFU_3ASLICE14 30 0
+miri MIRIFU_3ASLICE15 30 0
+miri MIRIFU_3ASLICE16 30 0
+miri MIRIFU_CHANNEL3B 30 0
+miri MIRIFU_3BSLICE01 30 0
+miri MIRIFU_3BSLICE02 30 0
+miri MIRIFU_3BSLICE03 30 0
+miri MIRIFU_3BSLICE04 30 0
+miri MIRIFU_3BSLICE05 30 0
+miri MIRIFU_3BSLICE06 30 0
+miri MIRIFU_3BSLICE07 30 0
+miri MIRIFU_3BSLICE08 30 0
+miri MIRIFU_3BSLICE09 30 0
+miri MIRIFU_3BSLICE10 30 0
+miri MIRIFU_3BSLICE11 30 0
+miri MIRIFU_3BSLICE12 30 0
+miri MIRIFU_3BSLICE13 30 0
+miri MIRIFU_3BSLICE14 30 0
+miri MIRIFU_3BSLICE15 30 0
+miri MIRIFU_3BSLICE16 30 0
+miri MIRIFU_CHANNEL3C 30 0
+miri MIRIFU_3CSLICE01 30 0
+miri MIRIFU_3CSLICE02 30 0
+miri MIRIFU_3CSLICE03 30 0
+miri MIRIFU_3CSLICE04 30 0
+miri MIRIFU_3CSLICE05 30 0
+miri MIRIFU_3CSLICE06 30 0
+miri MIRIFU_3CSLICE07 30 0
+miri MIRIFU_3CSLICE08 30 0
+miri MIRIFU_3CSLICE09 30 0
+miri MIRIFU_3CSLICE10 30 0
+miri MIRIFU_3CSLICE11 30 0
+miri MIRIFU_3CSLICE12 30 0
+miri MIRIFU_3CSLICE13 30 0
+miri MIRIFU_3CSLICE14 30 0
+miri MIRIFU_3CSLICE15 30 0
+miri MIRIFU_3CSLICE16 30 0
+miri MIRIFU_CHANNEL4A 30 0
+miri MIRIFU_4ASLICE01 30 0
+miri MIRIFU_4ASLICE02 30 0
+miri MIRIFU_4ASLICE03 30 0
+miri MIRIFU_4ASLICE04 30 0
+miri MIRIFU_4ASLICE05 30 0
+miri MIRIFU_4ASLICE06 30 0
+miri MIRIFU_4ASLICE07 30 0
+miri MIRIFU_4ASLICE08 30 0
+miri MIRIFU_4ASLICE09 30 0
+miri MIRIFU_4ASLICE10 30 0
+miri MIRIFU_4ASLICE11 30 0
+miri MIRIFU_4ASLICE12 30 0
+miri MIRIFU_CHANNEL4B 30 0
+miri MIRIFU_4BSLICE01 30 0
+miri MIRIFU_4BSLICE02 30 0
+miri MIRIFU_4BSLICE03 30 0
+miri MIRIFU_4BSLICE04 30 0
+miri MIRIFU_4BSLICE05 30 0
+miri MIRIFU_4BSLICE06 30 0
+miri MIRIFU_4BSLICE07 30 0
+miri MIRIFU_4BSLICE08 30 0
+miri MIRIFU_4BSLICE09 30 0
+miri MIRIFU_4BSLICE10 30 0
+miri MIRIFU_4BSLICE11 30 0
+miri MIRIFU_4BSLICE12 30 0
+miri MIRIFU_CHANNEL4C 30 0
+miri MIRIFU_4CSLICE01 30 0
+miri MIRIFU_4CSLICE02 30 0
+miri MIRIFU_4CSLICE03 30 0
+miri MIRIFU_4CSLICE04 30 0
+miri MIRIFU_4CSLICE05 30 0
+miri MIRIFU_4CSLICE06 30 0
+miri MIRIFU_4CSLICE07 30 0
+miri MIRIFU_4CSLICE08 30 0
+miri MIRIFU_4CSLICE09 30 0
+miri MIRIFU_4CSLICE10 30 0
+miri MIRIFU_4CSLICE11 30 0
+miri MIRIFU_4CSLICE12 30 0
+nirspec NRS1_FULL_OSS 10 0
+nirspec NRS1_FULL 10 0
+nirspec NRS2_FULL_OSS 10 0
+nirspec NRS2_FULL 10 0
+nirspec NRS_S200A1_SLIT 30 0
+nirspec NRS_S200A2_SLIT 30 0
+nirspec NRS_S400A1_SLIT 30 0
+nirspec NRS_S1600A1_SLIT 30 0
+nirspec NRS_S200B1_SLIT 30 0
+nirspec NRS_FULL_IFU 10 0
+nirspec NRS_IFU_SLICE00 30 0
+nirspec NRS_IFU_SLICE01 30 0
+nirspec NRS_IFU_SLICE02 30 0
+nirspec NRS_IFU_SLICE03 30 0
+nirspec NRS_IFU_SLICE04 30 0
+nirspec NRS_IFU_SLICE05 30 0
+nirspec NRS_IFU_SLICE06 30 0
+nirspec NRS_IFU_SLICE07 30 0
+nirspec NRS_IFU_SLICE08 30 0
+nirspec NRS_IFU_SLICE09 30 0
+nirspec NRS_IFU_SLICE10 30 0
+nirspec NRS_IFU_SLICE11 30 0
+nirspec NRS_IFU_SLICE12 30 0
+nirspec NRS_IFU_SLICE13 30 0
+nirspec NRS_IFU_SLICE14 30 0
+nirspec NRS_IFU_SLICE15 30 0
+nirspec NRS_IFU_SLICE16 30 0
+nirspec NRS_IFU_SLICE17 30 0
+nirspec NRS_IFU_SLICE18 30 0
+nirspec NRS_IFU_SLICE19 30 0
+nirspec NRS_IFU_SLICE20 30 0
+nirspec NRS_IFU_SLICE21 30 0
+nirspec NRS_IFU_SLICE22 30 0
+nirspec NRS_IFU_SLICE23 30 0
+nirspec NRS_IFU_SLICE24 30 0
+nirspec NRS_IFU_SLICE25 30 0
+nirspec NRS_IFU_SLICE26 30 0
+nirspec NRS_IFU_SLICE27 30 0
+nirspec NRS_IFU_SLICE28 30 0
+nirspec NRS_IFU_SLICE29 30 0
+nirspec NRS_FULL_MSA 10 0
+nirspec NRS_FULL_MSA1 10 0
+nirspec NRS_FULL_MSA2 10 0
+nirspec NRS_FULL_MSA3 10 0
+nirspec NRS_FULL_MSA4 10 0
+nirspec NRS_VIGNETTED_MSA 30 0
+nirspec NRS_VIGNETTED_MSA1 30 0
+nirspec NRS_VIGNETTED_MSA2 30 0
+nirspec NRS_VIGNETTED_MSA3 30 0
+nirspec NRS_VIGNETTED_MSA4 30 0
+nirspec NRS_FIELD1_MSA4 30 0
+nirspec NRS_FIELD2_MSA4 30 0
+nirspec NRS1_FP1MIMF 30 0
+nirspec NRS1_FP2MIMF 30 0
+nirspec NRS1_FP3MIMF 30 0
+nirspec NRS2_FP4MIMF 30 0
+nirspec NRS2_FP5MIMF 30 0
+nirspec CLEAR_GWA_OTE 30 0
+nirspec F110W_GWA_OTE 30 0
+nirspec F140X_GWA_OTE 30 0
+nirspec NRS_SKY_OTEIP 30 0
+nirspec NRS_CLEAR_OTEIP_MSA_L0 30 0
+nirspec NRS_CLEAR_OTEIP_MSA_L1 30 0
+nirspec NRS_F070LP_OTEIP_MSA_L0 30 0
+nirspec NRS_F070LP_OTEIP_MSA_L1 30 0
+nirspec NRS_F100LP_OTEIP_MSA_L0 30 0
+nirspec NRS_F100LP_OTEIP_MSA_L1 30 0
+nirspec NRS_F170LP_OTEIP_MSA_L0 30 0
+nirspec NRS_F170LP_OTEIP_MSA_L1 30 0
+nirspec NRS_F290LP_OTEIP_MSA_L0 30 0
+nirspec NRS_F290LP_OTEIP_MSA_L1 30 0
+nirspec NRS_F110W_OTEIP_MSA_L0 30 0
+nirspec NRS_F110W_OTEIP_MSA_L1 30 0
+nirspec NRS_F140X_OTEIP_MSA_L0 30 0
+nirspec NRS_F140X_OTEIP_MSA_L1 30 0
+fgs FGS1_FULL_OSS 10 0
+fgs FGS1_FULL 10 0
+fgs FGS2_FULL_OSS 10 0
+fgs FGS2_FULL 10 0
+fgs FGS1_SUB128LL 30 0
+fgs FGS1_SUB128DIAG 30 0
+fgs FGS1_SUB128CNTR 30 0
+fgs FGS1_SUB32LL 30 0
+fgs FGS1_SUB32DIAG 30 0
+fgs FGS1_SUB32CNTR 30 0
+fgs FGS1_SUB8LL 30 0
+fgs FGS1_SUB8DIAG 30 0
+fgs FGS1_SUB8CNTR 30 0
+fgs FGS2_SUB128LL 30 0
+fgs FGS2_SUB128DIAG 30 0
+fgs FGS2_SUB128CNTR 30 0
+fgs FGS2_SUB32LL 30 0
+fgs FGS2_SUB32DIAG 30 0
+fgs FGS2_SUB32CNTR 30 0
+fgs FGS2_SUB8LL 30 0
+fgs FGS2_SUB8DIAG 30 0
+fgs FGS2_SUB8CNTR 30 0
+fgs FGS1_FP1MIMF 30 0
+fgs FGS1_FP2MIMF 30 0
+fgs FGS1_FP3MIMF 30 0
+fgs FGS1_FP4MIMF 30 0
+fgs FGS1_FP5MIMF 30 0
+fgs FGS2_FP1MIMF 30 0
+fgs FGS2_FP2MIMF 30 0
+fgs FGS2_FP3MIMF 30 0
+fgs FGS2_FP4MIMF 30 0
+fgs FGS2_FP5MIMF 30 0
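For reference, each row of dark_monitor_file_thresholds.txt holds (instrument, aperture, integration-count threshold, number of initial integrations to skip). A minimal sketch of reading it, assuming the astropy ascii reader and the column names (Aperture, Threshold, N_skipped_integs) that run() references; the path here is illustrative:

    from astropy.io import ascii

    # Assumed location; the real file lives in jwql/instrument_monitors/common_monitors/
    limits = ascii.read('dark_monitor_file_thresholds.txt')

    match = limits['Aperture'] == 'MIRIM_FULL'
    threshold = limits['Threshold'][match][0]           # integrations required before the monitor runs
    n_skipped = limits['N_skipped_integs'][match][0]    # initial integrations ignored in each file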
diff --git a/jwql/instrument_monitors/pipeline_tools.py b/jwql/instrument_monitors/pipeline_tools.py
index e9754bd17..4e7142128 100644
--- a/jwql/instrument_monitors/pipeline_tools.py
+++ b/jwql/instrument_monitors/pipeline_tools.py
@@ -197,7 +197,7 @@ def get_pipeline_steps(instrument):
     return required_steps


-def image_stack(file_list):
+def image_stack(file_list, skipped_initial_ints=0):
     """Given a list of fits files containing 2D images, read in all data
     and place into a 3D stack

@@ -206,6 +206,11 @@ def image_stack(file_list):
     file_list : list
         List of fits file names

+    skipped_initial_ints : int
+        Number of initial integrations from each file to skip over and
+        not include in the stack. Only works with files containing 3D
+        arrays (e.g. rateints files).
+
     Returns
     -------
     cube : numpy.ndarray
@@ -223,7 +228,8 @@ def image_stack(file_list):
             if i == 0:
                 ndim_base = image.shape
                 if len(ndim_base) == 3:
-                    cube = copy.deepcopy(image)
+                    cube = copy.deepcopy(image[skipped_initial_ints:, :, :])
+                    num_ints -= skipped_initial_ints
                 elif len(ndim_base) == 2:
                     cube = np.expand_dims(image, 0)
             else:
@@ -231,9 +237,12 @@ def image_stack(file_list):
                 if ndim_base[-2:] == ndim[-2:]:
                     if len(ndim) == 2:
                         image = np.expand_dims(image, 0)
+                        cube = np.vstack((cube, image))
+                    elif len(ndim) == 3:
+                        cube = np.vstack((cube, image[skipped_initial_ints:, :, :]))
+                        num_ints -= skipped_initial_ints
                     elif len(ndim) > 3:
                         raise ValueError("4-dimensional input slope images not supported.")
-                    cube = np.vstack((cube, image))
                 else:
                     raise ValueError("Input images are of inconsistent size in x/y dimension.")
             exptimes.append([exptime] * num_ints)
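To make the new skipped_initial_ints behavior concrete, here is a small self-contained sketch of the same stacking idea (an illustration, not the jwql implementation): 3D inputs drop their first N integrations before being stacked, while 2D inputs contribute a single plane.

    import numpy as np

    def stack_images(images, skipped_initial_ints=0):
        """Stack 2D images and 3D cubes into one 3D cube, dropping the
        first `skipped_initial_ints` integrations of each 3D input."""
        planes = []
        for image in images:
            if image.ndim == 3:
                planes.append(image[skipped_initial_ints:, :, :])
            elif image.ndim == 2:
                planes.append(np.expand_dims(image, 0))
            else:
                raise ValueError("Only 2D or 3D inputs are supported.")
        return np.vstack(planes)

    # One 5-integration cube plus one 2D image, skipping 1 integration per cube
    cube = stack_images([np.zeros((5, 10, 10)), np.ones((10, 10))], skipped_initial_ints=1)
    assert cube.shape == (5, 10, 10)  # 4 kept integrations + 1 single image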
diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py
index b9eb705a8..910cf0b0a 100644
--- a/jwql/utils/constants.py
+++ b/jwql/utils/constants.py
@@ -166,7 +166,7 @@
 # been combined into a mean dark rate
 DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME = {'nircam': 10.,
                                              'niriss': 10.,
-                                             'miri': 10.,
+                                             'miri': 0.00001,  # Treat each MIRI exposure separately
                                              'nirspec': 10.,
                                              'fgs': 10.
                                              }

From 5493e8987d2dc118d1b2583b8e87cb970c926815 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Tue, 25 Apr 2023 17:11:41 -0400
Subject: [PATCH 06/38] rate vs rateints differences. Ready for initial testing

---
 .../common_monitors/dark_monitor.py           | 45 ++++++-------------
 .../dark_monitor_file_thresholds.txt          |  4 +-
 2 files changed, 15 insertions(+), 34 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 252773b79..f12de5a40 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -694,22 +694,22 @@
         # Basic metadata that will be needed later
         self.get_metadata(file_list[0])

+        # For MIRI, save the rateints files. For other instruments save the rate files.
+        if self.instrument == 'miri':
+            output_suffix = 'rateints'
+        else:
+            output_suffix = 'rate'
+
         # Run pipeline steps on files, generating slope files
         pipeline_files = []
         slope_files = []
         for filename in file_list:
             logging.info('\tWorking on file: {}'.format(filename))
-
-            need to deal with rateints files here
-
-            rate_file = filename.replace("dark", "rate")
+            rate_file = filename.replace("dark", output_suffix)
             rate_file_name = os.path.basename(rate_file)
             local_rate_file = os.path.join(self.data_dir, rate_file_name)
-
-
             if os.path.isfile(local_rate_file):
                 logging.info("\t\tFile {} exists, skipping pipeline".format(local_rate_file))
                 slope_files.append(local_rate_file)
             else:
                 logging.info("\t\tAdding {} to calibration set".format(filename))
                 pipeline_files.append(filename)

-        # For MIRI, save the rateints files. For other instruments save the rate files.
-        if self.instrument == 'miri':
-            output_suffix = 'rateints'
-        else:
-            output_suffix = 'rate'
-
         # For other instruments, just save the rate files
         outputs = run_parallel_pipeline(pipeline_files, "dark", [output_suffix], self.instrument)
@@ -747,24 +741,9 @@
         mid_time = instrument_properties.mean_time(obs_times)

         try:
-
-
-
-            do we bother switching to use rateints files where available, so that we can create a sigma-clipped mean
-            slope rather than the basic mean that goes into the rate file? If we are confident in the jump flagging
-            then it seems like a straight mean might be ok? My concern with rateints files is that the pipeline
-            might not output them in all cases?
-
-
-            for MIRI, we want rateints files, and we want to throw out the first int of each one before creating mean slope images
-
-
-
-
             # Read in all slope images and create a stack of ints (from rateints files)
             # or mean ints (from rate files)
-            slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files, skipped_initial_ints=)
+            slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files, skipped_initial_ints=self.skipped_initial_ints)

             # Calculate a mean slope image from the inputs
             slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3)
@@ -965,11 +944,13 @@
                 # If the aperture is not listed in the threshold file, we need
                 # a default
                 if not np.any(match):
-                    integration_count_threshold = 30
+                    integration_count_threshold = 1
+                    self.skipped_initial_ints = 0
                     logging.warning(('\tAperture {} is not present in the threshold file. Continuing '
-                                     'with the default threshold of 30 files.'.format(aperture)))
+                                     'with the default threshold of 1 file, and no skipped integrations.'.format(aperture)))
                 else:
                     integration_count_threshold = limits['Threshold'][match][0]
+                    self.skipped_initial_ints = limits['N_skipped_integs'][match][0]
                 self.aperture = aperture

                 # We need a separate search for each readout pattern
@@ -1026,7 +1007,7 @@
                             if xsize == expected_xsize and ysize == expected_ysize:
                                 temp_filenames.append(new_file)
                                 total_integrations += int(nints)
-                                integrations.append(int(nints))
+                                integrations.append(int(nints) - self.skipped_initial_ints)
                                 starting_times.append(hdulist[0].header['EXPSTART'])
                                 ending_times.append(hdulist[0].header['EXPEND'])
                             else:
diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt
index bf6bdc34a..22e49f0e8 100644
--- a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt
@@ -238,8 +238,8 @@
 niriss NIS_FP5MIMF 30 0
 niriss NIS_AMIFULL 10 0
 niriss NIS_SOSSFULL 10 0
 niriss NIS_WFSS 10 0
-miri MIRIM_FULL_OSS 1 0
-miri MIRIM_FULL 1 0
+miri MIRIM_FULL_OSS 1 1
+miri MIRIM_FULL 1 1
 miri MIRIM_ILLUM 30 0
 miri MIRIM_BRIGHTSKY 30 0
 miri MIRIM_SUB256 30 0
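As a worked example of the new MIRIM_FULL 1 1 entry above: the threshold is 1 integration and the first integration of each file is skipped, so a file only counts toward the threshold through its remaining integrations. A sketch of that counting logic, mirroring the eventual NINTS handling in run() (the values here are hypothetical):

    # Hypothetical NINTS values read from three file headers
    nints_per_file = [3, 1, 2]
    n_skipped = 1    # N_skipped_integs for MIRIM_FULL
    threshold = 1    # integration threshold for MIRIM_FULL

    # Files whose integrations are exhausted by the skip contribute nothing
    kept = [n - n_skipped for n in nints_per_file if n - n_skipped > 0]
    total_integrations = sum(kept)                  # 2 + 1 = 3
    run_monitor = total_integrations >= threshold   # True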
From 8a2c2ba7e207aa528bfacf820443280abb5a2a5c Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Mon, 1 May 2023 16:38:22 -0400
Subject: [PATCH 07/38] Add test for file splitting

---
 .../common_monitors/dark_monitor.py | 125 +++++++++++++-----
 jwql/tests/test_dark_monitor.py     | 121 +++++++++++++++++
 2 files changed, 211 insertions(+), 35 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index f12de5a40..314a510af 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -103,8 +103,9 @@
 from jwql.jwql_monitors import monitor_mast
 from jwql.shared_tasks.shared_tasks import only_one, run_pipeline, run_parallel_pipeline
 from jwql.utils import calculations, instrument_properties, monitor_utils
-from jwql.utils.constants import ASIC_TEMPLATES, DARK_MONITOR_MAX_BADPOINTS_TO_PLOT, JWST_INSTRUMENT_NAMES, FULL_FRAME_APERTURES
-from jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE, JWST_DATAPRODUCTS, RAPID_READPATTERNS
+from jwql.utils.constants import ASIC_TEMPLATES, DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME, DARK_MONITOR_MAX_BADPOINTS_TO_PLOT
+from jwql.utils.constants import JWST_INSTRUMENT_NAMES, FULL_FRAME_APERTURES, JWST_INSTRUMENT_NAMES_MIXEDCASE
+from jwql.utils.constants import JWST_DATAPRODUCTS, RAPID_READPATTERNS
 from jwql.utils.logging_functions import log_info, log_fail
 from jwql.utils.permissions import set_permissions
 from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path
@@ -1236,11 +1237,17 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_
         # Add dividers at the beginning index to make the coding easier
         dividers = np.insert(dividers, 0, 0)

-        # If no epoch boundaries are found, then add a divider at the end, and the entire
-        # set of files will be treated as a single batch
-        if len(dividers) == 1:
-            dividers = np.insert(dividers, len(dividers), len(dividers))
+        # If there is no divider at the end of the list of files, then add one
+        if dividers[-1] < len(delta_t):
+            dividers = np.insert(dividers, len(dividers), len(delta_t))
+
+        print('delta_t', delta_t)
+        print('dividers:', dividers)
+        print('threshold:', threshold)
+
+
+        # Loop over epochs.
         # Within each batch, divide up the integrations into multiple batches if the total
         # number of integrations are above 2*threshold
         for i in range(len(dividers) - 1):
@@ -1250,30 +1257,58 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_
             batch_ints = integration_list[dividers[i]:dividers[i+1]]
             batch_files = files[dividers[i]:dividers[i+1]]
             batch_start_times = start_times[dividers[i]:dividers[i+1]]
             batch_end_times = end_times[dividers[i]:dividers[i+1]]
             batch_int_sum = np.sum(batch_ints)

+            print('batch_ints', batch_ints)
+            print('batch_files', batch_files)
+
+
             # Calculate how many subgroups to break up the batch into,
             # based on the threshold, and under the assumption that we
             # don't want to skip running on any of the files.
             n_subgroups = int(batch_int_sum / threshold)

+            print('n_subgroups', n_subgroups)
+
+            if n_subgroups == 0:
+
+                print('i and len(dividers)-1:', i, len(dividers) - 1, dividers)
+
                 # Here, we are in a batch where the total number of integrations
-                # is less than the treshold (but the batch was identified due to
+                # is less than the threshold (but the batch was identified due to
                 # the gaps in time before and after the batch.) In this case, we'll
-                # run the monitor with fewer than the threshold number of integrations
-                self.file_batches.append(batch_files)
-                self.start_time_batches.append(batch_start_times)
-                self.end_time_batches.append(batch_end_times)
-                self.integration_batches.append(batch_ints)
-            if n_subgroups == 1:
-                # Here there are not enough integrations to split the batch into
-                # more than one subgroup
-                self.file_batches.append(batch_files)
-                self.start_time_batches.append(batch_start_times)
-                self.end_time_batches.append(batch_end_times)
-                self.integration_batches.append(batch_ints)
-
-            elif n_subgroups > 1:
-                # Here there are enough integrations to break the batch up
+                # run the monitor with fewer than the threshold number of integrations,
+                # but only if this is not the final batch. In that case it may be that
+                # more observations are coming that should be grouped with the batch.
+                if i < (len(dividers) - 2):
+                    self.file_batches.append(batch_files)
+                    self.start_time_batches.append(batch_start_times)
+                    self.end_time_batches.append(batch_end_times)
+                    self.integration_batches.append(batch_ints)
+                else:
+                    #if (i == len(dividers) - 1) and (batchnum == (n_subgroups - 1))
+                    # In this case, we are in the final epoch division AND we do not
+                    # have enough integrations to subdivide the data. So we'll skip
+                    # this data and wait for a future run of the monitor to bundle
+                    # it with more, new data.
+                    print('subgroup 0 in final epoch does not have enough ints, and the final delta t is too small. skipping.')
+                    pass
+
+            #elif n_subgroups == 1:
+            #    # Here there are not enough integrations to split the batch into
+            #    # more than one subgroup
+            #    self.file_batches.append(batch_files)
+            #    self.start_time_batches.append(batch_start_times)
+            #    self.end_time_batches.append(batch_end_times)
+            #    self.integration_batches.append(batch_ints)
+
+            elif n_subgroups >= 1:
+                # Here there are enough integrations to meet the threshold,
+                # or possibly enough to break the batch up
                 # into more than one subgroup. We can't split within a file,
                 # so we split after the file that gets the total number of
                 # integrations above the threshold.
@@ -1285,31 +1320,51 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_
                 # Calculate the total number of integrations up to each file
                 batch_int_sums = np.array([ np.sum(batch_ints[0:jj]) for jj in range(1, len(batch_ints)) ])
                 base = 0
                 startidx = 0
                 endidx = 0
                 complete = False
-                for batchnum in range(len(n_subgroups)):
+                for batchnum in range(n_subgroups):
                     endidx = np.where(batch_int_sums >= (base + threshold))[0]

                     # Check if we reach the end of the file list
                     if len(endidx) == 0:
-                        endidx = len(batch_int_sum)
+                        endidx = len(batch_int_sums)
                         complete = True
                     else:
                         endidx = endidx[0]

-                    subgroup_ints = batch_ints[startidx: endidx]
-                    subgroup_files = batch_files[startidx: endidx]
-                    subgroup_start_times = batch_start_times[startidx: endidx]
-                    subgroup_end_times = batch_end_times[startidx: endidx]
+                    subgroup_ints = batch_ints[startidx: endidx + 1]
+                    subgroup_files = batch_files[startidx: endidx + 1]
+                    subgroup_start_times = batch_start_times[startidx: endidx + 1]
+                    subgroup_end_times = batch_end_times[startidx: endidx + 1]
                     subgroup_int_sum = np.sum(subgroup_ints)

-                    # Add to output lists
-                    self.file_batches.append(subgroup_files)
-                    self.start_time_batches.append(subgroup_start_times)
-                    self.end_time_batches.append(subgroup_end_times)
-                    self.integration_batches.append(subgroup_ints)
+
+                    print('batchnum: ', batchnum)
+                    print(batch_ints[startidx: endidx + 1])
+                    print(batch_files[startidx: endidx + 1])
+                    print(i, len(dividers) - 1, batchnum, n_subgroups-1)
+
+
+
+
+                    # Add to output lists. The exception is if we are in the
+                    # final subgroup of the final epoch. In that case, we don't know
+                    # if more data are coming soon that may be able to be combined. So
+                    # in that case, we ignore the files for this run of the monitor.
+                    if (i == len(dividers) - 1) and (batchnum == (n_subgroups - 1)):
+                        # Here we are in the final subgroup of the final epoch, where we
+                        # may not necessarily know if there will be future data to combine
+                        # with these data
+                        pass
+                    else:
+                        #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)):
+                        print('ADDED')
+                        self.file_batches.append(subgroup_files)
+                        self.start_time_batches.append(subgroup_start_times)
+                        self.end_time_batches.append(subgroup_end_times)
+                        self.integration_batches.append(subgroup_ints)

                     if not complete:
-                        startidx = deepcopy(endidx)
-                        base = batch_int_sums[endidx - 1]
+                        startidx = deepcopy(endidx + 1)
+                        base = batch_int_sums[endidx]
                     else:
                         # If we reach the end of the list before the expected number of
                         # subgroups, then we quit.
diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py
index 9cb11a96f..8ba0a7b49 100644
--- a/jwql/tests/test_dark_monitor.py
+++ b/jwql/tests/test_dark_monitor.py
@@ -28,11 +28,123 @@
 from jwql.instrument_monitors.common_monitors import dark_monitor
 from jwql.tests.resources import has_test_db
 from jwql.utils.monitor_utils import mast_query_darks
+from jwql.utils.constants import DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME
 from jwql.utils.utils import get_config

 ON_GITHUB_ACTIONS = '/home/runner' in os.path.expanduser('~') or '/Users/runner' in os.path.expanduser('~')


+def generate_data_for_file_splitting_test():
+    # Define data for parameterized test_split_files_into_sub_lists calls
+    files = [f'file_{idx}.fits' for idx in range(10)]
+    now = Time.now().mjd
+    deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    threshold = 5.  # integrations
+    integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1]
+    expected = [['file_0.fits', 'file_1.fits'],
+                ['file_2.fits', 'file_3.fits', 'file_4.fits'],
+                ['file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits']
+                ]
+    test1 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    # Final epoch may not be over. Not enough ints in final epoch
+    deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    threshold = 6.  # integrations
+    integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1]
+    expected = [['file_0.fits', 'file_1.fits'],
+                ['file_2.fits', 'file_3.fits', 'file_4.fits']
+                ]
+    test2 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    # Final epoch may not be over. Not enough ints in final subgroup of final epoch
+    deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    threshold = 6.  # integrations
+    integration_list = [3, 3, 2, 2, 2, 1, 3, 3, 2, 2]
+    expected = [['file_0.fits', 'file_1.fits'],
+                ['file_2.fits', 'file_3.fits', 'file_4.fits'],
+                ['file_5.fits', 'file_6.fits', 'file_7.fits']
+                ]
+    test3 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    deltat = [40., 39., 38., 37., 36., 18., 17., 16., 15., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    threshold = 5.  # integrations
+    integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1]
+    expected = [['file_0.fits', 'file_1.fits'],
+                ['file_2.fits', 'file_3.fits', 'file_4.fits'],
+                ['file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits']
+                ]
+    test4 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    deltat = [40., 39., 38., 37., 36., 18., 17., 16., 15., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    threshold = 6.  # integrations
+    integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1]
+    expected = [['file_0.fits', 'file_1.fits'],
+                ['file_2.fits', 'file_3.fits', 'file_4.fits'],
+                ['file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits']
+                ]
+    test5 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    integration_list = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+    threshold = 6
+    expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits']]
+    test6 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    threshold = 9
+    expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits',
+                 'file_6.fits', 'file_7.fits', 'file_8.fits']]
+    test7 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    integration_list = [1] * len(start_times)
+    threshold = 10
+    expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits',
+                 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits']
+                ]
+    test8 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    deltat = [23., 22., 21., 20., 19., 18., 17., 16., 15., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    integration_list = [1] * len(start_times)
+    threshold = 10
+    expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits',
+                 'file_6.fits', 'file_7.fits', 'file_8.fits']
+                ]
+    test9 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    integration_list = [1] * len(start_times)
+    threshold = 10
+    expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits',
+                 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits']
+                ]
+    test10 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    integration_list = [1] * len(start_times)
+    threshold = 11
+    expected = []
+    test11 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    return [test1, test2, test3, test4, test5, test6, test7, test8, test9, test10, test11]
+
+
 def test_find_hot_dead_pixels():
     """Test hot and dead pixel searches"""
     monitor = dark_monitor.Dark()
@@ -138,6 +250,15 @@ def test_shift_to_full_frame():
     assert np.all(new_coords[1] == np.array([518, 515]))


+@pytest.mark.parametrize("files,start_times,end_times,integration_list,threshold,expected", generate_data_for_file_splitting_test())
+def test_split_files_into_sub_lists(files, start_times, end_times, integration_list, threshold, expected):
+    """Test that file lists are appropriately split into subgroups for separate monitor runs"""
+    d = dark_monitor.Dark()
+    d.instrument = 'nircam'
+    d.split_files_into_sub_lists(files, start_times, end_times, integration_list, threshold)
+    assert d.file_batches == expected
+
+
 @pytest.mark.skipif(not has_test_db(), reason='Modifies test database.')
 def test_add_bad_pix():
     coord = ([1, 2, 3], [4, 5, 6])
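Conceptually, the splitting these parametrized cases pin down is a cumulative sum over per-file integration counts, cut each time the running total reaches the threshold (after the file list has first been cut at large time gaps). A simplified reference sketch of just the integration-count part, ignoring the epoch logic:

    def split_by_integrations(ints, threshold):
        """Group file indices so each batch's integration total reaches `threshold`;
        trailing files that never reach the threshold are held back."""
        batches, current, running = [], [], 0
        for idx, n in enumerate(ints):
            current.append(idx)
            running += n
            if running >= threshold:
                batches.append(current)
                current, running = [], 0
        return batches

    print(split_by_integrations([3, 3, 2, 2, 2, 1, 1, 1, 1, 1], 5))
    # [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]]  -- compare with test1's expected batches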
From 51a550807de10bf9803af633c051895ee7497997 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Tue, 2 May 2023 14:23:56 -0400
Subject: [PATCH 08/38] Fix final epoch/subgroup. Tests all passing.

---
 .../common_monitors/dark_monitor.py | 28 +++++++++++++++----
 jwql/tests/test_dark_monitor.py     | 25 ++++++++++++++++-
 2 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 314a510af..756d66fff 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1266,15 +1266,16 @@
             # Calculate how many subgroups to break up the batch into,
             # based on the threshold, and under the assumption that we
             # don't want to skip running on any of the files.
-            n_subgroups = int(batch_int_sum / threshold)
+            n_subgroups = int(np.ceil(batch_int_sum / threshold))

             print('n_subgroups', n_subgroups)
+            print(batch_int_sum, threshold)

             if n_subgroups == 0:
-
+                print('IF N_SUBGROUPS USES NP.CEIL THEN IT IS NOT POSSIBLE TO HAVE N_SUBGROUPS == 0')

                 print('i and len(dividers)-1:', i, len(dividers) - 1, dividers)
@@ -1290,12 +1291,12 @@
                     self.end_time_batches.append(batch_end_times)
                     self.integration_batches.append(batch_ints)
                 else:
+                    print('do we need a smarter if statement, like the line commented below?')
                     #if (i == len(dividers) - 1) and (batchnum == (n_subgroups - 1))
                     # In this case, we are in the final epoch division AND we do not
                     # have enough integrations to subdivide the data. So we'll skip
                     # this data and wait for a future run of the monitor to bundle
                     # it with more, new data.
-                    print('subgroup 0 in final epoch does not have enough ints, and the final delta t is too small. skipping.')
                     pass
@@ -1349,11 +1350,28 @@
                     # in that case, we ignore the files for this run of the monitor.
-                    if (i == len(dividers) - 1) and (batchnum == (n_subgroups - 1)):
+                    if (i == len(dividers) - 2) and (batchnum == (n_subgroups - 1)):
                         # Here we are in the final subgroup of the final epoch, where we
                         # may not necessarily know if there will be future data to combine
                         # with these data
-                        pass
+
+                        #Here..... we do not know for sure the epoch is over? Confirm that we do not know this.
+                        #If that is true, we can still check to see if we have reached the threshold number of
+                        #integrations and run if so.
+                        #print('final subgroup of final epoch. if the epoch is not over, so skipping files')
+
+                        if np.sum(subgroup_ints) >= threshold:
+                            print('ADDED - final subgroup of final epoch')
+                            self.file_batches.append(subgroup_files)
+                            self.start_time_batches.append(subgroup_start_times)
+                            self.end_time_batches.append(subgroup_end_times)
+                            self.integration_batches.append(subgroup_ints)
+                        else:
+                            # Here the final subgroup does not have enough integrations to reach the threshold
+                            # and we're not sure if the epoch is complete, so we skip these files and save them
+                            # for a future dark monitor run
+                            pass
+
                     else:
                         #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)):
                         print('ADDED')
diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py
index 8ba0a7b49..f06d1002b 100644
--- a/jwql/tests/test_dark_monitor.py
+++ b/jwql/tests/test_dark_monitor.py
@@ -142,7 +142,30 @@
     expected = []
     test11 = (files, start_times, end_times, integration_list, threshold, expected)

-    return [test1, test2, test3, test4, test5, test6, test7, test8, test9, test10, test11]
+    deltat = [40., 39., 38., 37., 24., 23., 22., 21., 1., 0.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    threshold = 6  # integrations
+    integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1]
+    expected = [['file_0.fits', 'file_1.fits'],
+                ['file_2.fits', 'file_3.fits'],
+                ['file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits']
+                ]
+    test12 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    deltat = [50., 49., 48., 47., 34., 33., 32., 31., 20., 19.]
+    start_times = [now - dt for dt in deltat]
+    end_times = [s+0.1 for s in start_times]
+    threshold = 6  # integrations
+    integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1]
+    expected = [['file_0.fits', 'file_1.fits'],
+                ['file_2.fits', 'file_3.fits'],
+                ['file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits'],
+                ['file_8.fits', 'file_9.fits']
+                ]
+    test13 = (files, start_times, end_times, integration_list, threshold, expected)
+
+    return [test1, test2, test3, test4, test5, test6, test7, test8, test9, test10, test11, test12, test13]

From 3dfde30795532cd522a73237076e95b2c4bac506 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Tue, 2 May 2023 14:31:46 -0400
Subject: [PATCH 09/38] Set start and end time for each batch of files

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 756d66fff..b996d65b3 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1054,6 +1054,10 @@
                         # Run the dark monitor
                         self.process(dark_files)

+                        # Get the starting and ending time of the files in this monitor run
+                        batch_start_time = np.min(np.array(batch_start_time))
+                        batch_end_time = np.max(np.array(batch_end_time))
+
                         # Update the query history once for each group of files
                         new_entry = {'instrument': instrument,
                                      'aperture': aperture,

From 5fa41692b6c75157d463c6a5b456dd3a3f3bac72 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Tue, 2 May 2023 14:47:06 -0400
Subject: [PATCH 10/38] Turn off analysis and db updates, for testing

---
 .../common_monitors/dark_monitor.py | 62 ++++++++++++++++---
 1 file changed, 52 insertions(+), 10 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index b996d65b3..c50d8d99d 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -960,7 +960,18 @@
                 logging.info('\tWorking on readout pattern: {}'.format(self.readpatt))

                 # Locate the record of the most recent MAST search
-                self.query_start = self.most_recent_search()
+                #self.query_start = self.most_recent_search()
+
+
+
+
+                logging.info('SETTING SELF.QUERY_START TO 59500 (PRE-LAUNCH) FOR TESTING.')
+                self.query_start = 59500.
+
+
+
+

                 logging.info('\tQuery times: {} {}'.format(self.query_start, self.query_end))

                         # Run the dark monitor
-                        self.process(dark_files)
+                        #self.process(dark_files)
+                        logging.info('HERE IS WHERE THE MONITOR WOULD RUN ON THE GIVEN BATCH OF FILES. THIS IS TURNED OFF FOR TESTING.')
+
+

                         # Get the starting and ending time of the files in this monitor run
                         batch_start_time = np.min(np.array(batch_start_time))
                         batch_end_time = np.max(np.array(batch_end_time))
@@ -1067,10 +1085,21 @@
                         # Update the query history once for each group of files
                         new_entry = {'instrument': instrument,
                                      'aperture': aperture,
                                      'readpattern': self.readpatt,
                                      'start_time_mjd': batch_start_time,
                                      'end_time_mjd': batch_end_time,
                                      'files_found': len(dark_files),
                                      'run_monitor': monitor_run,
                                      'entry_date': datetime.datetime.now()}
-                        with engine.begin() as connection:
-                            connection.execute(
-                                self.query_table.__table__.insert(), new_entry)
-                        logging.info('\tUpdated the query history table')
+
+
+
+                        #with engine.begin() as connection:
+                        #    connection.execute(
+                        #        self.query_table.__table__.insert(), new_entry)
+                        #logging.info('\tUpdated the query history table')
+                        logging.info('NEW ENTRY: ')
+                        logging.info(new_entry)
+                        logging.info('TURNED OFF DATABASE UPDATES DURING TESTING')
+
+
+
+
                 else:
                     logging.info(f'\tThis is below the threshold of {integration_count_threshold} integrations. Monitor not run.')
                     monitor_run = False
@@ -1084,10 +1113,23 @@
                                  'files_found': len(new_entries),
                                  'run_monitor': monitor_run,
                                  'entry_date': datetime.datetime.now()}
-                    with engine.begin() as connection:
-                        connection.execute(
-                            self.query_table.__table__.insert(), new_entry)
-                    logging.info('\tUpdated the query history table')
+
+
+
+
+                    #with engine.begin() as connection:
+                    #    connection.execute(
+                    #        self.query_table.__table__.insert(), new_entry)
+                    #logging.info('\tUpdated the query history table')
+                    logging.info('NEW ENTRY: ')
+                    logging.info(new_entry)
+                    logging.info('TURNED OFF DATABASE UPDATES DURING TESTING')
+
+
+
+
+
         logging.info('Dark Monitor completed successfully.')

From 7f9ce6e9f51a60abc4f1b2d10445dc643f1405fb Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Tue, 2 May 2023 14:48:42 -0400
Subject: [PATCH 11/38] Turn off file copying, for testing

---
 .../common_monitors/dark_monitor.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index c50d8d99d..5b34e7927 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1055,7 +1055,18 @@
                     # Run the monitor once on each list
                     for new_file_list, batch_start_time, batch_end_time in zip(self.file_batches, self.start_time_batches, self.end_time_batches):
                         # Copy files from filesystem
-                        dark_files, not_copied = copy_files(new_file_list, self.data_dir)
+
+
+
+
+                        #dark_files, not_copied = copy_files(new_file_list, self.data_dir)
+                        # Fake dark_files and not_copied, for testing
+                        dark_files = new_file_list
+                        not_copied = []
+
+
+
+
                         logging.info('\tNew_filenames: {}'.format(new_file_list))
                         logging.info('\tData dir: {}'.format(self.data_dir))

From fc4af15e88d1740332b30bb592b68baa30fdc67a Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Tue, 2 May 2023 16:34:16 -0400
Subject: [PATCH 12/38] Put dark file query results in chronological order

---
 .../common_monitors/dark_monitor.py | 33 +++++++++++++++++--
 jwql/utils/monitor_utils.py         |  5 +++
 2 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 5b34e7927..d9510f71c 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -978,6 +978,12 @@ def run(self):
                 # most recent previous search as the starting time
                 new_entries = monitor_utils.mast_query_darks(instrument, aperture, self.query_start,
                                                              self.query_end, readpatt=self.readpatt)
+
+
+                looks like the files above are not returned in chronological order. We need to do this somewhere. Maybe
+                in the function above since the other monitors probably also assume the files are in order.
+
+
                 # Exclude ASIC tuning data
                 len_new_darks = len(new_entries)
                 new_entries = monitor_utils.exclude_asic_tuning(new_entries)
@@ -1039,8 +1045,20 @@
                     # Check to see if there are enough new integrations to meet the
                     # monitor's signal-to-noise requirements
                     logging.info((f'\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, '
-                                  f'{self.readpatt} has found {total_integrations} in {len(new_filenames)} files.'))
+                                  f'{self.readpatt} has found {total_integrations} integrations spread across {len(new_filenames)} files.'))
                     if total_integrations >= integration_count_threshold:
+
+
+
+                        # for testing
+                        logging.info('FULL BATCH STARTING TIMES:')
+                        logging.info(starting_times)
+                        logging.info('ENDING TIMES:')
+                        logging.info(ending_times)
+
+
+
                         logging.info(f'\tThis meets the threshold of {integration_count_threshold} integrations.')
                         monitor_run = True
@@ -1050,7 +1068,7 @@
                     # in order to produce results with roughly the same signal-to-noise. This
                     # also prevents the monitor running on a huge chunk of files in the case
                     # where it hasn't been run in a while and data have piled up in the meantime.
-                    self.split_files_into_sub_lists(new_filenames, integrations, starting_times, ending_times, integration_count_threshold)
+                    self.split_files_into_sub_lists(new_filenames, starting_times, ending_times, integrations, integration_count_threshold)

                     # Run the monitor once on each list
                     for new_file_list, batch_start_time, batch_end_time in zip(self.file_batches, self.start_time_batches, self.end_time_batches):
@@ -1075,6 +1093,15 @@
+                        # for testing
+                        logging.info('STARTING TIMES FOR BATCH:')
+                        logging.info(batch_start_time)
+                        logging.info('ENDING TIMES FOR BATCH:')
+                        logging.info(batch_end_time)
+
+
+
                         # Run the dark monitor
                         #self.process(dark_files)
@@ -1091,7 +1118,7 @@
                         new_entry = {'instrument': instrument,
                                      'aperture': aperture,
                                      'readpattern': self.readpatt,
                                      'start_time_mjd': batch_start_time,  #-- something is wrong here. Seeing 0.0 and 2.0 in testing on server
                                      'end_time_mjd': batch_end_time,
                                      'files_found': len(dark_files),
                                      'run_monitor': monitor_run,
                                      'entry_date': datetime.datetime.now()}
diff --git a/jwql/utils/monitor_utils.py b/jwql/utils/monitor_utils.py
index f9258e15f..d95389eaf 100644
--- a/jwql/utils/monitor_utils.py
+++ b/jwql/utils/monitor_utils.py
@@ -143,6 +143,11 @@
         if len(query['data']) > 0:
             query_results.extend(query['data'])

+    # Put the file entries in chronological order
+    expstarts = [e['expstart'] for e in query_results]
+    idx = np.argsort(expstarts)
+    query_results = list(np.array(query_results)[idx])
+
     return query_results
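The chronological ordering added to mast_query_darks above is a plain argsort-by-key reorder. For illustration, with hypothetical query entries:

    import numpy as np

    # Hypothetical MAST results with 'expstart' times in MJD
    query_results = [{'filename': 'b.fits', 'expstart': 59002.0},
                     {'filename': 'a.fits', 'expstart': 59001.0}]

    expstarts = [e['expstart'] for e in query_results]
    idx = np.argsort(expstarts)                       # indices that sort by expstart
    query_results = list(np.array(query_results)[idx])
    print([e['filename'] for e in query_results])     # ['a.fits', 'b.fits']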
From 6c3f97f553f92ad50a88e12e709a08ac236603b5 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Tue, 2 May 2023 16:43:11 -0400
Subject: [PATCH 13/38] missing import.

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 6 ------
 jwql/utils/monitor_utils.py                              | 2 +-
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index d9510f71c..a0fb593b6 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -978,12 +978,6 @@ def run(self):
                 # most recent previous search as the starting time
                 new_entries = monitor_utils.mast_query_darks(instrument, aperture, self.query_start,
                                                              self.query_end, readpatt=self.readpatt)
-
-
-                looks like the files above are not returned in chronological order. We need to do this somewhere. Maybe
-                in the function above since the other monitors probably also assume the files are in order.
-
-
                 # Exclude ASIC tuning data
                 len_new_darks = len(new_entries)
                 new_entries = monitor_utils.exclude_asic_tuning(new_entries)
diff --git a/jwql/utils/monitor_utils.py b/jwql/utils/monitor_utils.py
index d95389eaf..e47c9fd24 100644
--- a/jwql/utils/monitor_utils.py
+++ b/jwql/utils/monitor_utils.py
@@ -19,7 +19,7 @@
 import datetime
 import os
 from astroquery.mast import Mast, Observations
-
+import numpy as np

 from jwql.database.database_interface import Monitor, engine
 from jwql.jwql_monitors import monitor_mast

From 57c0f283301abf6387134a1f2df2aadbbf246d0e Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Wed, 3 May 2023 11:20:39 -0400
Subject: [PATCH 14/38] allow monitor to run on apertures not defined in pysiaf

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index a0fb593b6..3e4a7fc02 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1016,7 +1016,9 @@
                             xsize = hdulist[0].header['SUBSIZE1']
                             ysize = hdulist[0].header['SUBSIZE2']
                             nints = hdulist[0].header['NINTS']
-                            if xsize == expected_xsize and ysize == expected_ysize:
+                            # If the array size matches expectations, or if Siaf doesn't give an expected size, then
+                            # keep the file.
+                            if ((xsize == expected_xsize and ysize == expected_ysize) or expected_xsize is None or expected_ysize is None):
                                 temp_filenames.append(new_file)
                                 total_integrations += int(nints)
                                 integrations.append(int(nints) - self.skipped_initial_ints)
                                 starting_times.append(hdulist[0].header['EXPSTART'])
                                 ending_times.append(hdulist[0].header['EXPEND'])
@@ -1065,7 +1067,7 @@
                     # Run the monitor once on each list
-                    for new_file_list, batch_start_time, batch_end_time in zip(self.file_batches, self.start_time_batches, self.end_time_batches):
+                    for new_file_list, batch_start_time, batch_end_time, batch_integrations in zip(self.file_batches, self.start_time_batches, self.end_time_batches, self.integration_batches):
                         # Copy files from filesystem
@@ -1092,6 +1094,8 @@
                         logging.info(batch_start_time)
                         logging.info('ENDING TIMES FOR BATCH:')
                         logging.info(batch_end_time)
+                        logging.info('INTEGRATIONS FOR BATCH:')
+                        logging.info(batch_integrations)

From 30fef8d9431b40c1b452d91186b84417ebbe4250 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Wed, 3 May 2023 11:45:19 -0400
Subject: [PATCH 15/38] tweaks for ignoring the initial integration

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 2 +-
 jwql/instrument_monitors/pipeline_tools.py                | 6 ++++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 3e4a7fc02..53073bb57 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1331,7 +1331,7 @@
         # Loop over epochs.
         # Within each batch, divide up the integrations into multiple batches if the total
-        # number of integrations are above 2*threshold
+        # number of integrations are above 2*threshold.
         for i in range(len(dividers) - 1):
             batch_ints = integration_list[dividers[i]:dividers[i+1]]
             batch_files = files[dividers[i]:dividers[i+1]]
diff --git a/jwql/instrument_monitors/pipeline_tools.py b/jwql/instrument_monitors/pipeline_tools.py
index 4e7142128..e554816bd 100644
--- a/jwql/instrument_monitors/pipeline_tools.py
+++ b/jwql/instrument_monitors/pipeline_tools.py
@@ -198,7 +198,7 @@
 def image_stack(file_list, skipped_initial_ints=0):
-    """Given a list of fits files containing 2D images, read in all data
+    """Given a list of fits files containing 2D or 3D images, read in all data
     and place into a 3D stack

     Parameters
@@ -209,7 +209,9 @@
     skipped_initial_ints : int
         Number of initial integrations from each file to skip over and
         not include in the stack. Only works with files containing 3D
-        arrays (e.g. rateints files).
+        arrays (e.g. rateints files). This is primarily for MIRI, where
+        we want to skip the first N integrations due to dark current
+        instability.
     Returns
     -------

From 9bcaf0a5b3ccc7f7ebdb35a1613342b068fc6ef0 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Wed, 3 May 2023 12:49:10 -0400
Subject: [PATCH 16/38] trivial

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 53073bb57..2742061a5 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -745,6 +745,7 @@
             # Read in all slope images and create a stack of ints (from rateints files)
             # or mean ints (from rate files)
             slope_image_stack, slope_exptimes = pipeline_tools.image_stack(slope_files, skipped_initial_ints=self.skipped_initial_ints)
+            logging.info(f'Shape of slope image stack: {slope_image_stack.shape}')

             # Calculate a mean slope image from the inputs
             slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3)
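The sigma_threshold=3 mean above is computed by jwql's own calculations.mean_image; for intuition, an equivalent sigma-clipped mean/stdev over the integration axis can be sketched with astropy (an illustration under that assumption, not the jwql code):

    import numpy as np
    from astropy.stats import sigma_clip

    # stack: (n_integrations, ny, nx) slope images
    stack = np.random.normal(0.01, 0.001, size=(10, 4, 4))
    clipped = sigma_clip(stack, sigma=3, axis=0)             # masked array of outliers
    mean_image = np.ma.mean(clipped, axis=0).filled(np.nan)
    stdev_image = np.ma.std(clipped, axis=0).filled(np.nan)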
{self.query_start} {self.query_end}') # Query MAST using the aperture and the time of the # most recent previous search as the starting time - new_entries = monitor_utils.mast_query_darks(instrument, aperture, self.query_start, self.query_end, readpatt=self.readpatt) + new_entries = monitor_utils.mast_query_darks(instrument, aperture, self.query_start, + self.query_end, readpatt=self.readpatt) # Exclude ASIC tuning data len_new_darks = len(new_entries) new_entries = monitor_utils.exclude_asic_tuning(new_entries) len_no_asic = len(new_entries) num_asic = len_new_darks - len_no_asic - logging.info("\tFiltering out ASIC tuning files removed {} dark files.".format(num_asic)) + logging.info(f"\tFiltering out ASIC tuning files. Removed {num_asic} dark files.") - logging.info('\tAperture: {}, Readpattern: {}, new entries: {}'.format(self.aperture, self.readpatt, - len(new_entries))) + logging.info(f'\tAperture: {self.aperture}, Readpattern: {self.readpatt}, new entries: {len(new_entries)}') # Get full paths to the files new_filenames = [] @@ -995,8 +995,7 @@ def run(self): try: new_filenames.append(filesystem_path(file_entry['filename'])) except FileNotFoundError: - logging.warning('\t\tUnable to locate {} in filesystem. Not including in processing.' - .format(file_entry['filename'])) + logging.warning(f"\t\tUnable to locate {file_entry['filename']} in filesystem. Not including in processing.") # Generate a count of the total number of integrations across the files. This number will # be compared to the threshold value to determine if the monitor is run. @@ -1018,8 +1017,10 @@ def run(self): ysize = hdulist[0].header['SUBSIZE2'] nints = hdulist[0].header['NINTS'] # If the array size matches expectataions, or if Siaf doesn't give an expected size, then - # keep the file. - if ((xsize == expected_xsize and ysize == expected_ysize) or expected_xsize is None or expected_ysize is None): + # keep the file. Also, make sure there is at leasat one integration, after ignoring any user-input + # number of integrations. + keep_ints = int(nints) - self.skipped_initial_ints + if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize) or expected_xsize is None or expected_ysize is None)): temp_filenames.append(new_file) total_integrations += int(nints) integrations.append(int(nints) - self.skipped_initial_ints) @@ -1299,6 +1300,19 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ #for a future run, where the final delta_t is long enough that we can assume #that epoch of the cal program has completed. + + + print('Splitting into sub-lists. Inputs at the beginning:') + print(files) + print(start_times) + print(end_times) + print(integration_list) + print(threshold) + print('\n') + + + + # Eventual return parameters self.file_batches = [] self.start_time_batches = [] @@ -1306,7 +1320,8 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ self.integration_batches = [] # Add the current time onto the end of start_times - start_times = np.append(start_times, Time.now().mjd) + #start_times = np.append(start_times, Time.now().mjd) + start_times = np.array(start_times) # Get the delta t between each pair of files. 
Insert 0 as the initial # delta_t, to make the coding easier @@ -1317,30 +1332,37 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # dark current during each "epoch" within a calibration proposal dividers = np.where(delta_t >= DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME[self.instrument])[0] + + print('Initial dividers: ', dividers) + + # Add dividers at the beginning index to make the coding easier dividers = np.insert(dividers, 0, 0) + print('Prepend zero to dividers: ', dividers) + # If there is no divider at the end of the list of files, then add one if dividers[-1] < len(delta_t): dividers = np.insert(dividers, len(dividers), len(delta_t)) print('delta_t', delta_t) - print('dividers:', dividers) - print('threshold:', threshold) + print('Final dividers (divide data based on time gaps between files):', dividers) + print('threshold (number of integrations):', threshold) + print('\n') # Loop over epochs. - # Within each batch, divide up the integrations into multiple batches if the total + # Within each batch, divide up the exposures into multiple batches if the total # number of integrations are above 2*threshold. - for i in range(len(dividers) - 1): + for i in range(len(dividers) - 1): # should this be len(dividers)-2??? we seem to be ending with empty results in the final loop batch_ints = integration_list[dividers[i]:dividers[i+1]] batch_files = files[dividers[i]:dividers[i+1]] batch_start_times = start_times[dividers[i]:dividers[i+1]] batch_end_times = end_times[dividers[i]:dividers[i+1]] batch_int_sum = np.sum(batch_ints) - + print(f'Loop over time-based batches. Working on batch {i}') print('batch_ints', batch_ints) print('batch_files', batch_files) @@ -1349,16 +1371,36 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # Calculate how many subgroups to break up the batch into, # based on the threshold, and under the assumption that we # don't want to skip running on any of the files. - n_subgroups = int(np.ceil(batch_int_sum / threshold)) + #n_subgroups = int(np.ceil(batch_int_sum / threshold)) - don't use this + + # Don't create mulitple subgroups for a single file. Treat the exposure as the + # base unit. + #intsum = 0 + #for batch_int, batch_file, start_time, end_time in zip(batch_ints, batch_files, batch_start_times, batch_end_times): + # intsum += batch_int + # if intsum >= + + + + #print('n_subgroups (based on number of integrations vs threshold)', n_subgroups) + #print('total number of integs in the batch: ', batch_int_sum) + #print('integ-based threshold to use: ', threshold) + - print('n_subgroups', n_subgroups) - print(batch_int_sum, threshold) + ## FOR TESTING + #n_subgroups = 1 # eventually n_subgroups should go away and the if else block below can + # be replaced by a single block of code that does not rely on n_subgroups + + + + """ if n_subgroups == 0: print('IF N_SUBGROUPS USES NP.CEIL THEN IT IS NOT POSSIBLE TO HAVE N_SUBGROUPS == 0') + stop print('i and len(dividers)-1:', i, len(dividers) - 1, dividers) @@ -1391,85 +1433,196 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # self.integration_batches.append(batch_ints) elif n_subgroups >= 1: + """ # Here there are enough integrations to meet the threshold, # or possibly enough to break the batch up # into more than one subgroup. We can't split within a file, # so we split after the file that gets the total number of # integrations above the threshold. 
- # Calculate the total number of integrations up to each file - batch_int_sums = np.array([ np.sum(batch_ints[0:jj]) for jj in range(1, len(batch_ints)) ]) - base = 0 - startidx = 0 - endidx = 0 - complete = False - for batchnum in range(n_subgroups): - endidx = np.where(batch_int_sums >= (base + threshold))[0] - # Check if we reach the end of the file list - if len(endidx) == 0: - endidx = len(batch_int_sums) + + """ + ###### Potential replacement for the 'for batchnum' loop below + ###### Potential replacement for the 'for batchnum' loop below + ###### Potential replacement for the 'for batchnum' loop below + startidx = 0 + working_batch_ints = deepcopy(batch_ints) + + + + # still need an exiting condition below.... + while True: + + batch_int_sums = np.array([np.sum(working_batch_ints[0:jj]) for jj in range(1, len(working_batch_ints) + 1)]) + ints_group = batch_int_sums // threshold + endidx = np.where(working_batch_ints > 0)[0] + + # Check if we reach the end of the file list + if len(endidx) == 0: + endidx = len(batch_ints) - 1 + complete = True + else: + endidx = endidx[0] + + subgroup_ints = batch_ints[startidx: endidx + 1] + subgroup_files = batch_files[startidx: endidx + 1] + subgroup_start_times = batch_start_times[startidx: endidx + 1] + subgroup_end_times = batch_end_times[startidx: endidx + 1] + subgroup_int_sum = np.sum(subgroup_ints) + + if (i == len(dividers) - 2) and endidx == len(batch_files) - 1: + if np.sum(subgroup_ints) >= threshold: + print('ADDED - final subgroup of final epoch') + self.file_batches.append(subgroup_files) + self.start_time_batches.append(subgroup_start_times) + self.end_time_batches.append(subgroup_end_times) + self.integration_batches.append(subgroup_ints) + else: + # Here the final subgroup does not have enough integrations to reach the threshold + # and we're not sure if the epoch is complete, so we skip these files and save them + # for a future dark monitor run + pass + + else: + #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)): + print('ADDED') + self.file_batches.append(subgroup_files) + self.start_time_batches.append(subgroup_start_times) + self.end_time_batches.append(subgroup_end_times) + self.integration_batches.append(subgroup_ints) + + if not complete: + startidx = deepcopy(endidx + 1) + base = batch_int_sums[endidx] + else: + # If we reach the end of the list before the expected number of + # subgroups, then we quit. + break + + + + ###### Potential replacement for the 'for batchnum' loop below + ###### Potential replacement for the 'for batchnum' loop below + ###### Potential replacement for the 'for batchnum' loop below + """ + + + + + + + + # Calculate the total number of integrations up to each file + batch_int_sums = np.array([np.sum(batch_ints[0:jj]) for jj in range(1, len(batch_ints) + 1)]) + + print('batch_int_sums: ', batch_int_sums) + + base = 0 + startidx = 0 + endidx = 0 + complete = False + #for batchnum in range(n_subgroups): - just need to fix this loop since we don't know n_subgroups ahead of time + #for batchnum in range(len(batch_files)): # worst case - each file is its own batch... 
change batchnum to filenum, in order to make things easier to interpret + + + while True: # this instead of "for batchnum" makes more sense + + + endidx = np.where(batch_int_sums >= (base + threshold))[0] + + print('startidx: ', startidx) + print('endidx: ', endidx) + + # Check if we reach the end of the file list + if len(endidx) == 0: + endidx = len(batch_int_sums) - 1 + complete = True + else: + endidx = endidx[0] + if endidx == (len(batch_int_sums) - 1): complete = True - else: - endidx = endidx[0] - subgroup_ints = batch_ints[startidx: endidx + 1] - subgroup_files = batch_files[startidx: endidx + 1] - subgroup_start_times = batch_start_times[startidx: endidx + 1] - subgroup_end_times = batch_end_times[startidx: endidx + 1] - subgroup_int_sum = np.sum(subgroup_ints) + print('startidx: ', startidx) + print('endidx: ', endidx) + print('complete: ', complete) + subgroup_ints = batch_ints[startidx: endidx + 1] + subgroup_files = batch_files[startidx: endidx + 1] + subgroup_start_times = batch_start_times[startidx: endidx + 1] + subgroup_end_times = batch_end_times[startidx: endidx + 1] + subgroup_int_sum = np.sum(subgroup_ints) - print('batchnum: ', batchnum) - print(batch_ints[startidx: endidx + 1]) - print(batch_files[startidx: endidx + 1]) - print(i, len(dividers) - 1, batchnum, n_subgroups-1) + print('subgroup_ints: ', subgroup_ints) + print('subgroup_files: ', subgroup_files) + print('subgroup_int_sum: ', subgroup_int_sum) + #print('batchnum: ', batchnum) + #print(batch_ints[startidx: endidx + 1]) + #print(batch_files[startidx: endidx + 1]) + #print(i, len(dividers) - 1, batchnum, n_subgroups-1) - # Add to output lists. The exception is if we are in the - # final subgroup of the final epoch. In that case, we don't know - # if more data are coming soon that may be able to be combined. So - # in that case, we ignore the files for this run of the monitor. - if (i == len(dividers) - 2) and (batchnum == (n_subgroups - 1)): - # Here we are in the final subgroup of the final epoch, where we - # mayb not necessarily know if there will be future data to combine - # with these data - #Here..... we do not know for sure the epoch is over? Confirm that we do not know this. - #If that is true, we can still check to see if we have reached the threshold number of - #integrations and run if so. - #print('final subgroup of final epoch. if the epoch is not over, so skipping files') - if np.sum(subgroup_ints) >= threshold: - print('ADDED - final subgroup of final epoch') - self.file_batches.append(subgroup_files) - self.start_time_batches.append(subgroup_start_times) - self.end_time_batches.append(subgroup_end_times) - self.integration_batches.append(subgroup_ints) - else: - # Here the final subgroup does not have enough integrations to reach the threshold - # and we're not sure if the epoch is complete, so we skip these files and save them - # for a future dark monitor run - pass + # Add to output lists. The exception is if we are in the + # final subgroup of the final epoch. In that case, we don't know + # if more data are coming soon that may be able to be combined. So + # in that case, we ignore the files for this run of the monitor. 
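
# For reference, a minimal sketch of how the time-gap "dividers" above carve
# the file list into epochs. The start times and the 2-day gap threshold are
# hypothetical stand-ins for DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME.
import numpy as np

start_mjd = np.array([60000.0, 60000.2, 60010.0, 60010.1, 60025.0])
delta_t = start_mjd[1:] - start_mjd[:-1]      # gap between consecutive files
dividers = np.where(delta_t >= 2.0)[0] + 1    # split wherever the gap is >= 2 days
epochs = np.split(np.arange(len(start_mjd)), dividers)
print(epochs)  # [array([0, 1]), array([2, 3]), array([4])]: three epochs
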
+                    #if (i == len(dividers) - 2) and (batchnum == (n_subgroups - 1)):
+                    if (i == len(dividers) - 2) and endidx == len(batch_files) - 1:
+                        # Here we are in the final subgroup of the final epoch, where we
+                        # may not necessarily know if there will be future data to combine
+                        # with these data

-                    else:
-                        #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)):
-                        print('ADDED')
+                        #Here..... we do not know for sure the epoch is over? Confirm that we do not know this.
+                        #If that is true, we can still check to see if we have reached the threshold number of
+                        #integrations and run if so.
+                        #print('final subgroup of final epoch. if the epoch is not over, so skipping files')
+
+                        print('should be final epoch and final subgroup. epoch number: ', i)
+
+                        if np.sum(subgroup_ints) >= threshold:
+                            print('ADDED - final subgroup of final epoch')
                             self.file_batches.append(subgroup_files)
                             self.start_time_batches.append(subgroup_start_times)
                             self.end_time_batches.append(subgroup_end_times)
                             self.integration_batches.append(subgroup_ints)
-
-                        if not complete:
-                            startidx = deepcopy(endidx + 1)
-                            base = batch_int_sums[endidx]
                         else:
-                            # If we reach the end of the list before the expected number of
-                            # subgroups, then we quit.
-                            break
+                            # Here the final subgroup does not have enough integrations to reach the threshold
+                            # and we're not sure if the epoch is complete, so we skip these files and save them
+                            # for a future dark monitor run
+                            pass
+
+                    else:
+                        #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)):
+                        print('Not the final epoch, and not the final subgroup')
+                        print('ADDED')
+                        self.file_batches.append(subgroup_files)
+                        self.start_time_batches.append(subgroup_start_times)
+                        self.end_time_batches.append(subgroup_end_times)
+                        self.integration_batches.append(subgroup_ints)
+
+                    if not complete:
+                        startidx = deepcopy(endidx + 1)
+                        base = batch_int_sums[endidx]
+                    else:
+                        # If we reach the end of the list before the expected number of
+                        # subgroups, then we quit. 
+ break + + + + print('Epoch number: ', i) + print('batch_files: ', batch_files) + print('batch_ints: ', batch_ints) + print('self.file_batches: ', self.file_batches) + print('self.integration_batches: ', self.integration_batches) + print('threshold: ', threshold) + print('DONE WITH SUBGROUPS\n\n\n\n') + def stats_by_amp(self, image, amps): """Calculate statistics in the input image for each amplifier as diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt index 22e49f0e8..f82f20810 100644 --- a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt +++ b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt @@ -11,17 +11,17 @@ nircam NRCB4_FULL_OSS 10 0 nircam NRCB5_FULL_OSS 10 0 nircam NRCALL_FULL 10 0 nircam NRCAS_FULL 10 0 -nircam NRCA1_FULL 10 0 -nircam NRCA2_FULL 10 0 -nircam NRCA3_FULL 10 0 -nircam NRCA4_FULL 10 0 -nircam NRCA5_FULL 10 0 -nircam NRCBS_FULL 10 0 -nircam NRCB1_FULL 10 0 -nircam NRCB2_FULL 10 0 -nircam NRCB3_FULL 10 0 -nircam NRCB4_FULL 10 0 -nircam NRCB5_FULL 10 0 +nircam NRCA1_FULL 2 0 +nircam NRCA2_FULL 2 0 +nircam NRCA3_FULL 2 0 +nircam NRCA4_FULL 2 0 +nircam NRCA5_FULL 2 0 +nircam NRCBS_FULL 2 0 +nircam NRCB1_FULL 2 0 +nircam NRCB2_FULL 2 0 +nircam NRCB3_FULL 2 0 +nircam NRCB4_FULL 2 0 +nircam NRCB5_FULL 2 0 nircam NRCB1_FULLP 10 0 nircam NRCB5_FULLP 10 0 nircam NRCA1_SUB160 30 0 From 6ac77ebcde9f5f2d8843dd2b61f84682b37087f2 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 20 Dec 2023 12:38:23 -0500 Subject: [PATCH 18/38] Turn db additions back on. Limit apertures that it runs on --- .../common_monitors/dark_monitor.py | 145 ++-- .../dark_monitor_file_thresholds.txt | 709 +++--------------- jwql/shared_tasks/shared_tasks.py | 34 +- .../jwql/monitor_pages/monitor_dark_bokeh.py | 15 +- 4 files changed, 176 insertions(+), 727 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 74ff1dab1..a795ba261 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -923,15 +923,20 @@ def run(self): self.query_end = Time.now().mjd # Loop over all instruments - for instrument in ['miri']: #JWST_INSTRUMENT_NAMES: + for instrument in JWST_INSTRUMENT_NAMES: self.instrument = instrument + logging.info(f'\n\nWorking on {instrument}') # Identify which database tables to use self.identify_tables() # Get a list of all possible apertures from pysiaf - possible_apertures = list(Siaf(instrument).apernames) - possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip] + #possible_apertures = list(Siaf(instrument).apernames) + #possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip] + + # Run the monitor only on the apertures listed in the threshold file. Skip all others. + instrument_entries = limits['Instrument'] == instrument + possible_apertures = limits['Aperture'][instrument_entries] # Get a list of all possible readout patterns associated with the aperture possible_readpatts = RAPID_READPATTERNS[instrument] @@ -985,7 +990,7 @@ def run(self): new_entries = monitor_utils.exclude_asic_tuning(new_entries) len_no_asic = len(new_entries) num_asic = len_new_darks - len_no_asic - logging.info(f"\tFiltering out ASIC tuning files. Removed {num_asic} dark files.") + #logging.info(f"\tFiltering out ASIC tuning files. 
Removed {num_asic} dark files.") logging.info(f'\tAperture: {self.aperture}, Readpattern: {self.readpatt}, new entries: {len(new_entries)}') @@ -1029,27 +1034,22 @@ def run(self): else: bad_size_filenames.append(new_file) if len(temp_filenames) != len(new_filenames): - logging.info('\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: ') + logging.info('\t\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: ') for badfile in bad_size_filenames: - logging.info('\t\t{}'.format(badfile)) + logging.info('\t\t\t{}'.format(badfile)) new_filenames = deepcopy(temp_filenames) # Check to see if there are enough new integrations to meet the # monitor's signal-to-noise requirements - logging.info((f'\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, ' - f'{self.readpatt} has found {total_integrations} integrations spread across {len(new_filenames)} files.')) + if len(new_filenames) > 0: + logging.info((f'\t\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, ' + f'{self.readpatt} has found {total_integrations} integrations spread across {len(new_filenames)} files.')) if total_integrations >= integration_count_threshold: - - - # for testing - logging.info('FULL BATCH STARTING TIMES:') - logging.info(starting_times) - logging.info('ENDING TIMES:') - logging.info(ending_times) - - - + #logging.info('FULL BATCH STARTING TIMES:') + #logging.info(starting_times) + #logging.info('ENDING TIMES:') + #logging.info(ending_times) logging.info(f'\tThis meets the threshold of {integration_count_threshold} integrations.') monitor_run = True @@ -1075,10 +1075,10 @@ def run(self): - #dark_files, not_copied = copy_files(new_file_list, self.data_dir) + dark_files, not_copied = copy_files(new_file_list, self.data_dir) # Fake dark_files and not_copied, for testing - dark_files = new_file_list - not_copied = [] + #dark_files = new_file_list + #not_copied = [] @@ -1086,7 +1086,7 @@ def run(self): logging.info('\tNew_filenames: {}'.format(new_file_list)) logging.info('\tData dir: {}'.format(self.data_dir)) - logging.info('\tCopied to working dir: {}'.format(dark_files)) + logging.info('\tCopied to data dir: {}'.format(dark_files)) logging.info('\tNot copied: {}'.format(not_copied)) @@ -1104,8 +1104,8 @@ def run(self): # Run the dark monitor - #self.process(dark_files) - logging.info('HERE IS WHERE THE MONITOR WOULD RUN ON THE GIVEN BATCH OF FILES. THIS IS TURNED OFF FOR TESTING.') + self.process(dark_files) + #logging.info('HERE IS WHERE THE MONITOR WOULD RUN ON THE GIVEN BATCH OF FILES. 
THIS IS TURNED OFF FOR TESTING.') @@ -1126,13 +1126,13 @@ def run(self): - #with engine.begin() as connection: - # connection.execute( - # self.query_table.__table__.insert(), new_entry) - #logging.info('\tUpdated the query history table') + with engine.begin() as connection: + connection.execute( + self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') logging.info('NEW ENTRY: ') logging.info(new_entry) - logging.info('TURNED OFF DATABASE UPDATES DURING TESTING') + #logging.info('TURNED OFF DATABASE UPDATES DURING TESTING') @@ -1155,13 +1155,13 @@ def run(self): - #with engine.begin() as connection: - # connection.execute( - # self.query_table.__table__.insert(), new_entry) - #logging.info('\tUpdated the query history table') + with engine.begin() as connection: + connection.execute( + self.query_table.__table__.insert(), new_entry) + logging.info('\tUpdated the query history table') logging.info('NEW ENTRY: ') logging.info(new_entry) - logging.info('TURNED OFF DATABASE UPDATES DURING TESTING') + #logging.info('TURNED OFF DATABASE UPDATES DURING TESTING') @@ -1302,13 +1302,10 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ - print('Splitting into sub-lists. Inputs at the beginning:') - print(files) - print(start_times) - print(end_times) - print(integration_list) - print(threshold) - print('\n') + logging.info('\t\tSplitting into sub-lists. Inputs at the beginning: (file, start time, end time, nints, threshold)') + for f, st, et, inte in zip(files, start_times, end_times, integration_list): + logging.info(f'\t\t {f}, {st}, {et}, {inte}, {threshold}') + logging.info('\n') @@ -1332,25 +1329,16 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # dark current during each "epoch" within a calibration proposal dividers = np.where(delta_t >= DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME[self.instrument])[0] - - print('Initial dividers: ', dividers) - - # Add dividers at the beginning index to make the coding easier dividers = np.insert(dividers, 0, 0) - print('Prepend zero to dividers: ', dividers) - # If there is no divider at the end of the list of files, then add one if dividers[-1] < len(delta_t): dividers = np.insert(dividers, len(dividers), len(delta_t)) - - print('delta_t', delta_t) - print('Final dividers (divide data based on time gaps between files):', dividers) - print('threshold (number of integrations):', threshold) - print('\n') - + logging.info(f'\t\t\tdelta_t between files: {delta_t}') + logging.info(f'\t\t\tFinal dividers (divide data based on time gaps between files): {dividers}') + logging.info('\n') # Loop over epochs. # Within each batch, divide up the exposures into multiple batches if the total @@ -1362,11 +1350,10 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ batch_end_times = end_times[dividers[i]:dividers[i+1]] batch_int_sum = np.sum(batch_ints) - print(f'Loop over time-based batches. Working on batch {i}') - print('batch_ints', batch_ints) - print('batch_files', batch_files) - - + logging.info(f'\t\t\tLoop over time-based batches. 
Working on batch {i}') + logging.info(f'\t\t\tBatch Files, Batch integrations') + for bi, bf in zip(batch_ints, batch_files): + logging.info(f'\t\t\t{bf}, {bi}') # Calculate how many subgroups to break up the batch into, # based on the threshold, and under the assumption that we @@ -1517,8 +1504,6 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # Calculate the total number of integrations up to each file batch_int_sums = np.array([np.sum(batch_ints[0:jj]) for jj in range(1, len(batch_ints) + 1)]) - print('batch_int_sums: ', batch_int_sums) - base = 0 startidx = 0 endidx = 0 @@ -1532,9 +1517,6 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ endidx = np.where(batch_int_sums >= (base + threshold))[0] - print('startidx: ', startidx) - print('endidx: ', endidx) - # Check if we reach the end of the file list if len(endidx) == 0: endidx = len(batch_int_sums) - 1 @@ -1544,9 +1526,9 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ if endidx == (len(batch_int_sums) - 1): complete = True - print('startidx: ', startidx) - print('endidx: ', endidx) - print('complete: ', complete) + logging.info(f'\t\t\tstartidx: {startidx}') + logging.info(f'\t\t\tendidx: {endidx}') + logging.info(f'\t\t\tcomplete: {complete}') subgroup_ints = batch_ints[startidx: endidx + 1] subgroup_files = batch_files[startidx: endidx + 1] @@ -1555,9 +1537,9 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ subgroup_int_sum = np.sum(subgroup_ints) - print('subgroup_ints: ', subgroup_ints) - print('subgroup_files: ', subgroup_files) - print('subgroup_int_sum: ', subgroup_int_sum) + logging.info(f'\t\t\tsubgroup_ints: {subgroup_ints}') + logging.info(f'\t\t\tsubgroup_files: {subgroup_files}') + logging.info(f'\t\t\tsubgroup_int_sum: {subgroup_int_sum}') #print('batchnum: ', batchnum) #print(batch_ints[startidx: endidx + 1]) @@ -1582,10 +1564,10 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ #integrations and run if so. #print('final subgroup of final epoch. if the epoch is not over, so skipping files') - print('should be final epoch and final subgroup. epoch number: ', i) + logging.info(f'\t\t\tShould be final epoch and final subgroup. epoch number: {i}') if np.sum(subgroup_ints) >= threshold: - print('ADDED - final subgroup of final epoch') + logging.info('\t\t\tADDED - final subgroup of final epoch') self.file_batches.append(subgroup_files) self.start_time_batches.append(subgroup_start_times) self.end_time_batches.append(subgroup_end_times) @@ -1594,12 +1576,11 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # Here the final subgroup does not have enough integrations to reach the threshold # and we're not sure if the epoch is complete, so we skip these files and save them # for a future dark monitor run + logging.info('\t\t\tSkipping final subgroup. Not clear if the epoch is complete') pass else: #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)): - print('Not the final epoch, and not the final subgroup') - print('ADDED') self.file_batches.append(subgroup_files) self.start_time_batches.append(subgroup_start_times) self.end_time_batches.append(subgroup_end_times) @@ -1613,15 +1594,15 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # subgroups, then we quit. 
break - - - print('Epoch number: ', i) - print('batch_files: ', batch_files) - print('batch_ints: ', batch_ints) - print('self.file_batches: ', self.file_batches) - print('self.integration_batches: ', self.integration_batches) - print('threshold: ', threshold) - print('DONE WITH SUBGROUPS\n\n\n\n') + logging.info(f'\n\t\t\tEpoch number: {i}') + logging.info('\t\t\tBatch File, Bath integration') + for bi, bf in zip(batch_ints, batch_files): + logging.info(f'\t\t\t{bf}, {bi}') + logging.info(f'\n\t\t\tSplit into separate subgroups for processing:') + logging.info('\t\t\tFile batches, integration batches') + for fb, ib in zip(self.file_batches, self.integration_batches): + logging.info(f'\t\t\t{fb}, {ib}') + logging.info(f'\t\t\tDONE WITH SUBGROUPS\n\n\n\n') def stats_by_amp(self, image, amps): diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt index f82f20810..010831446 100644 --- a/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt +++ b/jwql/instrument_monitors/common_monitors/dark_monitor_file_thresholds.txt @@ -1,624 +1,89 @@ Instrument Aperture Threshold N_skipped_integs -nircam NRCA1_FULL_OSS 10 0 -nircam NRCA2_FULL_OSS 10 0 -nircam NRCA3_FULL_OSS 10 0 -nircam NRCA4_FULL_OSS 10 0 -nircam NRCA5_FULL_OSS 10 0 -nircam NRCB1_FULL_OSS 10 0 -nircam NRCB2_FULL_OSS 10 0 -nircam NRCB3_FULL_OSS 10 0 -nircam NRCB4_FULL_OSS 10 0 -nircam NRCB5_FULL_OSS 10 0 -nircam NRCALL_FULL 10 0 -nircam NRCAS_FULL 10 0 -nircam NRCA1_FULL 2 0 -nircam NRCA2_FULL 2 0 -nircam NRCA3_FULL 2 0 -nircam NRCA4_FULL 2 0 -nircam NRCA5_FULL 2 0 -nircam NRCBS_FULL 2 0 -nircam NRCB1_FULL 2 0 -nircam NRCB2_FULL 2 0 -nircam NRCB3_FULL 2 0 -nircam NRCB4_FULL 2 0 -nircam NRCB5_FULL 2 0 -nircam NRCB1_FULLP 10 0 -nircam NRCB5_FULLP 10 0 -nircam NRCA1_SUB160 30 0 -nircam NRCA2_SUB160 30 0 -nircam NRCA3_SUB160 30 0 -nircam NRCA4_SUB160 30 0 -nircam NRCA5_SUB160 30 0 -nircam NRCB1_SUB160 30 0 -nircam NRCB2_SUB160 30 0 -nircam NRCB3_SUB160 30 0 -nircam NRCB4_SUB160 30 0 -nircam NRCB5_SUB160 30 0 -nircam NRCA1_SUB320 30 0 -nircam NRCA2_SUB320 30 0 -nircam NRCA3_SUB320 30 0 -nircam NRCA4_SUB320 30 0 -nircam NRCA5_SUB320 30 0 -nircam NRCB1_SUB320 30 0 -nircam NRCB2_SUB320 30 0 -nircam NRCB3_SUB320 30 0 -nircam NRCB4_SUB320 30 0 -nircam NRCB5_SUB320 30 0 -nircam NRCA1_SUB640 30 0 -nircam NRCA2_SUB640 30 0 -nircam NRCA3_SUB640 30 0 -nircam NRCA4_SUB640 30 0 -nircam NRCA5_SUB640 30 0 -nircam NRCB1_SUB640 30 0 -nircam NRCB2_SUB640 30 0 -nircam NRCB3_SUB640 30 0 -nircam NRCB4_SUB640 30 0 -nircam NRCB5_SUB640 30 0 -nircam NRCA5_GRISM256_F322W2 30 0 -nircam NRCA5_GRISM128_F322W2 30 0 -nircam NRCA5_GRISM64_F322W2 30 0 -nircam NRCA5_GRISM256_F277W 30 0 -nircam NRCA5_GRISM128_F277W 30 0 -nircam NRCA5_GRISM64_F277W 30 0 -nircam NRCA5_GRISM256_F356W 30 0 -nircam NRCA5_GRISM128_F356W 30 0 -nircam NRCA5_GRISM64_F356W 30 0 -nircam NRCA5_GRISM256_F444W 30 0 -nircam NRCA5_GRISM128_F444W 30 0 -nircam NRCA5_GRISM64_F444W 30 0 -nircam NRCA5_GRISM_F322W2 30 0 -nircam NRCA5_GRISM_F277W 30 0 -nircam NRCA5_GRISM_F356W 30 0 -nircam NRCA5_GRISM_F444W 30 0 -nircam NRCA1_GRISMTS 30 0 -nircam NRCA1_GRISMTS256 30 0 -nircam NRCA1_GRISMTS128 30 0 -nircam NRCA1_GRISMTS64 30 0 -nircam NRCA3_GRISMTS 30 0 -nircam NRCA3_GRISMTS256 30 0 -nircam NRCA3_GRISMTS128 30 0 -nircam NRCA3_GRISMTS64 30 0 -nircam NRCA5_TAGRISMTS32 30 0 -nircam NRCA5_TAGRISMTS32_F405N 30 0 -nircam NRCA5_TAGRISMTS_SCI_F322W2 30 0 -nircam NRCA5_TAGRISMTS_SCI_F444W 30 0 
-nircam NRCA3_DHSPIL 30 0 -nircam NRCA3_DHSPIL_SUB96 30 0 -nircam NRCA3_DHSPIL_WEDGES 30 0 -nircam NRCB4_DHSPIL 30 0 -nircam NRCB4_DHSPIL_SUB96 30 0 -nircam NRCB4_DHSPIL_WEDGES 30 0 -nircam NRCA3_FP1 30 0 -nircam NRCA3_FP1_SUB8 30 0 -nircam NRCA3_FP1_SUB64 30 0 -nircam NRCA3_FP2MIMF 30 0 -nircam NRCA1_FP3MIMF 30 0 -nircam NRCA2_FP4MIMF 30 0 -nircam NRCA4_FP5MIMF 30 0 -nircam NRCB4_FP1 30 0 -nircam NRCB4_FP1_SUB8 30 0 -nircam NRCB4_FP1_SUB64 30 0 -nircam NRCB4_FP2MIMF 30 0 -nircam NRCB2_FP3MIMF 30 0 -nircam NRCB1_FP4MIMF 30 0 -nircam NRCB3_FP5MIMF 30 0 -nircam NRCA3_SUB64P 30 0 -nircam NRCA3_SUB160P 30 0 -nircam NRCA3_SUB400P 30 0 -nircam NRCA5_SUB64P 30 0 -nircam NRCA5_SUB160P 30 0 -nircam NRCA5_SUB400P 30 0 -nircam NRCB1_SUB64P 30 0 -nircam NRCB1_SUB160P 30 0 -nircam NRCB1_SUB400P 30 0 -nircam NRCB5_SUB64P 30 0 -nircam NRCB5_SUB160P 30 0 -nircam NRCB5_SUB400P 30 0 -nircam NRCB5_TAPSIMG32 30 0 -nircam NRCA5_GRISMC_WFSS 30 0 -nircam NRCA5_GRISMR_WFSS 30 0 -nircam NRCALL_GRISMC_WFSS 30 0 -nircam NRCALL_GRISMR_WFSS 30 0 -nircam NRCB5_GRISMC_WFSS 30 0 -nircam NRCB5_GRISMR_WFSS 30 0 -nircam NRCA2_MASK210R 30 0 -nircam NRCA5_MASK335R 30 0 -nircam NRCA5_MASK430R 30 0 -nircam NRCA4_MASKSWB 30 0 -nircam NRCA5_MASKLWB 30 0 -nircam NRCA2_TAMASK210R 30 0 -nircam NRCA5_TAMASK335R 30 0 -nircam NRCA5_TAMASK430R 30 0 -nircam NRCA4_TAMASKSWB 30 0 -nircam NRCA5_TAMASKLWB 30 0 -nircam NRCA5_TAMASKLWBL 30 0 -nircam NRCA4_TAMASKSWBS 30 0 -nircam NRCB1_MASK210R 30 0 -nircam NRCB5_MASK335R 30 0 -nircam NRCB5_MASK430R 30 0 -nircam NRCB3_MASKSWB 30 0 -nircam NRCB5_MASKLWB 30 0 -nircam NRCB1_TAMASK210R 30 0 -nircam NRCB5_TAMASK335R 30 0 -nircam NRCB5_TAMASK430R 30 0 -nircam NRCB3_TAMASKSWB 30 0 -nircam NRCB5_TAMASKLWB 30 0 -nircam NRCB5_TAMASKLWBL 30 0 -nircam NRCB3_TAMASKSWBS 30 0 -nircam NRCA2_FSTAMASK210R 30 0 -nircam NRCA4_FSTAMASKSWB 30 0 -nircam NRCA5_FSTAMASKLWB 30 0 -nircam NRCA5_FSTAMASK335R 30 0 -nircam NRCA5_FSTAMASK430R 30 0 -nircam NRCA4_MASKSWB_F182M 30 0 -nircam NRCA4_MASKSWB_F187N 30 0 -nircam NRCA4_MASKSWB_F210M 30 0 -nircam NRCA4_MASKSWB_F212N 30 0 -nircam NRCA4_MASKSWB_F200W 30 0 -nircam NRCA4_MASKSWB_NARROW 30 0 -nircam NRCA5_MASKLWB_F250M 30 0 -nircam NRCA5_MASKLWB_F300M 30 0 -nircam NRCA5_MASKLWB_F277W 30 0 -nircam NRCA5_MASKLWB_F335M 30 0 -nircam NRCA5_MASKLWB_F360M 30 0 -nircam NRCA5_MASKLWB_F356W 30 0 -nircam NRCA5_MASKLWB_F410M 30 0 -nircam NRCA5_MASKLWB_F430M 30 0 -nircam NRCA5_MASKLWB_F460M 30 0 -nircam NRCA5_MASKLWB_F480M 30 0 -nircam NRCA5_MASKLWB_F444W 30 0 -nircam NRCA5_MASKLWB_NARROW 30 0 -nircam NRCA2_FULL_MASK210R 10 0 -nircam NRCA5_FULL_MASK335R 10 0 -nircam NRCA5_FULL_MASK430R 10 0 -nircam NRCA4_FULL_MASKSWB 10 0 -nircam NRCA4_FULL_MASKSWB_F182M 10 0 -nircam NRCA4_FULL_MASKSWB_F187N 10 0 -nircam NRCA4_FULL_MASKSWB_F210M 10 0 -nircam NRCA4_FULL_MASKSWB_F212N 10 0 -nircam NRCA4_FULL_MASKSWB_F200W 10 0 -nircam NRCA5_FULL_MASKLWB 10 0 -nircam NRCA5_FULL_MASKLWB_F250M 10 0 -nircam NRCA5_FULL_MASKLWB_F300M 10 0 -nircam NRCA5_FULL_MASKLWB_F277W 10 0 -nircam NRCA5_FULL_MASKLWB_F335M 10 0 -nircam NRCA5_FULL_MASKLWB_F360M 10 0 -nircam NRCA5_FULL_MASKLWB_F356W 10 0 -nircam NRCA5_FULL_MASKLWB_F410M 10 0 -nircam NRCA5_FULL_MASKLWB_F430M 10 0 -nircam NRCA5_FULL_MASKLWB_F460M 10 0 -nircam NRCA5_FULL_MASKLWB_F480M 10 0 -nircam NRCA5_FULL_MASKLWB_F444W 10 0 -nircam NRCA2_FULL_WEDGE_RND 10 0 -nircam NRCA4_FULL_WEDGE_BAR 10 0 -nircam NRCA5_FULL_WEDGE_RND 10 0 -nircam NRCA5_FULL_WEDGE_BAR 10 0 -nircam NRCA2_FULL_TAMASK210R 10 0 -nircam NRCA5_FULL_TAMASK335R 10 0 -nircam NRCA5_FULL_TAMASK430R 10 0 
-nircam NRCA4_FULL_TAMASKSWB 10 0 -nircam NRCA5_FULL_TAMASKLWB 10 0 -nircam NRCA5_FULL_TAMASKLWBL 10 0 -nircam NRCA4_FULL_TAMASKSWBS 10 0 -nircam NRCA2_FULL_FSTAMASK210R 10 0 -nircam NRCA4_FULL_FSTAMASKSWB 10 0 -nircam NRCA5_FULL_FSTAMASKLWB 10 0 -nircam NRCA5_FULL_FSTAMASK335R 10 0 -nircam NRCA5_FULL_FSTAMASK430R 10 0 -niriss NIS_CEN_OSS 10 0 -niriss NIS_CEN 10 0 -niriss NIS_AMI1 30 0 -niriss NIS_AMI2 30 0 -niriss NIS_AMI3 30 0 -niriss NIS_AMI4 30 0 -niriss NIS_AMITA 30 0 -niriss NIS_SOSSTA 30 0 -niriss NIS_WFSS_OFFSET 30 0 -niriss NIS_WFSS64 30 0 -niriss NIS_WFSS64R 30 0 -niriss NIS_WFSS64R3 30 0 -niriss NIS_WFSS64C 30 0 -niriss NIS_WFSS64C3 30 0 -niriss NIS_WFSS128 30 0 -niriss NIS_WFSS128R 30 0 -niriss NIS_WFSS128R3 30 0 -niriss NIS_WFSS128C 30 0 -niriss NIS_WFSS128C3 30 0 -niriss NIS_SUB64 30 0 -niriss NIS_SUB128 30 0 -niriss NIS_SUB256 30 0 -niriss NIS_SUBAMPCAL 30 0 -niriss NIS_SUBSTRIP96 30 0 -niriss NIS_SUBSTRIP256 30 0 -niriss NIS_FP1MIMF 30 0 -niriss NIS_FP2MIMF 30 0 -niriss NIS_FP3MIMF 30 0 -niriss NIS_FP4MIMF 30 0 -niriss NIS_FP5MIMF 30 0 -niriss NIS_AMIFULL 10 0 -niriss NIS_SOSSFULL 10 0 -niriss NIS_WFSS 10 0 -miri MIRIM_FULL_OSS 1 1 +nircam NRCA1_FULL 1 0 +nircam NRCA2_FULL 1 0 +nircam NRCA3_FULL 1 0 +nircam NRCA4_FULL 1 0 +nircam NRCA5_FULL 1 0 +nircam NRCB1_FULL 1 0 +nircam NRCB2_FULL 1 0 +nircam NRCB3_FULL 1 0 +nircam NRCB4_FULL 1 0 +nircam NRCB5_FULL 1 0 +nircam NRCA1_SUB160 4 0 +nircam NRCA2_SUB160 4 0 +nircam NRCA3_SUB160 4 0 +nircam NRCA4_SUB160 4 0 +nircam NRCA5_SUB160 4 0 +nircam NRCB1_SUB160 4 0 +nircam NRCB2_SUB160 4 0 +nircam NRCB3_SUB160 4 0 +nircam NRCB4_SUB160 4 0 +nircam NRCB5_SUB160 4 0 +nircam NRCA1_SUB320 4 0 +nircam NRCA2_SUB320 4 0 +nircam NRCA3_SUB320 4 0 +nircam NRCA4_SUB320 4 0 +nircam NRCA5_SUB320 4 0 +nircam NRCB1_SUB320 4 0 +nircam NRCB2_SUB320 4 0 +nircam NRCB3_SUB320 4 0 +nircam NRCB4_SUB320 4 0 +nircam NRCB5_SUB320 4 0 +nircam NRCA1_SUB640 4 0 +nircam NRCA2_SUB640 4 0 +nircam NRCA3_SUB640 4 0 +nircam NRCA4_SUB640 4 0 +nircam NRCA5_SUB640 4 0 +nircam NRCB1_SUB640 4 0 +nircam NRCB2_SUB640 4 0 +nircam NRCB3_SUB640 4 0 +nircam NRCB4_SUB640 4 0 +nircam NRCB5_SUB640 4 0 +niriss NIS_CEN 1 0 +niriss NIS_AMI1 1 0 +niriss NIS_AMI2 1 0 +niriss NIS_AMI3 1 0 +niriss NIS_AMI4 1 0 +niriss NIS_SUB64 1 0 +niriss NIS_SUB128 1 0 +niriss NIS_SUB256 1 0 miri MIRIM_FULL 1 1 -miri MIRIM_ILLUM 30 0 -miri MIRIM_BRIGHTSKY 30 0 -miri MIRIM_SUB256 30 0 -miri MIRIM_SUB128 30 0 -miri MIRIM_SUB64 30 0 -miri MIRIM_SLITLESSPRISM 30 0 -miri MIRIM_SLITLESSUPPER 30 0 -miri MIRIM_SLITLESSLOWER 30 0 -miri MIRIM_MASK1065 30 0 -miri MIRIM_MASK1140 30 0 -miri MIRIM_MASK1550 30 0 -miri MIRIM_MASKLYOT 30 0 -miri MIRIM_TAMRS 30 0 -miri MIRIM_TALRS 30 0 -miri MIRIM_TABLOCK 30 0 -miri MIRIM_TALYOT_UL 30 0 -miri MIRIM_TALYOT_UR 30 0 -miri MIRIM_TALYOT_LL 30 0 -miri MIRIM_TALYOT_LR 30 0 -miri MIRIM_TALYOT_CUL 30 0 -miri MIRIM_TALYOT_CUR 30 0 -miri MIRIM_TALYOT_CLL 30 0 -miri MIRIM_TALYOT_CLR 30 0 -miri MIRIM_TA1550_UL 30 0 -miri MIRIM_TA1550_UR 30 0 -miri MIRIM_TA1550_LL 30 0 -miri MIRIM_TA1550_LR 30 0 -miri MIRIM_TA1550_CUL 30 0 -miri MIRIM_TA1550_CUR 30 0 -miri MIRIM_TA1550_CLL 30 0 -miri MIRIM_TA1550_CLR 30 0 -miri MIRIM_TA1140_UL 30 0 -miri MIRIM_TA1140_UR 30 0 -miri MIRIM_TA1140_LL 30 0 -miri MIRIM_TA1140_LR 30 0 -miri MIRIM_TA1140_CUL 30 0 -miri MIRIM_TA1140_CUR 30 0 -miri MIRIM_TA1140_CLL 30 0 -miri MIRIM_TA1140_CLR 30 0 -miri MIRIM_TA1065_UL 30 0 -miri MIRIM_TA1065_UR 30 0 -miri MIRIM_TA1065_LL 30 0 -miri MIRIM_TA1065_LR 30 0 -miri MIRIM_TA1065_CUL 30 0 -miri MIRIM_TA1065_CUR 30 0 
-miri MIRIM_TA1065_CLL 30 0 -miri MIRIM_TA1065_CLR 30 0 -miri MIRIM_TAFULL 10 0 -miri MIRIM_TAILLUM 30 0 -miri MIRIM_TABRIGHTSKY 30 0 -miri MIRIM_TASUB256 30 0 -miri MIRIM_TASUB128 30 0 -miri MIRIM_TASUB64 30 0 -miri MIRIM_TASLITLESSPRISM 30 0 -miri MIRIM_CORON1065 30 0 -miri MIRIM_CORON1140 30 0 -miri MIRIM_CORON1550 30 0 -miri MIRIM_CORONLYOT 30 0 -miri MIRIM_KNIFE 30 0 -miri MIRIM_FP1MIMF 30 0 -miri MIRIM_FP2MIMF 30 0 -miri MIRIM_FP3MIMF 30 0 -miri MIRIM_FP4MIMF 30 0 -miri MIRIM_FP5MIMF 30 0 -miri MIRIM_SLIT 30 0 -miri MIRIFU_CHANNEL1A 30 0 -miri MIRIFU_1ASLICE01 30 0 -miri MIRIFU_1ASLICE02 30 0 -miri MIRIFU_1ASLICE03 30 0 -miri MIRIFU_1ASLICE04 30 0 -miri MIRIFU_1ASLICE05 30 0 -miri MIRIFU_1ASLICE06 30 0 -miri MIRIFU_1ASLICE07 30 0 -miri MIRIFU_1ASLICE08 30 0 -miri MIRIFU_1ASLICE09 30 0 -miri MIRIFU_1ASLICE10 30 0 -miri MIRIFU_1ASLICE11 30 0 -miri MIRIFU_1ASLICE12 30 0 -miri MIRIFU_1ASLICE13 30 0 -miri MIRIFU_1ASLICE14 30 0 -miri MIRIFU_1ASLICE15 30 0 -miri MIRIFU_1ASLICE16 30 0 -miri MIRIFU_1ASLICE17 30 0 -miri MIRIFU_1ASLICE18 30 0 -miri MIRIFU_1ASLICE19 30 0 -miri MIRIFU_1ASLICE20 30 0 -miri MIRIFU_1ASLICE21 30 0 -miri MIRIFU_CHANNEL1B 30 0 -miri MIRIFU_1BSLICE01 30 0 -miri MIRIFU_1BSLICE02 30 0 -miri MIRIFU_1BSLICE03 30 0 -miri MIRIFU_1BSLICE04 30 0 -miri MIRIFU_1BSLICE05 30 0 -miri MIRIFU_1BSLICE06 30 0 -miri MIRIFU_1BSLICE07 30 0 -miri MIRIFU_1BSLICE08 30 0 -miri MIRIFU_1BSLICE09 30 0 -miri MIRIFU_1BSLICE10 30 0 -miri MIRIFU_1BSLICE11 30 0 -miri MIRIFU_1BSLICE12 30 0 -miri MIRIFU_1BSLICE13 30 0 -miri MIRIFU_1BSLICE14 30 0 -miri MIRIFU_1BSLICE15 30 0 -miri MIRIFU_1BSLICE16 30 0 -miri MIRIFU_1BSLICE17 30 0 -miri MIRIFU_1BSLICE18 30 0 -miri MIRIFU_1BSLICE19 30 0 -miri MIRIFU_1BSLICE20 30 0 -miri MIRIFU_1BSLICE21 30 0 -miri MIRIFU_CHANNEL1C 30 0 -miri MIRIFU_1CSLICE01 30 0 -miri MIRIFU_1CSLICE02 30 0 -miri MIRIFU_1CSLICE03 30 0 -miri MIRIFU_1CSLICE04 30 0 -miri MIRIFU_1CSLICE05 30 0 -miri MIRIFU_1CSLICE06 30 0 -miri MIRIFU_1CSLICE07 30 0 -miri MIRIFU_1CSLICE08 30 0 -miri MIRIFU_1CSLICE09 30 0 -miri MIRIFU_1CSLICE10 30 0 -miri MIRIFU_1CSLICE11 30 0 -miri MIRIFU_1CSLICE12 30 0 -miri MIRIFU_1CSLICE13 30 0 -miri MIRIFU_1CSLICE14 30 0 -miri MIRIFU_1CSLICE15 30 0 -miri MIRIFU_1CSLICE16 30 0 -miri MIRIFU_1CSLICE17 30 0 -miri MIRIFU_1CSLICE18 30 0 -miri MIRIFU_1CSLICE19 30 0 -miri MIRIFU_1CSLICE20 30 0 -miri MIRIFU_1CSLICE21 30 0 -miri MIRIFU_CHANNEL2A 30 0 -miri MIRIFU_2ASLICE01 30 0 -miri MIRIFU_2ASLICE02 30 0 -miri MIRIFU_2ASLICE03 30 0 -miri MIRIFU_2ASLICE04 30 0 -miri MIRIFU_2ASLICE05 30 0 -miri MIRIFU_2ASLICE06 30 0 -miri MIRIFU_2ASLICE07 30 0 -miri MIRIFU_2ASLICE08 30 0 -miri MIRIFU_2ASLICE09 30 0 -miri MIRIFU_2ASLICE10 30 0 -miri MIRIFU_2ASLICE11 30 0 -miri MIRIFU_2ASLICE12 30 0 -miri MIRIFU_2ASLICE13 30 0 -miri MIRIFU_2ASLICE14 30 0 -miri MIRIFU_2ASLICE15 30 0 -miri MIRIFU_2ASLICE16 30 0 -miri MIRIFU_2ASLICE17 30 0 -miri MIRIFU_CHANNEL2B 30 0 -miri MIRIFU_2BSLICE01 30 0 -miri MIRIFU_2BSLICE02 30 0 -miri MIRIFU_2BSLICE03 30 0 -miri MIRIFU_2BSLICE04 30 0 -miri MIRIFU_2BSLICE05 30 0 -miri MIRIFU_2BSLICE06 30 0 -miri MIRIFU_2BSLICE07 30 0 -miri MIRIFU_2BSLICE08 30 0 -miri MIRIFU_2BSLICE09 30 0 -miri MIRIFU_2BSLICE10 30 0 -miri MIRIFU_2BSLICE11 30 0 -miri MIRIFU_2BSLICE12 30 0 -miri MIRIFU_2BSLICE13 30 0 -miri MIRIFU_2BSLICE14 30 0 -miri MIRIFU_2BSLICE15 30 0 -miri MIRIFU_2BSLICE16 30 0 -miri MIRIFU_2BSLICE17 30 0 -miri MIRIFU_CHANNEL2C 30 0 -miri MIRIFU_2CSLICE01 30 0 -miri MIRIFU_2CSLICE02 30 0 -miri MIRIFU_2CSLICE03 30 0 -miri MIRIFU_2CSLICE04 30 0 -miri MIRIFU_2CSLICE05 30 0 -miri 
MIRIFU_2CSLICE06 30 0 -miri MIRIFU_2CSLICE07 30 0 -miri MIRIFU_2CSLICE08 30 0 -miri MIRIFU_2CSLICE09 30 0 -miri MIRIFU_2CSLICE10 30 0 -miri MIRIFU_2CSLICE11 30 0 -miri MIRIFU_2CSLICE12 30 0 -miri MIRIFU_2CSLICE13 30 0 -miri MIRIFU_2CSLICE14 30 0 -miri MIRIFU_2CSLICE15 30 0 -miri MIRIFU_2CSLICE16 30 0 -miri MIRIFU_2CSLICE17 30 0 -miri MIRIFU_CHANNEL3A 30 0 -miri MIRIFU_3ASLICE01 30 0 -miri MIRIFU_3ASLICE02 30 0 -miri MIRIFU_3ASLICE03 30 0 -miri MIRIFU_3ASLICE04 30 0 -miri MIRIFU_3ASLICE05 30 0 -miri MIRIFU_3ASLICE06 30 0 -miri MIRIFU_3ASLICE07 30 0 -miri MIRIFU_3ASLICE08 30 0 -miri MIRIFU_3ASLICE09 30 0 -miri MIRIFU_3ASLICE10 30 0 -miri MIRIFU_3ASLICE11 30 0 -miri MIRIFU_3ASLICE12 30 0 -miri MIRIFU_3ASLICE13 30 0 -miri MIRIFU_3ASLICE14 30 0 -miri MIRIFU_3ASLICE15 30 0 -miri MIRIFU_3ASLICE16 30 0 -miri MIRIFU_CHANNEL3B 30 0 -miri MIRIFU_3BSLICE01 30 0 -miri MIRIFU_3BSLICE02 30 0 -miri MIRIFU_3BSLICE03 30 0 -miri MIRIFU_3BSLICE04 30 0 -miri MIRIFU_3BSLICE05 30 0 -miri MIRIFU_3BSLICE06 30 0 -miri MIRIFU_3BSLICE07 30 0 -miri MIRIFU_3BSLICE08 30 0 -miri MIRIFU_3BSLICE09 30 0 -miri MIRIFU_3BSLICE10 30 0 -miri MIRIFU_3BSLICE11 30 0 -miri MIRIFU_3BSLICE12 30 0 -miri MIRIFU_3BSLICE13 30 0 -miri MIRIFU_3BSLICE14 30 0 -miri MIRIFU_3BSLICE15 30 0 -miri MIRIFU_3BSLICE16 30 0 -miri MIRIFU_CHANNEL3C 30 0 -miri MIRIFU_3CSLICE01 30 0 -miri MIRIFU_3CSLICE02 30 0 -miri MIRIFU_3CSLICE03 30 0 -miri MIRIFU_3CSLICE04 30 0 -miri MIRIFU_3CSLICE05 30 0 -miri MIRIFU_3CSLICE06 30 0 -miri MIRIFU_3CSLICE07 30 0 -miri MIRIFU_3CSLICE08 30 0 -miri MIRIFU_3CSLICE09 30 0 -miri MIRIFU_3CSLICE10 30 0 -miri MIRIFU_3CSLICE11 30 0 -miri MIRIFU_3CSLICE12 30 0 -miri MIRIFU_3CSLICE13 30 0 -miri MIRIFU_3CSLICE14 30 0 -miri MIRIFU_3CSLICE15 30 0 -miri MIRIFU_3CSLICE16 30 0 -miri MIRIFU_CHANNEL4A 30 0 -miri MIRIFU_4ASLICE01 30 0 -miri MIRIFU_4ASLICE02 30 0 -miri MIRIFU_4ASLICE03 30 0 -miri MIRIFU_4ASLICE04 30 0 -miri MIRIFU_4ASLICE05 30 0 -miri MIRIFU_4ASLICE06 30 0 -miri MIRIFU_4ASLICE07 30 0 -miri MIRIFU_4ASLICE08 30 0 -miri MIRIFU_4ASLICE09 30 0 -miri MIRIFU_4ASLICE10 30 0 -miri MIRIFU_4ASLICE11 30 0 -miri MIRIFU_4ASLICE12 30 0 -miri MIRIFU_CHANNEL4B 30 0 -miri MIRIFU_4BSLICE01 30 0 -miri MIRIFU_4BSLICE02 30 0 -miri MIRIFU_4BSLICE03 30 0 -miri MIRIFU_4BSLICE04 30 0 -miri MIRIFU_4BSLICE05 30 0 -miri MIRIFU_4BSLICE06 30 0 -miri MIRIFU_4BSLICE07 30 0 -miri MIRIFU_4BSLICE08 30 0 -miri MIRIFU_4BSLICE09 30 0 -miri MIRIFU_4BSLICE10 30 0 -miri MIRIFU_4BSLICE11 30 0 -miri MIRIFU_4BSLICE12 30 0 -miri MIRIFU_CHANNEL4C 30 0 -miri MIRIFU_4CSLICE01 30 0 -miri MIRIFU_4CSLICE02 30 0 -miri MIRIFU_4CSLICE03 30 0 -miri MIRIFU_4CSLICE04 30 0 -miri MIRIFU_4CSLICE05 30 0 -miri MIRIFU_4CSLICE06 30 0 -miri MIRIFU_4CSLICE07 30 0 -miri MIRIFU_4CSLICE08 30 0 -miri MIRIFU_4CSLICE09 30 0 -miri MIRIFU_4CSLICE10 30 0 -miri MIRIFU_4CSLICE11 30 0 -miri MIRIFU_4CSLICE12 30 0 -nirspec NRS1_FULL_OSS 10 0 -nirspec NRS1_FULL 10 0 -nirspec NRS2_FULL_OSS 10 0 -nirspec NRS2_FULL 10 0 -nirspec NRS_S200A1_SLIT 30 0 -nirspec NRS_S200A2_SLIT 30 0 -nirspec NRS_S400A1_SLIT 30 0 -nirspec NRS_S1600A1_SLIT 30 0 -nirspec NRS_S200B1_SLIT 30 0 -nirspec NRS_FULL_IFU 10 0 -nirspec NRS_IFU_SLICE00 30 0 -nirspec NRS_IFU_SLICE01 30 0 -nirspec NRS_IFU_SLICE02 30 0 -nirspec NRS_IFU_SLICE03 30 0 -nirspec NRS_IFU_SLICE04 30 0 -nirspec NRS_IFU_SLICE05 30 0 -nirspec NRS_IFU_SLICE06 30 0 -nirspec NRS_IFU_SLICE07 30 0 -nirspec NRS_IFU_SLICE08 30 0 -nirspec NRS_IFU_SLICE09 30 0 -nirspec NRS_IFU_SLICE10 30 0 -nirspec NRS_IFU_SLICE11 30 0 -nirspec NRS_IFU_SLICE12 30 0 -nirspec NRS_IFU_SLICE13 30 0 
-nirspec NRS_IFU_SLICE14 30 0 -nirspec NRS_IFU_SLICE15 30 0 -nirspec NRS_IFU_SLICE16 30 0 -nirspec NRS_IFU_SLICE17 30 0 -nirspec NRS_IFU_SLICE18 30 0 -nirspec NRS_IFU_SLICE19 30 0 -nirspec NRS_IFU_SLICE20 30 0 -nirspec NRS_IFU_SLICE21 30 0 -nirspec NRS_IFU_SLICE22 30 0 -nirspec NRS_IFU_SLICE23 30 0 -nirspec NRS_IFU_SLICE24 30 0 -nirspec NRS_IFU_SLICE25 30 0 -nirspec NRS_IFU_SLICE26 30 0 -nirspec NRS_IFU_SLICE27 30 0 -nirspec NRS_IFU_SLICE28 30 0 -nirspec NRS_IFU_SLICE29 30 0 -nirspec NRS_FULL_MSA 10 0 -nirspec NRS_FULL_MSA1 10 0 -nirspec NRS_FULL_MSA2 10 0 -nirspec NRS_FULL_MSA3 10 0 -nirspec NRS_FULL_MSA4 10 0 -nirspec NRS_VIGNETTED_MSA 30 0 -nirspec NRS_VIGNETTED_MSA1 30 0 -nirspec NRS_VIGNETTED_MSA2 30 0 -nirspec NRS_VIGNETTED_MSA3 30 0 -nirspec NRS_VIGNETTED_MSA4 30 0 -nirspec NRS_FIELD1_MSA4 30 0 -nirspec NRS_FIELD2_MSA4 30 0 -nirspec NRS1_FP1MIMF 30 0 -nirspec NRS1_FP2MIMF 30 0 -nirspec NRS1_FP3MIMF 30 0 -nirspec NRS2_FP4MIMF 30 0 -nirspec NRS2_FP5MIMF 30 0 -nirspec CLEAR_GWA_OTE 30 0 -nirspec F110W_GWA_OTE 30 0 -nirspec F140X_GWA_OTE 30 0 -nirspec NRS_SKY_OTEIP 30 0 -nirspec NRS_CLEAR_OTEIP_MSA_L0 30 0 -nirspec NRS_CLEAR_OTEIP_MSA_L1 30 0 -nirspec NRS_F070LP_OTEIP_MSA_L0 30 0 -nirspec NRS_F070LP_OTEIP_MSA_L1 30 0 -nirspec NRS_F100LP_OTEIP_MSA_L0 30 0 -nirspec NRS_F100LP_OTEIP_MSA_L1 30 0 -nirspec NRS_F170LP_OTEIP_MSA_L0 30 0 -nirspec NRS_F170LP_OTEIP_MSA_L1 30 0 -nirspec NRS_F290LP_OTEIP_MSA_L0 30 0 -nirspec NRS_F290LP_OTEIP_MSA_L1 30 0 -nirspec NRS_F110W_OTEIP_MSA_L0 30 0 -nirspec NRS_F110W_OTEIP_MSA_L1 30 0 -nirspec NRS_F140X_OTEIP_MSA_L0 30 0 -nirspec NRS_F140X_OTEIP_MSA_L1 30 0 -fgs FGS1_FULL_OSS 10 0 -fgs FGS1_FULL 10 0 -fgs FGS2_FULL_OSS 10 0 -fgs FGS2_FULL 10 0 -fgs FGS1_SUB128LL 30 0 -fgs FGS1_SUB128DIAG 30 0 -fgs FGS1_SUB128CNTR 30 0 -fgs FGS1_SUB32LL 30 0 -fgs FGS1_SUB32DIAG 30 0 -fgs FGS1_SUB32CNTR 30 0 -fgs FGS1_SUB8LL 30 0 -fgs FGS1_SUB8DIAG 30 0 -fgs FGS1_SUB8CNTR 30 0 -fgs FGS2_SUB128LL 30 0 -fgs FGS2_SUB128DIAG 30 0 -fgs FGS2_SUB128CNTR 30 0 -fgs FGS2_SUB32LL 30 0 -fgs FGS2_SUB32DIAG 30 0 -fgs FGS2_SUB32CNTR 30 0 -fgs FGS2_SUB8LL 30 0 -fgs FGS2_SUB8DIAG 30 0 -fgs FGS2_SUB8CNTR 30 0 -fgs FGS1_FP1MIMF 30 0 -fgs FGS1_FP2MIMF 30 0 -fgs FGS1_FP3MIMF 30 0 -fgs FGS1_FP4MIMF 30 0 -fgs FGS1_FP5MIMF 30 0 -fgs FGS2_FP1MIMF 30 0 -fgs FGS2_FP2MIMF 30 0 -fgs FGS2_FP3MIMF 30 0 -fgs FGS2_FP4MIMF 30 0 -fgs FGS2_FP5MIMF 30 0 +miri MIRIM_BRIGHTSKY 1 0 +miri MIRIM_SUB256 1 0 +miri MIRIM_SUB128 1 0 +miri MIRIM_SUB64 1 0 +miri MIRIM_SLITLESSPRISM 1 0 +miri MIRIM_MASK1065 1 0 +miri MIRIM_MASK1140 1 0 +miri MIRIM_MASK1550 1 0 +miri MIRIM_MASKLYOT 1 0 +miri MIRIM_CORON1065 1 0 +miri MIRIM_CORON1140 1 0 +miri MIRIM_CORON1550 1 0 +miri MIRIM_CORONLYOT 1 0 +miri MIRIM_SLIT 1 0 +miri MIRIFU_CHANNEL1A 1 0 +miri MIRIFU_CHANNEL1B 1 0 +miri MIRIFU_CHANNEL1C 1 0 +miri MIRIFU_CHANNEL2A 1 0 +miri MIRIFU_CHANNEL2B 1 0 +miri MIRIFU_CHANNEL2C 1 0 +miri MIRIFU_CHANNEL3A 1 0 +miri MIRIFU_CHANNEL3B 1 0 +miri MIRIFU_CHANNEL3C 1 0 +miri MIRIFU_CHANNEL4A 1 0 +miri MIRIFU_CHANNEL4B 1 0 +miri MIRIFU_CHANNEL4C 1 0 +nirspec NRS1_FULL 1 0 +nirspec NRS2_FULL 1 0 +nirspec NRS_S200A1_SLIT 1 0 +nirspec NRS_S200A2_SLIT 1 0 +nirspec NRS_S400A1_SLIT 1 0 +nirspec NRS_S1600A1_SLIT 1 0 +nirspec NRS_S200B1_SLIT 1 0 +nirspec NRS_FULL_IFU 1 0 +nirspec NRS_FULL_MSA 1 0 +fgs FGS1_FULL 1 0 +fgs FGS2_FULL 1 0 +fgs FGS1_SUB128CNTR 1 0 +fgs FGS2_SUB128CNTR 1 0 diff --git a/jwql/shared_tasks/shared_tasks.py b/jwql/shared_tasks/shared_tasks.py index 516738ed0..070c0483d 100644 --- a/jwql/shared_tasks/shared_tasks.py +++ 
b/jwql/shared_tasks/shared_tasks.py @@ -231,7 +231,7 @@ def run_subprocess(name, cmd, outputs, cal_dir, ins, in_file, short_name, res_fi process = Popen(command, shell=True, executable="/bin/bash", stderr=PIPE) with process.stderr: log_subprocess_output(process.stderr) - result = process.wait() + result = process.wait() logging.info("Subprocess result was {}".format(result)) if not os.path.isfile(res_file): @@ -241,7 +241,7 @@ def run_subprocess(name, cmd, outputs, cal_dir, ins, in_file, short_name, res_fi for line in status: logging.error(line.strip()) return status - + with open(res_file, 'r') as inf: status = inf.readlines() return status @@ -251,17 +251,17 @@ def run_subprocess(name, cmd, outputs, cal_dir, ins, in_file, short_name, res_fi def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, step_args={}): """Run the steps of ``calwebb_detector1`` on the input file, saving the result of each step as a separate output file, then return the name-and-path of the file as reduced - in the reduction directory. Once all requested extensions have been produced, the + in the reduction directory. Once all requested extensions have been produced, the pipeline will return. Parameters ---------- input_file_name : str File on which to run the pipeline steps - + short_name : str Name of the file to be calibrated after any extensions have been stripped off. - + ext_or_exts : list List of extensions to be retrieved. @@ -291,7 +291,7 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, output_dir = os.path.join(config['transfer_dir'], "outgoing") msg = "Input from {}, calibrate in {}, output to {}" logging.info(msg.format(input_dir, cal_dir, output_dir)) - + input_file = os.path.join(input_dir, input_file_name) current_dir = os.path.dirname(__file__) cmd_name = os.path.join(current_dir, "run_pipeline.py") @@ -308,9 +308,9 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, logging.info("Requesting {}".format(calibrated_files)) cores = 'all' - status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, input_file, + status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, input_file, short_name, result_file, cores) - + if status[-1].strip() == "SUCCEEDED": logging.info("Subprocess reports successful finish.") else: @@ -323,7 +323,7 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, logging.error("\t{}".format(line.strip())) if core_fail: cores = "half" - status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, + status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, input_file, short_name, result_file, cores) if status[-1].strip() == "SUCCEEDED": logging.info("Subprocess reports successful finish.") @@ -337,7 +337,7 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, logging.error("\t{}".format(line.strip())) if core_fail: cores = "none" - status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, + status = run_subprocess(cmd_name, "cal", outputs, cal_dir, instrument, input_file, short_name, result_file, cores) if status[-1].strip() == "SUCCEEDED": logging.info("Subprocess reports successful finish.") @@ -346,7 +346,7 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, logging.error("Pipeline subprocess failed.") if not managed: raise ValueError("Pipeline Failed") - + for file in calibrated_files: logging.info("Checking for output {}".format(file)) if not 
os.path.isfile(os.path.join(cal_dir, file)): @@ -420,14 +420,14 @@ def calwebb_detector1_save_jump(input_file_name, instrument, ramp_fit=True, save short_name = input_file_name.replace("_uncal", "").replace("_0thgroup", "").replace(".fits", "") ensure_dir_exists(cal_dir) output_dir = os.path.join(config["transfer_dir"], "outgoing") - + cmd_name = os.path.join(os.path.dirname(__file__), "run_pipeline.py") result_file = os.path.join(cal_dir, short_name+"_status.txt") cores = 'all' - status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, input_file, + status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, input_file, short_name, result_file, cores) - + if status[-1].strip() == "SUCCEEDED": logging.info("Subprocess reports successful finish.") else: @@ -440,7 +440,7 @@ def calwebb_detector1_save_jump(input_file_name, instrument, ramp_fit=True, save logging.error("\t{}".format(line.strip())) if core_fail: cores = "half" - status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, + status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, input_file, short_name, result_file, cores) if status[-1].strip() == "SUCCEEDED": logging.info("Subprocess reports successful finish.") @@ -454,7 +454,7 @@ def calwebb_detector1_save_jump(input_file_name, instrument, ramp_fit=True, save logging.error("\t{}".format(line.strip())) if core_fail: cores = "none" - status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, + status = run_subprocess(cmd_name, "jump", "all", cal_dir, instrument, input_file, short_name, result_file, cores) if status[-1].strip() == "SUCCEEDED": logging.info("Subprocess reports successful finish.") @@ -762,7 +762,7 @@ def run_parallel_pipeline(input_files, in_ext, ext_or_exts, instrument, jump_pip file_or_files : str or list-of-str Name (or names) of the result file(s), including path(s) """ - logging.info("Pipeline call requestion calibrated extensions {}".format(ext_or_exts)) + logging.info("Pipeline call requesting calibrated extensions {}".format(ext_or_exts)) for input_file in input_files: logging.info("\tCalibrating {}".format(input_file)) diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py index e601127fd..9b7c44bc5 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py @@ -60,7 +60,7 @@ class DarkHistPlot(): plot : bokeh.figure Figure containing the histogram plot """ - def __init__(self, aperture, data): + def __init__(self, aperture, data, obsdate): """Create the plot Parameters @@ -74,6 +74,7 @@ def __init__(self, aperture, data): """ self.data = data self.aperture = aperture + self.obsdate = obsdate self.create_plot() def calc_bin_edges(self, centers): @@ -109,7 +110,7 @@ def create_plot(self): else: use_amp = '1' - title_str = f'{self.aperture}: Dark Rate Histogram' + title_str = f'{self.aperture}: Dark Rate Histogram. {self.obsdate.strftime("%d %b %Y")}' x_label = 'Dark Rate (DN/sec)' y_label = 'Number of Pixels' @@ -462,7 +463,7 @@ def __init__(self, instrument): # Retrieve data from database. Since the mean dark image plots are # produced by the dark monitor itself, all we need for that is the - # name of the file. then we need the histogram and trending data. All + # name of the file. Then we need the histogram and trending data. All # of this is in the dark monitor stats table. No need to query the # dark monitor pixel table. 
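
# The get_latest_histogram_data() hunk below keeps only the rows written by
# the newest monitor run. A minimal sketch of that selection, using
# hypothetical entry dates and a 30-minute window in place of delta_time:
import numpy as np
from datetime import datetime, timedelta

entry_dates = np.array([datetime(2023, 11, 1, 9, 0),     # older run
                        datetime(2023, 12, 20, 10, 0),   # newest run, amp '1'
                        datetime(2023, 12, 20, 10, 5)])  # newest run, amp '2'
latest_date = entry_dates.max()
most_recent_idx = np.where(entry_dates > (latest_date - timedelta(minutes=30)))[0]
print(most_recent_idx)  # [1 2]: one row per amplifier from the latest run
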
self.db.retrieve_data(self.aperture, get_pixtable_for_detector=False)
@@ -479,7 +480,7 @@ def __init__(self, instrument):
             self.get_trending_data()

         # Now that we have all the data, create the actual plots
-        self.hist_plots[aperture] = DarkHistPlot(self.aperture, self.hist_data).plot
+        self.hist_plots[aperture] = DarkHistPlot(self.aperture, self.hist_data, self.hist_date).plot
         self.trending_plots[aperture] = DarkTrendPlot(self.aperture, self.mean_dark, self.stdev_dark, self.obstime).plot

     def ensure_all_full_frame_apertures(self):
@@ -549,12 +550,15 @@ def get_latest_histogram_data(self):
         most_recent_idx = np.where(self._entry_dates > (latest_date - delta_time))[0]

         # Store the histogram data in a dictionary where the keys are the
-        # amplifier values (note that these are strings e.g. '1''), and the
+        # amplifier values (note that these are strings e.g. '1'), and the
         # values are tuples of (x, y) lists
         for idx in most_recent_idx:
             self.hist_data[self.db.stats_data[idx].amplifier] = (self.db.stats_data[idx].hist_dark_values,
                                                                  self.db.stats_data[idx].hist_amplitudes)

+        # Keep track of the observation date of the most recent entry
+        self.hist_date = self.db.stats_data[most_recent_idx[0]].obs_mid_time
+
     def get_trending_data(self):
         """Organize data for the trending plot. Here we need all the data for
         the aperture. Keep amplifier-specific data separated.
@@ -576,7 +580,6 @@ def stats_data_to_lists(self):
         """Create arrays from some of the stats database columns that are used
         by multiple plot types
         """
-        #apertures = np.array([e.aperture for e in self.db.stats_data])
         self._amplifiers = np.array([e.amplifier for e in self.db.stats_data])
         self._entry_dates = np.array([e.entry_date for e in self.db.stats_data])
         self._mean = np.array([e.mean for e in self.db.stats_data])

From 201eb15fbcb5efda9d02dd9f3ec18b40c0609add Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Wed, 20 Dec 2023 22:28:34 -0500
Subject: [PATCH 19/38] Tweaks to work with rateints files

---
 .../common_monitors/dark_monitor.py           | 81 ++++++++++++++-----
 jwql/utils/instrument_properties.py           |  5 ++
 2 files changed, 67 insertions(+), 19 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 562ab29a5..696da0dbb 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -234,7 +234,8 @@ def add_bad_pix(self, coordinates, pixel_type, files, mean_filename, baseline_fi
         with engine.begin() as connection:
             connection.execute(self.pixel_table.__table__.insert(), entry)

-    def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, noisyxy=None, baseline_file=None):
+    def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, noisyxy=None, baseline_file=None,
+                                 min_time='', max_time=''):
         """Create and save a png containing the mean dark slope image,
         to be displayed in the web app
@@ -258,10 +259,17 @@ def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, no
         baseline_file : str
             Name of fits file containing the mean slope image to which
             ``image`` was compared when looking for new hot/dead/noisy pixels
+
+        min_time : str
+            Earliest observation time, in MJD, used in the creation of ``image``.
+
+        max_time : str
+            Latest observation time, in MJD, used in the creation of ``image``. 
+
         """
         output_filename = '{}_{}_{}_to_{}_mean_slope_image.png'.format(self.instrument.lower(),
                                                                        self.aperture.lower(),
-                                                                       self.query_start, self.query_end)
+                                                                       min_time, max_time)

         mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images')
@@ -275,8 +283,8 @@ def create_mean_slope_figure(self, image, num_files, hotxy=None, deadxy=None, no
         img_mn, img_med, img_dev = sigma_clipped_stats(image[4: ny - 4, 4: nx - 4])

         # Create figure
-        start_time = Time(float(self.query_start), format='mjd').tt.datetime.strftime("%m/%d/%Y")
-        end_time = Time(float(self.query_end), format='mjd').tt.datetime.strftime("%m/%d/%Y")
+        start_time = Time(float(min_time), format='mjd').tt.datetime.strftime("%m/%d/%Y")
+        end_time = Time(float(max_time), format='mjd').tt.datetime.strftime("%m/%d/%Y")

         self.plot = figure(title=f'{self.aperture}: {num_files} files. {start_time} to {end_time}', tools='')
         # tools='pan,box_zoom,reset,wheel_zoom,save')
@@ -733,7 +741,16 @@ def process(self, file_list):

         # Calculate a mean slope image from the inputs
         slope_image, stdev_image = calculations.mean_image(slope_image_stack, sigma_threshold=3)
-        mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image, slope_files)
+
+        # Use the min and max observation time of the input files to create the slope file name
+        min_time_str = min_time.strftime('%Y-%m-%dT%H:%M:%S')
+        min_time_mjd = Time(min_time_str, format='isot', scale='utc').mjd
+        min_time_mjd_trunc = "{:.4f}".format(min_time_mjd)
+        max_time_str = max_time.strftime('%Y-%m-%dT%H:%M:%S')
+        max_time_mjd = Time(max_time_str, format='isot', scale='utc').mjd
+        max_time_mjd_trunc = "{:.4f}".format(max_time_mjd)
+        mean_slope_file = self.save_mean_slope_image(slope_image, stdev_image, slope_files,
+                                                     min_time_mjd_trunc, max_time_mjd_trunc)

         # Free up memory
         del slope_image_stack
@@ -761,7 +778,15 @@ def process(self, file_list):
             baseline_stdev = deepcopy(stdev_image)
         else:
             logging.info('\tBaseline file is {}'.format(baseline_file))
-            baseline_mean, baseline_stdev = self.read_baseline_slope_image(baseline_file)
+
+            if not os.path.isfile(baseline_file):
+                logging.warning((f'\tBaseline file {baseline_file} does not exist. Setting '
+                                 'the current mean slope image to be the new baseline.'))
+                baseline_file = mean_slope_file
+                baseline_mean = deepcopy(slope_image)
+                baseline_stdev = deepcopy(stdev_image)
+            else:
+                baseline_mean, baseline_stdev = self.read_baseline_slope_image(baseline_file)

         # Check the hot/dead pixel population for changes
         logging.info("\tFinding new hot/dead pixels")
@@ -804,17 +829,22 @@ def process(self, file_list):
             logging.info('\tFound {} new noisy pixels'.format(len(new_noisy_pixels[0])))
             self.add_bad_pix(new_noisy_pixels, 'noisy', file_list, mean_slope_file, baseline_file, min_time, mid_time, max_time)

-        logging.info("Creating Mean Slope Image {}".format(slope_image))
-        # Create png file of mean slope image. 
Add bad pixels only for full frame apertures self.create_mean_slope_figure(slope_image, len(slope_files), hotxy=new_hot_pix, deadxy=new_dead_pix, - noisyxy=new_noisy_pixels, baseline_file=baseline_file) + noisyxy=new_noisy_pixels, baseline_file=baseline_file, + min_time=min_time_mjd_trunc, max_time=max_time_mjd_trunc) logging.info('\tSigma-clipped mean of the slope images saved to: {}'.format(mean_slope_file)) # ----- Calculate image statistics ----- # Find amplifier boundaries so per-amp statistics can be calculated number_of_amps, amp_bounds = instrument_properties.amplifier_info(slope_files[0]) + + print('amps:') + print(number_of_amps, amp_bounds) + stop + + logging.info('\tAmplifier boundaries: {}'.format(amp_bounds)) # Calculate mean and stdev values, and fit a Gaussian to the @@ -935,14 +965,14 @@ def run(self): # If the aperture is not listed in the threshold file, we need # a default - if not np.any(match): - integration_count_threshold = 1 - self.skipped_initial_ints = 0 - logging.warning(('\tAperture {} is not present in the threshold file. Continuing ' - 'with the default threshold of 1 file, and no skipped integrations.'.format(aperture))) - else: - integration_count_threshold = limits['Threshold'][match][0] - self.skipped_initial_ints = limits['N_skipped_integs'][match][0] + #if not np.any(match): + # integration_count_threshold = 1 + # self.skipped_initial_ints = 0 + # logging.warning(('\tAperture {} is not present in the threshold file. Continuing ' + # 'with the default threshold of 1 file, and no skipped integrations.'.format(aperture))) + #else: + integration_count_threshold = limits['Threshold'][match][0] + self.skipped_initial_ints = limits['N_skipped_integs'][match][0] self.aperture = aperture # We need a separate search for each readout pattern @@ -1165,7 +1195,7 @@ def run(self): logging.info('Dark Monitor completed successfully.') - def save_mean_slope_image(self, slope_img, stdev_img, files): + def save_mean_slope_image(self, slope_img, stdev_img, files, min_time, max_time): """Save the mean slope image and associated stdev image to a file @@ -1181,6 +1211,12 @@ def save_mean_slope_image(self, slope_img, stdev_img, files): files : list List of input files used to construct the mean slope image + min_time : str + Earliest observation time, in MJD, corresponding to ``files``. + + max_time : str + Latest observation time, in MJD, corresponding to ``files``. 
+ Returns ------- output_filename : str @@ -1189,7 +1225,7 @@ def save_mean_slope_image(self, slope_img, stdev_img, files): output_filename = '{}_{}_{}_to_{}_mean_slope_image.fits'.format(self.instrument.lower(), self.aperture.lower(), - self.query_start, self.query_end) + min_time, max_time) mean_slope_dir = os.path.join(get_config()['outputs'], 'dark_monitor', 'mean_slope_images') ensure_dir_exists(mean_slope_dir) @@ -1201,6 +1237,8 @@ def save_mean_slope_image(self, slope_img, stdev_img, files): primary_hdu.header['APERTURE'] = (self.aperture, 'Aperture name') primary_hdu.header['QRY_STRT'] = (self.query_start, 'MAST Query start time (MJD)') primary_hdu.header['QRY_END'] = (self.query_end, 'MAST Query end time (MJD)') + primary_hdu.header['MIN_TIME'] = (min_time, 'Beginning obs time (MJD)') + primary_hdu.header['MAX_TIME'] = (max_time, 'Ending obs time (MJD)') files_string = 'FILES USED: ' for filename in files: @@ -1692,6 +1730,11 @@ def stats_by_amp(self, image, amps): lower_bound = (amp_mean - 7 * amp_stdev) upper_bound = (amp_mean + 7 * amp_stdev) + + print(y_start,y_end,y_step, x_start,x_end,x_step) + print(amp_mean, amp_stdev, lower_bound, upper_bound) + + hist, bin_edges = np.histogram(image[indexes[0], indexes[1]], bins='auto', range=(lower_bound, upper_bound)) diff --git a/jwql/utils/instrument_properties.py b/jwql/utils/instrument_properties.py index 33d900b4d..88acb5465 100644 --- a/jwql/utils/instrument_properties.py +++ b/jwql/utils/instrument_properties.py @@ -129,6 +129,11 @@ def amplifier_info(filename, omit_reference_pixels=True): except KeyError: raise KeyError('DQ extension not found.') + # If the file contains multiple frames (e.g. rateints file) + # keep just the first + if len(data_quality.shape) == 3: + data_quality = data_quality[0, :, :] + # Reference pixels should be flagged in the DQ array with the # REFERENCE_PIXEL flag. Find the science pixels by looping for # pixels that don't have that bit set. From 64b1ee37fd07337d8573c427b21a214fc658e5e6 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 27 Dec 2023 15:54:53 -0500 Subject: [PATCH 20/38] Fix failing test --- jwql/tests/test_dark_monitor.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py index f06d1002b..2226038ba 100644 --- a/jwql/tests/test_dark_monitor.py +++ b/jwql/tests/test_dark_monitor.py @@ -153,6 +153,12 @@ def generate_data_for_file_splitting_test(): ] test12 = (files, start_times, end_times, integration_list, threshold, expected) + # In this case, the final 2 files are grouped together due to being taken close + # in time to one another. However, they do not contain enough integrations to + # reach the threshold. Since these are the final two files, we have no way of + # knowing if they are just the first two observations of a larger set that should + # be grouped. Therefore, the dark monitor ignores these final two files, under + # the assumption that they will be used the next time the monitor is run. deltat = [50., 49., 48., 47., 34., 33., 32., 31., 20., 19.] 
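+    # (Tracing the expected grouping for this case: assuming the ~13 and ~11 day
+    # gaps in the start times below both exceed the NIRCam value of
+    # DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME, the epochs are files 0-3
+    # (3+3+2+2 integrations), files 4-7 (2+1+1+1), and files 8-9 (1+1). With the
+    # threshold of 6, the first epoch splits into [file_0, file_1] and
+    # [file_2, file_3], the second epoch stays whole, and the final epoch falls
+    # short of the threshold and is dropped, as described above.)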
start_times = [now - dt for dt in deltat] end_times = [s+0.1 for s in start_times] @@ -160,8 +166,7 @@ def generate_data_for_file_splitting_test(): integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] expected = [['file_0.fits', 'file_1.fits'], ['file_2.fits', 'file_3.fits'], - ['file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits'], - ['file_8.fits', 'file_9.fits'] + ['file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits'] ] test13 = (files, start_times, end_times, integration_list, threshold, expected) @@ -279,6 +284,10 @@ def test_split_files_into_sub_lists(files, start_times, end_times, integration_l d = dark_monitor.Dark() d.instrument = 'nircam' d.split_files_into_sub_lists(files, start_times, end_times, integration_list, threshold) + + print(files, start_times, end_times, integration_list, threshold, expected) + + assert d.file_batches == expected From 15c8882b00371d169c534a512bcbbb6cf15119f3 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Thu, 28 Dec 2023 12:50:49 -0500 Subject: [PATCH 21/38] Make mean img stats robust against NaNs. PEP-8 fixes. --- .../common_monitors/dark_monitor.py | 11 ------- jwql/tests/test_dark_monitor.py | 33 +++++++++---------- jwql/utils/calculations.py | 6 ++-- 3 files changed, 19 insertions(+), 31 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 696da0dbb..7e5dd8b59 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -839,12 +839,6 @@ def process(self, file_list): # Find amplifier boundaries so per-amp statistics can be calculated number_of_amps, amp_bounds = instrument_properties.amplifier_info(slope_files[0]) - - print('amps:') - print(number_of_amps, amp_bounds) - stop - - logging.info('\tAmplifier boundaries: {}'.format(amp_bounds)) # Calculate mean and stdev values, and fit a Gaussian to the @@ -1730,11 +1724,6 @@ def stats_by_amp(self, image, amps): lower_bound = (amp_mean - 7 * amp_stdev) upper_bound = (amp_mean + 7 * amp_stdev) - - print(y_start,y_end,y_step, x_start,x_end,x_step) - print(amp_mean, amp_stdev, lower_bound, upper_bound) - - hist, bin_edges = np.histogram(image[indexes[0], indexes[1]], bins='auto', range=(lower_bound, upper_bound)) diff --git a/jwql/tests/test_dark_monitor.py b/jwql/tests/test_dark_monitor.py index 2226038ba..ef206daba 100644 --- a/jwql/tests/test_dark_monitor.py +++ b/jwql/tests/test_dark_monitor.py @@ -40,7 +40,7 @@ def generate_data_for_file_splitting_test(): now = Time.now().mjd deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] threshold = 5. # integrations integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] expected = [['file_0.fits', 'file_1.fits'], @@ -52,7 +52,7 @@ def generate_data_for_file_splitting_test(): # Final epoch may not be over. Not enough ints in final epoch deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] threshold = 6. # integrations integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] expected = [['file_0.fits', 'file_1.fits'], @@ -63,18 +63,18 @@ def generate_data_for_file_splitting_test(): # Final epoch may not be over. Not enough ints in final subgroup of final epoch deltat = [26., 25., 24., 23., 22., 4., 3., 2., 1., 0.] 
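     # (Tracing the expected grouping: the 18 day jump between the fifth and sixth
     # start times separates two epochs, assuming it exceeds the epoch threshold
     # time. Epoch one yields [file_0, file_1] (6 ints) and [file_2, file_3,
     # file_4] (6 ints); in the final epoch [file_5, file_6, file_7] reaches
     # 7 ints, while the leftover [file_8, file_9] (4 ints) falls below the
     # threshold of 6 and is ignored.)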
start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] threshold = 6. # integrations integration_list = [3, 3, 2, 2, 2, 1, 3, 3, 2, 2] expected = [['file_0.fits', 'file_1.fits'], ['file_2.fits', 'file_3.fits', 'file_4.fits'], ['file_5.fits', 'file_6.fits', 'file_7.fits'] ] - test3= (files, start_times, end_times, integration_list, threshold, expected) + test3 = (files, start_times, end_times, integration_list, threshold, expected) deltat = [40., 39., 38., 37., 36., 18., 17., 16., 15., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] threshold = 5. # integrations integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] expected = [['file_0.fits', 'file_1.fits'], @@ -85,7 +85,7 @@ def generate_data_for_file_splitting_test(): deltat = [40., 39., 38., 37., 36., 18., 17., 16., 15., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] threshold = 6. # integrations integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] expected = [['file_0.fits', 'file_1.fits'], @@ -96,7 +96,7 @@ def generate_data_for_file_splitting_test(): deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] integration_list = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] threshold = 6 expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits']] @@ -111,32 +111,32 @@ def generate_data_for_file_splitting_test(): threshold = 10 expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits'] - ] + ] test8 = (files, start_times, end_times, integration_list, threshold, expected) deltat = [23., 22., 21., 20., 19., 18., 17., 16., 15., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] integration_list = [1] * len(start_times) threshold = 10 expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits'] - ] + ] test9 = (files, start_times, end_times, integration_list, threshold, expected) deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] integration_list = [1] * len(start_times) threshold = 10 expected = [['file_0.fits', 'file_1.fits', 'file_2.fits', 'file_3.fits', 'file_4.fits', 'file_5.fits', 'file_6.fits', 'file_7.fits', 'file_8.fits', 'file_9.fits'] - ] + ] test10 = (files, start_times, end_times, integration_list, threshold, expected) deltat = [9., 8., 7., 6., 5., 4., 3., 2., 1., 0.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] integration_list = [1] * len(start_times) threshold = 11 expected = [] @@ -144,7 +144,7 @@ def generate_data_for_file_splitting_test(): deltat = [40., 39., 38., 37., 24., 23., 22., 21., 1., 0.] 
start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] threshold = 6 # integrations integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] expected = [['file_0.fits', 'file_1.fits'], @@ -161,7 +161,7 @@ def generate_data_for_file_splitting_test(): # the assumption that they will be used the next time the monitor is run. deltat = [50., 49., 48., 47., 34., 33., 32., 31., 20., 19.] start_times = [now - dt for dt in deltat] - end_times = [s+0.1 for s in start_times] + end_times = [s + 0.1 for s in start_times] threshold = 6 # integrations integration_list = [3, 3, 2, 2, 2, 1, 1, 1, 1, 1] expected = [['file_0.fits', 'file_1.fits'], @@ -285,9 +285,6 @@ def test_split_files_into_sub_lists(files, start_times, end_times, integration_l d.instrument = 'nircam' d.split_files_into_sub_lists(files, start_times, end_times, integration_list, threshold) - print(files, start_times, end_times, integration_list, threshold, expected) - - assert d.file_batches == expected diff --git a/jwql/utils/calculations.py b/jwql/utils/calculations.py index 34e866c2e..a2a44ac3c 100644 --- a/jwql/utils/calculations.py +++ b/jwql/utils/calculations.py @@ -17,6 +17,7 @@ """ import numpy as np +import warnings from astropy.modeling import fitting, models from astropy.stats import sigma_clip @@ -169,8 +170,9 @@ def mean_stdev(image, sigma_threshold=3): stdev_value : float Sigma-clipped standard deviation of image """ - - clipped, lower, upper = sigmaclip(image, low=sigma_threshold, high=sigma_threshold) + # Ignore the warning about NaNs being clipped. + warnings.filterwarnings('ignore', message='Input data contains invalid values (NaNs or infs), which were automatically clipped.*') + clipped = sigma_clip(image, sigma=sigma_threshold, masked=False) mean_value = np.mean(clipped) stdev_value = np.std(clipped) From 393a8775f12be67f1993668c2cb6f42ad0a22f86 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Thu, 28 Dec 2023 12:54:06 -0500 Subject: [PATCH 22/38] More PEP-8 fixes --- .../apps/jwql/monitor_pages/monitor_dark_bokeh.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py index d522817f0..196f779a2 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py @@ -175,7 +175,7 @@ def create_plot(self): fill_color=color, line_color="white", alpha=0.25, legend_label=f'Amp {amp}') # Set ranges - self.plot.extra_y_ranges = {"cdf_line": Range1d(0,1)} + self.plot.extra_y_ranges = {"cdf_line": Range1d(0, 1)} self.plot.add_layout(LinearAxis(y_range_name='cdf_line', axis_label="Cumulative Distribution"), "right") # Add cumulative distribution function @@ -194,7 +194,7 @@ def create_plot(self): self.plot.x_range.end = mainx[disp_index[-1]] self.plot.legend.location = "top_left" self.plot.legend.background_fill_color = "#fefefe" - self.plot.grid.grid_line_color="white" + self.plot.grid.grid_line_color = "white" else: # If self.data is empty, then make a placeholder plot self.plot = PlaceholderPlot(title_str, x_label, y_label).plot @@ -539,8 +539,7 @@ def get_latest_histogram_data(self): self.hist_data = {} if len(self._entry_dates) > 0: # Find the index of the most recent entry - #self._aperture_entries = np.where((self._apertures == aperture))[0] - latest_date = np.max(self._entry_dates) #[self._aperture_entries]) + latest_date = 
np.max(self._entry_dates) # Get indexes of entries for all amps that were added in the # most recent run of the monitor for the aperture. All entries @@ -682,7 +681,7 @@ def create_plot(self): error_upper=error_upper, time=self.obstime[use_amp] ) - ) + ) self.plot = figure(title=f'{self.aperture}: Mean +/- 1-sigma Dark Rate', tools='pan,box_zoom,reset,wheel_zoom,save', background_fill_color="#fafafa") @@ -740,7 +739,7 @@ def create_plot(self): self.plot.y_range.end = max_val * 1.05 self.plot.legend.location = "top_right" self.plot.legend.background_fill_color = "#fefefe" - self.plot.grid.grid_line_color="white" + self.plot.grid.grid_line_color = "white" else: # If there are no data, make a placeholder plot self.plot = figure(title=f'{self.aperture}: Mean +/- 1-sigma Dark Rate', tools='pan,box_zoom,reset,wheel_zoom,save', @@ -751,7 +750,7 @@ def create_plot(self): self.plot.y_range.end = 1 source = ColumnDataSource(data=dict(x=[0.5], y=[0.5], text=['No data'])) - glyph = Text(x="x", y="y", text="text", angle=0., text_color="navy", text_font_size={'value':'20px'}) + glyph = Text(x="x", y="y", text="text", angle=0., text_color="navy", text_font_size={'value': '20px'}) self.plot.add_glyph(source, glyph) self.plot.xaxis.axis_label = 'Date' From 52b27cbfa11b34ec7b678a9c16ad192fdd657535 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Thu, 28 Dec 2023 16:59:04 -0500 Subject: [PATCH 23/38] Clean up --- .../common_monitors/dark_monitor.py | 291 ++---------------- .../jwql/monitor_pages/monitor_dark_bokeh.py | 11 +- 2 files changed, 35 insertions(+), 267 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 7e5dd8b59..1c363556a 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -441,10 +441,6 @@ def exclude_existing_badpix(self, badpix, pixel_type): new_pixels_y.append(y) logging.info("\t\tKeeping {} {} pixels".format(len(new_pixels_x), pixel_type)) -# pixel = (x, y) -# if pixel not in already_found: -# new_pixels_x.append(x) -# new_pixels_y.append(y) session.close() return (new_pixels_x, new_pixels_y) @@ -939,10 +935,6 @@ def run(self): # Identify which database tables to use self.identify_tables() - # Get a list of all possible apertures from pysiaf - #possible_apertures = list(Siaf(instrument).apernames) - #possible_apertures = [ap for ap in possible_apertures if ap not in apertures_to_skip] - # Run the monitor only on the apertures listed in the threshold file. Skip all others. instrument_entries = limits['Instrument'] == instrument possible_apertures = limits['Aperture'][instrument_entries] @@ -956,15 +948,6 @@ def run(self): # Find appropriate threshold for the number of new files needed match = aperture == limits['Aperture'] - - # If the aperture is not listed in the threshold file, we need - # a default - #if not np.any(match): - # integration_count_threshold = 1 - # self.skipped_initial_ints = 0 - # logging.warning(('\tAperture {} is not present in the threshold file. 
Continuing ' - # 'with the default threshold of 1 file, and no skipped integrations.'.format(aperture))) - #else: integration_count_threshold = limits['Threshold'][match][0] self.skipped_initial_ints = limits['N_skipped_integs'][match][0] self.aperture = aperture @@ -999,7 +982,6 @@ def run(self): new_entries = monitor_utils.exclude_asic_tuning(new_entries) len_no_asic = len(new_entries) num_asic = len_new_darks - len_no_asic - #logging.info(f"\tFiltering out ASIC tuning files. Removed {num_asic} dark files.") logging.info(f'\tAperture: {self.aperture}, Readpattern: {self.readpatt}, new entries: {len(new_entries)}') @@ -1054,12 +1036,6 @@ def run(self): logging.info((f'\t\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, ' f'{self.readpatt} has found {total_integrations} integrations spread across {len(new_filenames)} files.')) if total_integrations >= integration_count_threshold: - # for testing - #logging.info('FULL BATCH STARTING TIMES:') - #logging.info(starting_times) - #logging.info('ENDING TIMES:') - #logging.info(ending_times) - logging.info(f'\tThis meets the threshold of {integration_count_threshold} integrations.') monitor_run = True @@ -1080,13 +1056,7 @@ def run(self): # Run the monitor once on each list for new_file_list, batch_start_time, batch_end_time, batch_integrations in zip(self.file_batches, self.start_time_batches, self.end_time_batches, self.integration_batches): # Copy files from filesystem - - dark_files, not_copied = copy_files(new_file_list, self.data_dir) - # Fake dark_files and not_copied, for testing - #dark_files = new_file_list - #not_copied = [] - # Check that there were no problems with the file copying. If any of the copied # files have different sizes between the MAST filesystem and the JWQL filesystem, @@ -1102,31 +1072,12 @@ def run(self): os.remove(dark_file) logging.info('\tNew_filenames: {}'.format(new_file_list)) - logging.info('\tData dir: {}'.format(self.data_dir)) logging.info('\tCopied to data dir: {}'.format(dark_files)) logging.info('\tNot copied: {}'.format(not_copied)) - - - # for testing - logging.info('STARTING TIMES FOR BATCH:') - logging.info(batch_start_time) - logging.info('ENDING TIMES FOR BATCH:') - logging.info(batch_end_time) - logging.info('INTEGRATIONS FOR BATCH:') - logging.info(batch_integrations) - - - - - # Run the dark monitor self.process(dark_files) - #logging.info('HERE IS WHERE THE MONITOR WOULD RUN ON THE GIVEN BATCH OF FILES. THIS IS TURNED OFF FOR TESTING.') - - - # Get the starting and ending time of the files in this monitor run batch_start_time = np.min(np.array(batch_start_time)) @@ -1142,19 +1093,12 @@ def run(self): 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - - with engine.begin() as connection: connection.execute( self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') logging.info('NEW ENTRY: ') logging.info(new_entry) - #logging.info('TURNED OFF DATABASE UPDATES DURING TESTING') - - - - else: logging.info(f'\tThis is below the threshold of {integration_count_threshold} integrations. 
Monitor not run.') @@ -1170,22 +1114,12 @@ def run(self): 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - - - with engine.begin() as connection: connection.execute( self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') logging.info('NEW ENTRY: ') logging.info(new_entry) - #logging.info('TURNED OFF DATABASE UPDATES DURING TESTING') - - - - - - logging.info('Dark Monitor completed successfully.') @@ -1275,6 +1209,24 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ list have a total number of integrations that is just over the given threshold value. + General assumption: Keeping files in different epochs separate is probably more + important than rigidly enforcing that the required number of integrations is reached. + + When dividing up the input files into separate lists, we first divide up by + epoch, where the start/end of epochs are defined as times where + DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME days pass without any new data appearing. + Each epoch is then potentially subdivided further based on the threshold number + of integrations (not exposures). The splitting does not operate within files. + For example, if the threshold is 2 integrations, and a particular file contains 5 + integrations, then the dark monitor will be called once on that file, working on + all 5 integrations. + + At the end of the epoch, if the final group of file(s) do not have enough + integrations to reach the threshold, they are ignored since there is no way + to know if there are more files in the same epoch that have not yet been taken. So + the files are ignored, and the query end time will be adjusted such that these files + will be found in the next run of the monitor. + Dark calibration plans per instrument: NIRCam - for full frame, takes only 2 integrations (150 groups) once per ~30-50 days. for subarrays, takes 5-10 integrations once per 30-50 days @@ -1314,28 +1266,11 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ dark monitor """ - # Not grouping together data across multiple epochs is probably more - # important than the number of integrations.... - - - - #include a final delta_t value that is the time between the last file and - #the current time. If that value is less than...something...then we assume - #we are in the middle of an epoch of the cal program. In that case, we will - #skip running the monitor on the final batch, as defined below. We can save that - #for a future run, where the final delta_t is long enough that we can assume - #that epoch of the cal program has completed. - - - logging.info('\t\tSplitting into sub-lists. Inputs at the beginning: (file, start time, end time, nints, threshold)') for f, st, et, inte in zip(files, start_times, end_times, integration_list): logging.info(f'\t\t {f}, {st}, {et}, {inte}, {threshold}') logging.info('\n') - - - # Eventual return parameters self.file_batches = [] self.start_time_batches = [] @@ -1343,7 +1278,6 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ self.integration_batches = [] # Add the current time onto the end of start_times - #start_times = np.append(start_times, Time.now().mjd) start_times = np.array(start_times) # Get the delta t between each pair of files. 
Insert 0 as the initial @@ -1362,6 +1296,7 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ if dividers[-1] < len(delta_t): dividers = np.insert(dividers, len(dividers), len(delta_t)) + logging.info(f'\t\t\tThreshold delta time used to divide epochs: {DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME[self.instrument]} days') logging.info(f'\t\t\tdelta_t between files: {delta_t}') logging.info(f'\t\t\tFinal dividers (divide data based on time gaps between files): {dividers}') logging.info('\n') @@ -1369,7 +1304,7 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # Loop over epochs. # Within each batch, divide up the exposures into multiple batches if the total # number of integrations are above 2*threshold. - for i in range(len(dividers) - 1): # should this be len(dividers)-2??? we seem to be ending with empty results in the final loop + for i in range(len(dividers) - 1): batch_ints = integration_list[dividers[i]:dividers[i+1]] batch_files = files[dividers[i]:dividers[i+1]] batch_start_times = start_times[dividers[i]:dividers[i+1]] @@ -1381,152 +1316,6 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ for bi, bf in zip(batch_ints, batch_files): logging.info(f'\t\t\t{bf}, {bi}') - # Calculate how many subgroups to break up the batch into, - # based on the threshold, and under the assumption that we - # don't want to skip running on any of the files. - #n_subgroups = int(np.ceil(batch_int_sum / threshold)) - don't use this - - # Don't create mulitple subgroups for a single file. Treat the exposure as the - # base unit. - #intsum = 0 - #for batch_int, batch_file, start_time, end_time in zip(batch_ints, batch_files, batch_start_times, batch_end_times): - # intsum += batch_int - # if intsum >= - - - - #print('n_subgroups (based on number of integrations vs threshold)', n_subgroups) - #print('total number of integs in the batch: ', batch_int_sum) - #print('integ-based threshold to use: ', threshold) - - - - ## FOR TESTING - #n_subgroups = 1 # eventually n_subgroups should go away and the if else block below can - # be replaced by a single block of code that does not rely on n_subgroups - - - - - - """ - if n_subgroups == 0: - - print('IF N_SUBGROUPS USES NP.CEIL THEN IT IS NOT POSSIBLE TO HAVE N_SUBGROUPS == 0') - stop - - print('i and len(dividers)-1:', i, len(dividers) - 1, dividers) - - # Here, we are in a batch where the total number of integrations - # is less than the threshold (but the batch was identified due to - # the gaps in time before and after the batch.) In this case, we'll - # run the monitor with fewer than the threshold number of integrations, - # but only if this is not the final batch. In that case it may be that - # more observations are coming that should be grouped with the batch. - if i < (len(dividers) - 2): - self.file_batches.append(batch_files) - self.start_time_batches.append(batch_start_times) - self.end_time_batches.append(batch_end_times) - self.integration_batches.append(batch_ints) - else: - print('do we need a smarter if statment, like the line commented below?') - #if (i == len(dividers) - 1) and (batchnum == (n_subgroups - 1)) - # In this case, we are in the final epoch division AND we do not - # have enough integrations to subdivide the data. So we'll skip - # this data and wait for a future run of the monitor to bundle - # it with more, new data. 
- pass - - #elif n_subgroups == 1: - # # Here there are not enough integrations to split the batch into - # # more than one subgroup - # self.file_batches.append(batch_files) - # self.start_time_batches.append(batch_start_times) - # self.end_time_batches.append(batch_end_times) - # self.integration_batches.append(batch_ints) - - elif n_subgroups >= 1: - """ - # Here there are enough integrations to meet the threshold, - # or possibly enough to break the batch up - # into more than one subgroup. We can't split within a file, - # so we split after the file that gets the total number of - # integrations above the threshold. - - - - - """ - ###### Potential replacement for the 'for batchnum' loop below - ###### Potential replacement for the 'for batchnum' loop below - ###### Potential replacement for the 'for batchnum' loop below - startidx = 0 - working_batch_ints = deepcopy(batch_ints) - - - - # still need an exiting condition below.... - while True: - - batch_int_sums = np.array([np.sum(working_batch_ints[0:jj]) for jj in range(1, len(working_batch_ints) + 1)]) - ints_group = batch_int_sums // threshold - endidx = np.where(working_batch_ints > 0)[0] - - # Check if we reach the end of the file list - if len(endidx) == 0: - endidx = len(batch_ints) - 1 - complete = True - else: - endidx = endidx[0] - - subgroup_ints = batch_ints[startidx: endidx + 1] - subgroup_files = batch_files[startidx: endidx + 1] - subgroup_start_times = batch_start_times[startidx: endidx + 1] - subgroup_end_times = batch_end_times[startidx: endidx + 1] - subgroup_int_sum = np.sum(subgroup_ints) - - if (i == len(dividers) - 2) and endidx == len(batch_files) - 1: - if np.sum(subgroup_ints) >= threshold: - print('ADDED - final subgroup of final epoch') - self.file_batches.append(subgroup_files) - self.start_time_batches.append(subgroup_start_times) - self.end_time_batches.append(subgroup_end_times) - self.integration_batches.append(subgroup_ints) - else: - # Here the final subgroup does not have enough integrations to reach the threshold - # and we're not sure if the epoch is complete, so we skip these files and save them - # for a future dark monitor run - pass - - else: - #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)): - print('ADDED') - self.file_batches.append(subgroup_files) - self.start_time_batches.append(subgroup_start_times) - self.end_time_batches.append(subgroup_end_times) - self.integration_batches.append(subgroup_ints) - - if not complete: - startidx = deepcopy(endidx + 1) - base = batch_int_sums[endidx] - else: - # If we reach the end of the list before the expected number of - # subgroups, then we quit. - break - - - - ###### Potential replacement for the 'for batchnum' loop below - ###### Potential replacement for the 'for batchnum' loop below - ###### Potential replacement for the 'for batchnum' loop below - """ - - - - - - - # Calculate the total number of integrations up to each file batch_int_sums = np.array([np.sum(batch_ints[0:jj]) for jj in range(1, len(batch_ints) + 1)]) @@ -1534,12 +1323,9 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ startidx = 0 endidx = 0 complete = False - #for batchnum in range(n_subgroups): - just need to fix this loop since we don't know n_subgroups ahead of time - #for batchnum in range(len(batch_files)): # worst case - each file is its own batch... 
change batchnum to filenum, in order to make things easier to interpret - - - while True: # this instead of "for batchnum" makes more sense + # Divide into sublists + while True: endidx = np.where(batch_int_sums >= (base + threshold))[0] @@ -1552,9 +1338,9 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ if endidx == (len(batch_int_sums) - 1): complete = True - logging.info(f'\t\t\tstartidx: {startidx}') - logging.info(f'\t\t\tendidx: {endidx}') - logging.info(f'\t\t\tcomplete: {complete}') + logging.debug(f'\t\t\tstartidx: {startidx}') + logging.debug(f'\t\t\tendidx: {endidx}') + logging.debug(f'\t\t\tcomplete: {complete}') subgroup_ints = batch_ints[startidx: endidx + 1] subgroup_files = batch_files[startidx: endidx + 1] @@ -1562,34 +1348,18 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ subgroup_end_times = batch_end_times[startidx: endidx + 1] subgroup_int_sum = np.sum(subgroup_ints) - - logging.info(f'\t\t\tsubgroup_ints: {subgroup_ints}') - logging.info(f'\t\t\tsubgroup_files: {subgroup_files}') - logging.info(f'\t\t\tsubgroup_int_sum: {subgroup_int_sum}') - - #print('batchnum: ', batchnum) - #print(batch_ints[startidx: endidx + 1]) - #print(batch_files[startidx: endidx + 1]) - #print(i, len(dividers) - 1, batchnum, n_subgroups-1) - - - + logging.debug(f'\t\t\tsubgroup_ints: {subgroup_ints}') + logging.debug(f'\t\t\tsubgroup_files: {subgroup_files}') + logging.debug(f'\t\t\tsubgroup_int_sum: {subgroup_int_sum}') # Add to output lists. The exception is if we are in the # final subgroup of the final epoch. In that case, we don't know # if more data are coming soon that may be able to be combined. So # in that case, we ignore the files for this run of the monitor. - #if (i == len(dividers) - 2) and (batchnum == (n_subgroups - 1)): if (i == len(dividers) - 2) and endidx == len(batch_files) - 1: # Here we are in the final subgroup of the final epoch, where we - # mayb not necessarily know if there will be future data to combine + # do not necessarily know if there will be future data to combine # with these data - - #Here..... we do not know for sure the epoch is over? Confirm that we do not know this. - #If that is true, we can still check to see if we have reached the threshold number of - #integrations and run if so. - #print('final subgroup of final epoch. if the epoch is not over, so skipping files') - logging.info(f'\t\t\tShould be final epoch and final subgroup. epoch number: {i}') if np.sum(subgroup_ints) >= threshold: @@ -1606,7 +1376,6 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ pass else: - #if (i < len(dividers) - 1) and (batchnum < (n_subgroups - 1)): self.file_batches.append(subgroup_files) self.start_time_batches.append(subgroup_start_times) self.end_time_batches.append(subgroup_end_times) @@ -1706,7 +1475,7 @@ def stats_by_amp(self, image, amps): maxx = copy(mxx) if mxy > maxy: maxy = copy(mxy) - amps['5'] = [(0, maxx, 1), (0, maxy, 1)] + amps['5'] = [(4, maxx, 1), (4, maxy, 1)] logging.info(('\tFull frame exposure detected. 
Adding the full frame to the list ' 'of amplifiers upon which to calculate statistics.')) diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py index 196f779a2..663d632cc 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py @@ -96,6 +96,9 @@ def calc_bin_edges(self, centers): def create_plot(self): """Place the data in a CoumnDataSource and create the plot """ + title_str = f'{self.aperture}: Dark Rate Histogram. {self.obsdate.strftime("%d %b %Y")}' + x_label = 'Dark Rate (DN/sec)' + y_label = 'Number of Pixels' if len(self.data) > 0: # Specify which key ("amplifier") to show. If there is data for amp='5', # show that, as it will be the data for the entire detector. If not then @@ -110,10 +113,6 @@ def create_plot(self): else: use_amp = '1' - title_str = f'{self.aperture}: Dark Rate Histogram. {self.obsdate.strftime("%d %b %Y")}' - x_label = 'Dark Rate (DN/sec)' - y_label = 'Number of Pixels' - # If there are histogram data for multiple amps, then we can plot each histogram. if len(self.data) > 1: # Looks like the histogram data for the individual amps is not being saved @@ -254,7 +253,7 @@ def create_plot(self): class DarkMonitorData(): - """Retrive dark monitor data from the database tables + """Retrieve dark monitor data from the database tables Attributes ---------- @@ -556,7 +555,7 @@ def get_latest_histogram_data(self): self.db.stats_data[idx].hist_amplitudes) # Keep track of the observation date of the most recent entry - self.hist_date = self.db.stats_data[most_recent_idx[0].obs_mid_time] + self.hist_date = self.db.stats_data[most_recent_idx[0]].obs_mid_time def get_trending_data(self): """Organize data for the trending plot. Here we need all the data for From 3569812dfac42f55d9e7aee1ced143780c4491ff Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Thu, 28 Dec 2023 17:02:15 -0500 Subject: [PATCH 24/38] PEP8 --- .../common_monitors/dark_monitor.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 1c363556a..174323ee1 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -989,7 +989,7 @@ def run(self): new_filenames = [] for file_entry in new_entries: try: - new_filenames.append(filesystem_path(file_entry['filename'])) + new_filenames.append(filesystem_path(file_entry['filename'])) except FileNotFoundError: logging.warning(f"\t\tUnable to locate {file_entry['filename']} in filesystem. Not including in processing.") @@ -1080,14 +1080,14 @@ def run(self): self.process(dark_files) # Get the starting and ending time of the files in this monitor run - batch_start_time = np.min(np.array(batch_start_time)) + batch_start_time = np.min(np.array(batch_start_time)) batch_end_time = np.max(np.array(batch_end_time)) # Update the query history once for each group of files new_entry = {'instrument': instrument, 'aperture': aperture, 'readpattern': self.readpatt, - 'start_time_mjd': batch_start_time, #-- something is wrong here. 
Seeing 0.0 and 2.0 in testing on server
+                                 'start_time_mjd': batch_start_time,
                                  'end_time_mjd': batch_end_time,
                                  'files_found': len(dark_files),
                                  'run_monitor': monitor_run,
                                  'entry_date': datetime.datetime.now()}

                         with engine.begin() as connection:
@@ -1305,10 +1305,10 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_
         # Within each batch, divide up the exposures into multiple batches if the total
         # number of integrations are above 2*threshold.
         for i in range(len(dividers) - 1):
-            batch_ints = integration_list[dividers[i]:dividers[i+1]]
-            batch_files = files[dividers[i]:dividers[i+1]]
-            batch_start_times = start_times[dividers[i]:dividers[i+1]]
-            batch_end_times = end_times[dividers[i]:dividers[i+1]]
+            batch_ints = integration_list[dividers[i]:dividers[i + 1]]
+            batch_files = files[dividers[i]:dividers[i + 1]]
+            batch_start_times = start_times[dividers[i]:dividers[i + 1]]
+            batch_end_times = end_times[dividers[i]:dividers[i + 1]]
             batch_int_sum = np.sum(batch_ints)

             logging.info(f'\t\t\tLoop over time-based batches. Working on batch {i}')
@@ -1399,7 +1399,6 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_
             logging.info(f'\t\t\t{fb}, {ib}')
         logging.info(f'\t\t\tDONE WITH SUBGROUPS\n\n\n\n')

-
     def stats_by_amp(self, image, amps):
         """Calculate statistics in the input image for each amplifier as
         well as the full image

From 3b03f31a40021b385cc45da2e29851f2746d8e43 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Fri, 29 Dec 2023 11:48:12 -0500
Subject: [PATCH 25/38] Filter out exps with too few groups

---
 .../common_monitors/dark_monitor.py           | 27 ++++++++++++++++++-
 jwql/utils/constants.py                       |  4 +++
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 174323ee1..ab7c40f1d 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -103,7 +103,7 @@
 from jwql.utils import calculations, instrument_properties, mast_utils, monitor_utils
 from jwql.utils.constants import ASIC_TEMPLATES, DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME, DARK_MONITOR_MAX_BADPOINTS_TO_PLOT
 from jwql.utils.constants import JWST_INSTRUMENT_NAMES, FULL_FRAME_APERTURES, JWST_INSTRUMENT_NAMES_MIXEDCASE
-from jwql.utils.constants import JWST_DATAPRODUCTS, RAPID_READPATTERNS
+from jwql.utils.constants import JWST_DATAPRODUCTS, MINIMUM_DARK_CURRENT_GROUPS, RAPID_READPATTERNS
 from jwql.utils.logging_functions import log_info, log_fail
 from jwql.utils.permissions import set_permissions
 from jwql.utils.utils import copy_files, ensure_dir_exists, get_config, filesystem_path, save_png
@@ -445,6 +445,27 @@ def exclude_existing_badpix(self, badpix, pixel_type):
         session.close()
         return (new_pixels_x, new_pixels_y)

+    def exclude_too_few_groups(self, result_list):
+        """Given a list of MAST query results, go through and exclude
+        files that have too few groups to be useful
+
+        Parameters
+        ----------
+        result_list : list
+            List of dictionaries containing a MAST query result
+
+        Returns
+        -------
+        filtered_results : list
+            List of dictionaries with files containing too few groups excluded
+        """
+        filtered_results = []
+        for result in result_list:
+            if result['ngroups'] >= MINIMUM_DARK_CURRENT_GROUPS:
+                filtered_results.append(result)
+        return filtered_results
+
     def find_hot_dead_pixels(self, mean_image, comparison_image, hot_threshold=2., dead_threshold=0.1):
         """Create the ratio of the slope image to a baseline slope image.
Pixels in the ratio image with values above @@ -983,6 +1004,10 @@ def run(self): len_no_asic = len(new_entries) num_asic = len_new_darks - len_no_asic + # Exclude files that don't have enough groups to be useful + new_entries = self.exclude_too_few_groups(new_entries) + len_new_darks = len(new_entries) + logging.info(f'\tAperture: {self.aperture}, Readpattern: {self.readpatt}, new entries: {len(new_entries)}') # Get full paths to the files diff --git a/jwql/utils/constants.py b/jwql/utils/constants.py index 672408778..d92e4759e 100644 --- a/jwql/utils/constants.py +++ b/jwql/utils/constants.py @@ -627,6 +627,10 @@ # Maximum number of records returned by MAST for a single query MAST_QUERY_LIMIT = 500000 +# Minimum number of groups per integration required to include data +# in the dark current monitor +MINIMUM_DARK_CURRENT_GROUPS = 10 + # Expected position sensor values for MIRI. Used by the EDB monitor # to filter out bad values. Tuple values are the expected value and # the standard deviation associated with the value From 98ee7b73b18c55f60e045cab3d6786ebd06c7d8e Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Fri, 29 Dec 2023 16:23:58 -0500 Subject: [PATCH 26/38] Updates to help with limited testing --- .../common_monitors/dark_monitor.py | 53 +++++++++++++++---- .../jwql/monitor_pages/monitor_dark_bokeh.py | 1 + 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index ab7c40f1d..5eb0772a2 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -875,7 +875,10 @@ def process(self, file_list): # Construct new entry for dark database table source_files = [os.path.basename(item) for item in file_list] for key in amp_mean.keys(): - dark_db_entry = {'aperture': self.aperture, 'amplifier': key, 'mean': amp_mean[key], + dark_db_entry = {'aperture': self.aperture, + 'amplifier': key, + 'readpattern': self.readpatt, + 'mean': amp_mean[key], 'stdev': amp_stdev[key], 'source_files': source_files, 'obs_start_time': min_time, @@ -949,7 +952,7 @@ def run(self): self.query_end = Time.now().mjd # Loop over all instruments - for instrument in JWST_INSTRUMENT_NAMES: + for instrument in ['miri', 'nircam']: # JWST_INSTRUMENT_NAMES: self.instrument = instrument logging.info(f'\n\nWorking on {instrument}') @@ -963,6 +966,13 @@ def run(self): # Get a list of all possible readout patterns associated with the aperture possible_readpatts = RAPID_READPATTERNS[instrument] + ########FOR TESTING########TO LIMIT THE TEST CASE + if instrument == 'miri': + possible_apertures = ['MIRIM_FULL'] + elif instrument == 'nircam': + possible_apertures = ['NRCA1_FULL'] + #########FOR TESTING####### + for aperture in possible_apertures: logging.info('') logging.info(f'Working on aperture {aperture} in {instrument}') @@ -974,6 +984,13 @@ def run(self): self.aperture = aperture # We need a separate search for each readout pattern + ########FOR TESTING########TO LIMIT THE TEST CASE + if instrument == 'miri': + possible_readpatts = ['FAST'] + elif instrument == 'nircam': + possible_readpatts = ['RAPID'] + #########FOR TESTING####### + for readpatt in possible_readpatts: self.readpatt = readpatt logging.info(f'\tWorking on readout pattern: {self.readpatt}') @@ -1071,6 +1088,12 @@ def run(self): self.aperture.lower())) ensure_dir_exists(self.data_dir) + + + print('Before splitting into sublists:') + for f in new_filenames: + 
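+                            # (temporary debug output used while exercising the monitor
+                            # on a restricted test case; removed again in a later commit)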
print(f) + # Split the list of good files into sub-lists based on the integration # threshold. The monitor will then be run on each sub-list independently, # in order to produce results with roughly the same signal-to-noise. This @@ -1078,6 +1101,12 @@ def run(self): # where it hasn't been run in a while and data have piled up in the meantime. self.split_files_into_sub_lists(new_filenames, starting_times, ending_times, integrations, integration_count_threshold) + + print('in sublists:') + print(self.file_batches) + + + # Run the monitor once on each list for new_file_list, batch_start_time, batch_end_time, batch_integrations in zip(self.file_batches, self.start_time_batches, self.end_time_batches, self.integration_batches): # Copy files from filesystem @@ -1092,6 +1121,7 @@ def run(self): if orig_size != copied_size: logging.info(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem.") logging.info(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. Skipping file.") + print(f'CAN"T COPY {os.path.basename(dark_file)} CORRECTLY!') not_copied.append(dark_file) dark_files.remove(dark_file) os.remove(dark_file) @@ -1102,7 +1132,10 @@ def run(self): logging.info('\tNot copied: {}'.format(not_copied)) # Run the dark monitor - self.process(dark_files) + #self.process(dark_files) + print(instrument, aperture, readpatt) + for f in new_file_list: + print(f) # Get the starting and ending time of the files in this monitor run batch_start_time = np.min(np.array(batch_start_time)) @@ -1118,9 +1151,9 @@ def run(self): 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - with engine.begin() as connection: - connection.execute( - self.query_table.__table__.insert(), new_entry) + #with engine.begin() as connection: + # connection.execute( + # self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') logging.info('NEW ENTRY: ') logging.info(new_entry) @@ -1139,9 +1172,9 @@ def run(self): 'run_monitor': monitor_run, 'entry_date': datetime.datetime.now()} - with engine.begin() as connection: - connection.execute( - self.query_table.__table__.insert(), new_entry) + #with engine.begin() as connection: + # connection.execute( + # self.query_table.__table__.insert(), new_entry) logging.info('\tUpdated the query history table') logging.info('NEW ENTRY: ') logging.info(new_entry) @@ -1582,7 +1615,7 @@ def stats_by_amp(self, image, amps): if __name__ == '__main__': module = os.path.basename(__file__).strip('.py') - start_time, log_file = monitor_utils.initialize_instrument_monitor(module) + #start_time, log_file = monitor_utils.initialize_instrument_monitor(module) monitor = Dark() monitor.run() diff --git a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py index 663d632cc..fb73e401d 100755 --- a/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py +++ b/jwql/website/apps/jwql/monitor_pages/monitor_dark_bokeh.py @@ -581,6 +581,7 @@ def stats_data_to_lists(self): self._amplifiers = np.array([e.amplifier for e in self.db.stats_data]) self._entry_dates = np.array([e.entry_date for e in self.db.stats_data]) self._mean = np.array([e.mean for e in self.db.stats_data]) + self._readpatt = np.array([e.readpattern for e in self.db.stats_data]) self._stdev = np.array([e.stdev for e in self.db.stats_data]) self._obs_mid_time = np.array([e.obs_mid_time for e in self.db.stats_data]) self._stats_mean_dark_image_files = np.array([e.mean_dark_image_file for e in 
self.db.stats_data]) From 44d474529b44420206d790c7f991ffe31b99ae97 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Thu, 11 Jan 2024 10:15:32 -0500 Subject: [PATCH 27/38] testing --- .../common_monitors/dark_monitor.py | 37 ++++++++++++++----- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 5eb0772a2..ef640e8b2 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -951,6 +951,14 @@ def run(self): # Use the current time as the end time for MAST query self.query_end = Time.now().mjd + + #####FOR TESTING###### + self.query_end = 59699. # For nircam full and miriim testing + #####FOR TESTING###### + + + + # Loop over all instruments for instrument in ['miri', 'nircam']: # JWST_INSTRUMENT_NAMES: self.instrument = instrument @@ -1002,7 +1010,8 @@ def run(self): logging.info('SETTING SELF.QUERY_START TO 59500 (PRE-LAUNCH) FOR TESTING.') - self.query_start = 59500. + #self.query_start = 59500. + self.query_start = 59680. # for nircam full and mirim testing @@ -1090,6 +1099,15 @@ def run(self): + + if instrument == 'miri': + new_filenames = ['/Volumes/jwst_ins/jwql/filesystem/public/jw01546/jw01546001001/jw01546001001_02101_00001_mirimage_dark.fits', + '/Volumes/jwst_ins/jwql/filesystem/public/jw01546/jw01546001001/jw01546001001_02102_00001_mirimage_dark.fits'] + starting_times = starting_times[0:2] + ending_times = ending_times[0:2] + integrations = integrations[0:2] + + print('Before splitting into sublists:') for f in new_filenames: print(f) @@ -1118,13 +1136,12 @@ def run(self): for dark_file in dark_files: copied_size = os.stat(dark_file).st_size orig_size = os.stat(filesystem_path(os.path.basename(dark_file))).st_size - if orig_size != copied_size: - logging.info(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem.") - logging.info(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. Skipping file.") - print(f'CAN"T COPY {os.path.basename(dark_file)} CORRECTLY!') - not_copied.append(dark_file) - dark_files.remove(dark_file) - os.remove(dark_file) + #if orig_size != copied_size: + # logging.error(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem!") + # logging.error(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. 
Skipping file.") + # not_copied.append(dark_file) + # dark_files.remove(dark_file) + # os.remove(dark_file) logging.info('\tNew_filenames: {}'.format(new_file_list)) logging.info('\tData dir: {}'.format(self.data_dir)) @@ -1132,7 +1149,7 @@ def run(self): logging.info('\tNot copied: {}'.format(not_copied)) # Run the dark monitor - #self.process(dark_files) + self.process(dark_files) print(instrument, aperture, readpatt) for f in new_file_list: print(f) @@ -1615,7 +1632,7 @@ def stats_by_amp(self, image, amps): if __name__ == '__main__': module = os.path.basename(__file__).strip('.py') - #start_time, log_file = monitor_utils.initialize_instrument_monitor(module) + start_time, log_file = monitor_utils.initialize_instrument_monitor(module) monitor = Dark() monitor.run() From c1fee2e996631e4f667f8a235b2cd80bdc4f0bf3 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 31 Jan 2024 14:57:58 -0500 Subject: [PATCH 28/38] update miri testing files --- jwql/instrument_monitors/common_monitors/dark_monitor.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 252f5b825..2ec2f73cf 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -1099,8 +1099,9 @@ def run(self): ensure_dir_exists(self.working_data_dir) if instrument == 'miri': - new_filenames = ['/Volumes/jwst_ins/jwql/filesystem/public/jw01546/jw01546001001/jw01546001001_02101_00001_mirimage_dark.fits', - '/Volumes/jwst_ins/jwql/filesystem/public/jw01546/jw01546001001/jw01546001001_02102_00001_mirimage_dark.fits'] + filesystem = get_config()['filesystem'] + new_filenames = [os.path.join(filesystem, '/public/jw01546/jw01546001001/jw01546001001_02101_00001_mirimage_dark.fits'), + os.path.join(filesystem, '/public/jw01546/jw01546001001/jw01546001001_02102_00001_mirimage_dark.fits')] starting_times = starting_times[0:2] ending_times = ending_times[0:2] integrations = integrations[0:2] From 291a25d5bdc9a6f4d443c74728000cc617394b50 Mon Sep 17 00:00:00 2001 From: Bryan Hilbert Date: Wed, 31 Jan 2024 17:10:42 -0500 Subject: [PATCH 29/38] Clean up print statements --- .../common_monitors/dark_monitor.py | 64 +++++++++---------- 1 file changed, 31 insertions(+), 33 deletions(-) diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py index 2ec2f73cf..1c80c9ce1 100755 --- a/jwql/instrument_monitors/common_monitors/dark_monitor.py +++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py @@ -1074,8 +1074,11 @@ def run(self): starting_times.append(hdulist[0].header['EXPSTART']) ending_times.append(hdulist[0].header['EXPEND']) else: - bad_size_filenames.append(new_file) + logging.info(f'\t\t{new_file} has unexpected aperture size. Expecting {expected_xsize}x{expected_ysize}. Got {xsize}x{ysize}') + + + if len(temp_filenames) != len(new_filenames): logging.info('\t\tSome files returned by MAST have unexpected aperture sizes. 
These files will be ignored: ') for badfile in bad_size_filenames: @@ -1100,17 +1103,14 @@ def run(self): if instrument == 'miri': filesystem = get_config()['filesystem'] - new_filenames = [os.path.join(filesystem, '/public/jw01546/jw01546001001/jw01546001001_02101_00001_mirimage_dark.fits'), - os.path.join(filesystem, '/public/jw01546/jw01546001001/jw01546001001_02102_00001_mirimage_dark.fits')] + new_filenames = [os.path.join(filesystem, 'public/jw01546/jw01546001001/jw01546001001_02101_00001_mirimage_dark.fits'), + os.path.join(filesystem, 'public/jw01546/jw01546001001/jw01546001001_02102_00001_mirimage_dark.fits')] + print('manually set new_filenames: ', new_filenames) + starting_times = starting_times[0:2] ending_times = ending_times[0:2] integrations = integrations[0:2] - - print('Before splitting into sublists:') - for f in new_filenames: - print(f) - # Split the list of good files into sub-lists based on the integration # threshold. The monitor will then be run on each sub-list independently, # in order to produce results with roughly the same signal-to-noise. This @@ -1118,16 +1118,10 @@ def run(self): # where it hasn't been run in a while and data have piled up in the meantime. self.split_files_into_sub_lists(new_filenames, starting_times, ending_times, integrations, integration_count_threshold) - - print('in sublists:') - print(self.file_batches) - - - # Run the monitor once on each list for new_file_list, batch_start_time, batch_end_time, batch_integrations in zip(self.file_batches, self.start_time_batches, self.end_time_batches, self.integration_batches): # Copy files from filesystem - dark_files, not_copied = copy_files(new_file_list, self.data_dir) + dark_files, not_copied = copy_files(new_file_list, self.working_data_dir) # Check that there were no problems with the file copying. If any of the copied # files have different sizes between the MAST filesystem and the JWQL filesystem, @@ -1135,28 +1129,32 @@ def run(self): for dark_file in dark_files: copied_size = os.stat(dark_file).st_size orig_size = os.stat(filesystem_path(os.path.basename(dark_file))).st_size - #if orig_size != copied_size: - # logging.error(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem!") - # logging.error(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. Skipping file.") - # not_copied.append(dark_file) - # dark_files.remove(dark_file) - # os.remove(dark_file) + if orig_size != copied_size: + logging.error(f"\tProblem copying {os.path.basename(dark_file)} from the filesystem!") + logging.error(f"Size in filesystem: {orig_size}, size of copy: {copied_size}. 
Skipping file.") + not_copied.append(dark_file) + dark_files.remove(dark_file) + os.remove(dark_file) logging.info('\tNew_filenames: {}'.format(new_file_list)) - logging.info('\tData dir: {}'.format(self.data_dir)) + logging.info('\tData dir: {}'.format(self.working_data_dir)) logging.info('\tCopied to data dir: {}'.format(dark_files)) logging.info('\tNot copied: {}'.format(not_copied)) - # Run the dark monitor - self.process(dark_files) - print(instrument, aperture, readpatt) - for f in new_file_list: - print(f) - # Get the starting and ending time of the files in this monitor run batch_start_time = np.min(np.array(batch_start_time)) batch_end_time = np.max(np.array(batch_end_time)) + if len(dark_files) > 0: + # Run the dark monitor + logging.info(f'\tRunning process for {instrument}, {aperture}, {readpatt} with:') + for dkfile in dark_files: + logging.info(f'\t{dkfile}') + self.process(dark_files) + else: + logging.info('\tNo files remaining to process. Skipping monitor.') + monitor_run = False + # Update the query history once for each group of files new_entry = {'instrument': instrument, 'aperture': aperture, @@ -1371,7 +1369,7 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ dividers = np.insert(dividers, len(dividers), len(delta_t)) logging.info(f'\t\t\tThreshold delta time used to divide epochs: {DARK_MONITOR_BETWEEN_EPOCH_THRESHOLD_TIME[self.instrument]} days') - logging.info(f'\t\t\tdelta_t between files: {delta_t}') + logging.info(f'\t\t\tdelta_t between files: {delta_t} days.') logging.info(f'\t\t\tFinal dividers (divide data based on time gaps between files): {dividers}') logging.info('\n') @@ -1434,10 +1432,10 @@ def split_files_into_sub_lists(self, files, start_times, end_times, integration_ # Here we are in the final subgroup of the final epoch, where we # do not necessarily know if there will be future data to combine # with these data - logging.info(f'\t\t\tShould be final epoch and final subgroup. epoch number: {i}') + logging.debug(f'\t\t\tShould be final epoch and final subgroup. 
From a1913495c36b0f1fb63651aea08018977eab94af Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Wed, 31 Jan 2024 17:21:50 -0500
Subject: [PATCH 30/38] Turn on database updates

---
 .../common_monitors/dark_monitor.py           | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 1c80c9ce1..19fd72422 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1165,9 +1165,9 @@ def run(self):
                                          'run_monitor': monitor_run,
                                          'entry_date': datetime.datetime.now()}
 
-                            #with engine.begin() as connection:
-                            #    connection.execute(
-                            #        self.query_table.__table__.insert(), new_entry)
+                            with engine.begin() as connection:
+                                connection.execute(
+                                    self.query_table.__table__.insert(), new_entry)
                             logging.info('\tUpdated the query history table')
                             logging.info('NEW ENTRY: ')
                             logging.info(new_entry)
@@ -1186,9 +1186,9 @@ def run(self):
                 'run_monitor': monitor_run,
                 'entry_date': datetime.datetime.now()}
 
-            #with engine.begin() as connection:
-            #    connection.execute(
-            #        self.query_table.__table__.insert(), new_entry)
+            with engine.begin() as connection:
+                connection.execute(
+                    self.query_table.__table__.insert(), new_entry)
             logging.info('\tUpdated the query history table')
             logging.info('NEW ENTRY: ')
             logging.info(new_entry)

From 65759756022f802cc8b85e672c80599b323f3ffd Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Fri, 2 Feb 2024 12:12:32 -0500
Subject: [PATCH 31/38] Remove restricted time limits and apertures from testing

---
 .../common_monitors/dark_monitor.py           | 75 +++++--------------
 1 file changed, 19 insertions(+), 56 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 19fd72422..926eed8ac 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -951,14 +951,6 @@ def run(self):
         # Use the current time as the end time for MAST query
         self.query_end = Time.now().mjd
 
-
-        #####FOR TESTING######
-        self.query_end = 59699.  # For nircam full and mirim testing
-        #####FOR TESTING#######
-
-
-
-
         # Loop over all instruments
         for instrument in ['miri', 'nircam']:  # JWST_INSTRUMENT_NAMES:
             self.instrument = instrument
@@ -974,13 +966,6 @@ def run(self):
             # Get a list of all possible readout patterns associated with the aperture
             possible_readpatts = RAPID_READPATTERNS[instrument]
 
-            ########FOR TESTING########TO LIMIT THE TEST CASE
-            if instrument == 'miri':
-                possible_apertures = ['MIRIM_FULL']
-            elif instrument == 'nircam':
-                possible_apertures = ['NRCA1_FULL']
-            #########FOR TESTING#######
-
             for aperture in possible_apertures:
                 logging.info('')
                 logging.info(f'Working on aperture {aperture} in {instrument}')
@@ -991,32 +976,12 @@ def run(self):
                 self.skipped_initial_ints = limits['N_skipped_integs'][match][0]
                 self.aperture = aperture
 
-                # We need a separate search for each readout pattern
-                ########FOR TESTING########TO LIMIT THE TEST CASE
-                if instrument == 'miri':
-                    possible_readpatts = ['FAST']
-                elif instrument == 'nircam':
-                    possible_readpatts = ['RAPID']
-                #########FOR TESTING#######
-
                 for readpatt in possible_readpatts:
                     self.readpatt = readpatt
                     logging.info(f'\tWorking on readout pattern: {self.readpatt}')
 
                     # Locate the record of the most recent MAST search
-                    #self.query_start = self.most_recent_search()
-
-
-
-
-                    logging.info('SETTING SELF.QUERY_START TO 59500 (PRE-LAUNCH) FOR TESTING.')
-                    #self.query_start = 59500.
-                    self.query_start = 59680.  # for nircam full and mirim testing
-
-
-
-
-
-
+                    self.query_start = self.most_recent_search()
                     logging.info(f'\tQuery times: {self.query_start} {self.query_end}')
 
                     # Query MAST using the aperture and the time of the
@@ -1042,7 +1007,8 @@ def run(self):
                         try:
                             new_filenames.append(filesystem_path(file_entry['filename']))
                         except FileNotFoundError:
-                            logging.warning(f"\t\tUnable to locate {file_entry['filename']} in filesystem. Not including in processing.")
+                            logging.warning((f"\t\tUnable to locate {file_entry['filename']} in filesystem. "
+                                             "Not including in processing."))
 
                     # Generate a count of the total number of integrations across the files. This number will
                     # be compared to the threshold value to determine if the monitor is run.
@@ -1067,7 +1033,8 @@ def run(self):
                         # keep the file. Also, make sure there is at least one integration, after ignoring any user-input
                         # number of integrations.
                         keep_ints = int(nints) - self.skipped_initial_ints
-                        if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize) or expected_xsize is None or expected_ysize is None)):
+                        if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize) or
+                                                 expected_xsize is None or expected_ysize is None)):
                             temp_filenames.append(new_file)
                             total_integrations += int(nints)
                             integrations.append(int(nints) - self.skipped_initial_ints)
                             starting_times.append(hdulist[0].header['EXPSTART'])
                             ending_times.append(hdulist[0].header['EXPEND'])
                         else:
                             bad_size_filenames.append(new_file)
-                            logging.info(f'\t\t{new_file} has unexpected aperture size. Expecting {expected_xsize}x{expected_ysize}. Got {xsize}x{ysize}')
+                            logging.info((f'\t\t{new_file} has unexpected aperture size. Expecting '
+                                          f'{expected_xsize}x{expected_ysize}. Got {xsize}x{ysize}'))
 
                 if len(temp_filenames) != len(new_filenames):
-                    logging.info('\t\tSome files returned by MAST have unexpected aperture sizes. These files will be ignored: ')
+                    logging.info(('\t\tSome files returned by MAST have unexpected aperture sizes. These files '
+                                  'will be ignored: '))
                     for badfile in bad_size_filenames:
                         logging.info('\t\t\t{}'.format(badfile))
                     new_filenames = deepcopy(temp_filenames)
 
                     # Check to see if there are enough new integrations to meet the
                     # monitor's signal-to-noise requirements
                     if len(new_filenames) > 0:
                         logging.info((f'\t\tFilesystem search for new dark integrations for {self.instrument}, {self.aperture}, '
-                                      f'{self.readpatt} has found {total_integrations} integrations spread across {len(new_filenames)} files.'))
+                                      f'{self.readpatt} has found {total_integrations} integrations spread '
+                                      f'across {len(new_filenames)} files.'))
                         if total_integrations >= integration_count_threshold:
                             logging.info(f'\tThis meets the threshold of {integration_count_threshold} integrations.')
                             monitor_run = True
 
                             # Set up directories for the copied data
                             ensure_dir_exists(os.path.join(self.working_output_dir, 'data'))
                             self.working_data_dir = os.path.join(self.working_output_dir,
                                                                  'data/{}_{}'.format(self.instrument.lower(),
                                                                                      self.aperture.lower()))
                             ensure_dir_exists(self.working_data_dir)
 
-                            if instrument == 'miri':
-                                filesystem = get_config()['filesystem']
-                                new_filenames = [os.path.join(filesystem, 'public/jw01546/jw01546001001/jw01546001001_02101_00001_mirimage_dark.fits'),
-                                                 os.path.join(filesystem, 'public/jw01546/jw01546001001/jw01546001001_02102_00001_mirimage_dark.fits')]
-                                print('manually set new_filenames: ', new_filenames)
-
-                                starting_times = starting_times[0:2]
-                                ending_times = ending_times[0:2]
-                                integrations = integrations[0:2]
-
                             # Split the list of good files into sub-lists based on the integration
                             # threshold. The monitor will then be run on each sub-list independently,
                             # in order to produce results with roughly the same signal-to-noise. This
                             # also prevents the monitor running on a huge chunk of files in the case
                             # where it hasn't been run in a while and data have piled up in the meantime.
-                            self.split_files_into_sub_lists(new_filenames, starting_times, ending_times, integrations, integration_count_threshold)
+                            self.split_files_into_sub_lists(new_filenames, starting_times, ending_times,
+                                                            integrations, integration_count_threshold)
 
                             # Run the monitor once on each list
-                            for new_file_list, batch_start_time, batch_end_time, batch_integrations in zip(self.file_batches, self.start_time_batches, self.end_time_batches, self.integration_batches):
+                            for new_file_list, batch_start_time, batch_end_time, batch_integrations in zip(self.file_batches,
+                                                                                                            self.start_time_batches,
+                                                                                                            self.end_time_batches,
+                                                                                                            self.integration_batches):
                                 # Copy files from filesystem
                                 dark_files, not_copied = copy_files(new_file_list, self.working_data_dir)
 
@@ -1173,7 +1135,8 @@ def run(self):
                                 logging.info(new_entry)
 
                     else:
-                        logging.info(f'\tThis is below the threshold of {integration_count_threshold} integrations. Monitor not run.')
+                        logging.info((f'\tThis is below the threshold of {integration_count_threshold} '
+                                      'integrations. Monitor not run.'))
                        monitor_run = False
 
                     # Update the query history
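Patch 31 also settles the gate that decides whether the monitor runs at all: the tally of integrations, read from each file's NINTS header keyword, must reach the per-aperture integration count threshold, and a file only counts if it still has an integration left after the per-aperture N_skipped_integs value is ignored. A standalone sketch of that tally (the helper name and arguments are hypothetical, not the monitor's API, and the aperture-size checks are omitted):

    from astropy.io import fits

    def count_total_integrations(filenames, skipped_initial_ints=0):
        # Mirror of the patch's tally: a file contributes only if it has
        # at least one integration beyond the skipped initial ones, but
        # the running total uses the full NINTS value.
        total = 0
        for filename in filenames:
            with fits.open(filename) as hdulist:
                nints = int(hdulist[0].header['NINTS'])
            if nints - skipped_initial_ints > 0:
                total += nints
        return total

    # The monitor-style gate would then be, roughly:
    # monitor_run = count_total_integrations(files, n_skipped) >= integration_count_threshold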
From 256a673ae28d535f568cf55bc488dae95cf80a05 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Fri, 2 Feb 2024 12:15:33 -0500
Subject: [PATCH 32/38] pep8

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index 926eed8ac..dd8aa44b1 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -465,7 +465,6 @@ def exclude_too_few_groups(self, result_list):
                 filtered_results.append(result)
 
         return filtered_results
-
     def find_hot_dead_pixels(self, mean_image, comparison_image, hot_threshold=2., dead_threshold=0.1):
         """Create the ratio of the slope image to a baseline
         slope image. Pixels in the ratio image with values above
@@ -1033,8 +1032,8 @@ def run(self):
                         # keep the file. Also, make sure there is at least one integration, after ignoring any user-input
                         # number of integrations.
                         keep_ints = int(nints) - self.skipped_initial_ints
-                        if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize) or
-                                expected_xsize is None or expected_ysize is None)):
+                        if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize)
+                            or expected_xsize is None or expected_ysize is None)):

From 380ab3b228d450f823d2306e65b20a4e028ed5f7 Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Fri, 2 Feb 2024 12:16:33 -0500
Subject: [PATCH 33/38] pep8, make up your mind

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index dd8aa44b1..b7f732c7b 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1033,7 +1033,7 @@ def run(self):
                         # number of integrations.
                         keep_ints = int(nints) - self.skipped_initial_ints
                         if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize)
-                            or expected_xsize is None or expected_ysize is None)):
+                                                 or expected_xsize is None or expected_ysize is None)):
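The three pep8 commits here (32 through 34) are circling a real pycodestyle ambiguity rather than a bug: W503 flags a line break before a binary operator and W504 flags a break after one, so a long boolean condition can be rewrapped endlessly depending on which warning a given checker enables. Current PEP 8 guidance prefers the break before the operator, which is the form the series finally settles on. In miniature (variable names are illustrative):

    keep_ints, sizes_match = 3, True

    # Break after the operator (flagged by W504):
    keep = (keep_ints > 0 and
            sizes_match)

    # Break before the operator (flagged by W503, but the style that
    # current PEP 8 recommends and that these commits converge on):
    keep = (keep_ints > 0
            and sizes_match)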
From 513d902f6ddade3347463e73ec0528786f54cbae Mon Sep 17 00:00:00 2001
From: Bryan Hilbert
Date: Fri, 2 Feb 2024 12:18:05 -0500
Subject: [PATCH 34/38] pep8, you can do it

---
 jwql/instrument_monitors/common_monitors/dark_monitor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jwql/instrument_monitors/common_monitors/dark_monitor.py b/jwql/instrument_monitors/common_monitors/dark_monitor.py
index b7f732c7b..9a1968921 100755
--- a/jwql/instrument_monitors/common_monitors/dark_monitor.py
+++ b/jwql/instrument_monitors/common_monitors/dark_monitor.py
@@ -1033,7 +1033,7 @@ def run(self):
                         # number of integrations.
                         keep_ints = int(nints) - self.skipped_initial_ints
                         if ((keep_ints > 0) and ((xsize == expected_xsize and ysize == expected_ysize)
-                                                 or expected_xsize is None or expected_ysize is None)):
+                             or expected_xsize is None or expected_ysize is None)):
                             temp_filenames.append(new_file)
                             total_integrations += int(nints)
                             integrations.append(int(nints) - self.skipped_initial_ints)

From 59d7815c2a51bbdd0bed9ae978df66eb9dba5673 Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Tue, 20 Feb 2024 15:42:58 -0500
Subject: [PATCH 35/38] create pull_jwql_branch.sh

---
 jwql/pull_jwql_branch.sh | 83 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 jwql/pull_jwql_branch.sh

diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh
new file mode 100644
index 000000000..73c7fb8b4
--- /dev/null
+++ b/jwql/pull_jwql_branch.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+function echo_format {
+    echo "WARNING! the optional parameters should only be used during a JWQL release in production"
+    echo "Usage: $0 <branch> [-r|--reset_service] [-n|--notify <email_address>]"
+}
+
+# Check if the required number of arguments are provided
+if [ "$#" -lt 1 ]; then
+    echo_format
+    exit 1
+fi
+
+# Set default values for optional flags
+reset=false
+notify=false
+recipient=""
+
+# Retrieve the branch_name from the command line argument
+branch_name=$1
+# Parse optional flags
+while [[ $# -gt 1 ]]; do
+    case "$2" in
+        -r|--reset_service)
+            reset=true
+            ;;
+        -n|--notify)
+            notify=true
+            recipient="$3"
+            shift
+            ;;
+        *)
+            echo "Error: Invalid option $2"
+            echo_format
+            exit 1
+            ;;
+    esac
+    shift
+done
+
+if [ "$notify" = true ] && [ -z "$recipient" ]; then
+    echo_format
+    exit 1
+fi
+
+echo "Branch: $branch_name";
+echo "Reset: $reset";
+echo "Notify: $notify $recipient";
+
+# 1. Pull updated code from GitHub deployment branch (keep second checkout in case it's already defined for some weird reason)
+git checkout -b $branch_name --track origin/$branch_name
+git checkout $branch_name
+git fetch origin $branch_name
+git pull origin $branch_name
+git fetch origin --tags
+
+# 2. Bring the server down and back up
+if [ "$reset" = true ]; then
+    sudo /bin/systemctl stop jwql.service
+fi
+
+# 3. Install jwql
+pip install -e ..
+
+# 4. Merge Any Migrations
+python ./website/manage.py migrate
+
+# 5. Bring the server back up
+if [ "$reset" = true ]; then
+    sudo /bin/systemctl start jwql.service
+fi
+
+# 6. Initialize any new databases that have been added
+python ./database/database_interface.py
+
+# 7. Send out notification email
+if [ "$notify" = true ] && [ -n "$recipient" ]; then
+    subject="JWQL $branch_name Released"
+    message_content="Hello, A new version of JWQL ($branch_name) has just been deployed to jwql.stsci.edu. Visit https://github.com/spacetelescope/jwql/releases for more information."
+    echo "$message_content" | mail -s "$subject" "$recipient"
+    echo "Notification Email Sent"
+    echo "Deployment Complete!"
+fi
\ No newline at end of file

From eaf2f8baf665fd70604c5d93bf06993e50da22b8 Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Tue, 20 Feb 2024 15:56:29 -0500
Subject: [PATCH 36/38] comment fix

---
 jwql/pull_jwql_branch.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh
index 73c7fb8b4..7137c0d98 100644
--- a/jwql/pull_jwql_branch.sh
+++ b/jwql/pull_jwql_branch.sh
@@ -54,7 +54,7 @@ git fetch origin $branch_name
 git pull origin $branch_name
 git fetch origin --tags
 
-# 2. Bring the server down and back up
+# 2. Bring the server down
 if [ "$reset" = true ]; then
     sudo /bin/systemctl stop jwql.service
 fi

From 9acb9f18fc03c45ccd64002c84191f6a4e353566 Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Tue, 20 Feb 2024 16:14:49 -0500
Subject: [PATCH 37/38] remove server

---
 jwql/pull_jwql_branch.sh | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh
index 7137c0d98..2afbc1a55 100644
--- a/jwql/pull_jwql_branch.sh
+++ b/jwql/pull_jwql_branch.sh
@@ -76,8 +76,7 @@ python ./database/database_interface.py
 # 7. Send out notification email
 if [ "$notify" = true ] && [ -n "$recipient" ]; then
     subject="JWQL $branch_name Released"
-    message_content="Hello, A new version of JWQL ($branch_name) has just been deployed to jwql.stsci.edu. Visit https://github.com/spacetelescope/jwql/releases for more information."
+    message_content="Hello, A new version of JWQL ($branch_name) has just been released. Visit https://github.com/spacetelescope/jwql/releases for more information."
     echo "$message_content" | mail -s "$subject" "$recipient"
     echo "Notification Email Sent"
-    echo "Deployment Complete!"
 fi
\ No newline at end of file

From 51be92ca3ac74a210bc004e3a3c7516af06eed89 Mon Sep 17 00:00:00 2001
From: Bradley Sappington
Date: Wed, 21 Feb 2024 07:52:42 -0500
Subject: [PATCH 38/38] update echo_format

---
 jwql/pull_jwql_branch.sh | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/jwql/pull_jwql_branch.sh b/jwql/pull_jwql_branch.sh
index 2afbc1a55..95a1c94b4 100644
--- a/jwql/pull_jwql_branch.sh
+++ b/jwql/pull_jwql_branch.sh
@@ -1,8 +1,22 @@
 #!/bin/bash
 
 function echo_format {
-    echo "WARNING! the optional parameters should only be used during a JWQL release in production"
+    echo ""
     echo "Usage: $0 <branch> [-r|--reset_service] [-n|--notify <email_address>]"
+    echo ""
+    echo "WARNING! the optional parameters should only be used during a JWQL release in production"
+    echo "branch: the git branch to pull from"
+    echo "[-r|--reset_service]: Reset the jwql service"
+    echo "[-n|--notify <email_address>]: Notify via provided email"
+    echo ""
+    echo "Local:"
+    echo "$ bash pull_jwql_branch.sh develop"
+    echo ""
+    echo "Test:"
+    echo "$ bash pull_jwql_branch.sh v1.2 -r"
+    echo ""
+    echo "Production:"
+    echo "$ bash pull_jwql_branch.sh v1.2 -r -n group_email_address@stsci.edu"
 }
 
 # Check if the required number of arguments are provided
@@ -54,7 +68,7 @@ git fetch origin $branch_name
 git pull origin $branch_name
 git fetch origin --tags
 
-# 2. Bring the server down
+# 2. Bring the service down
 if [ "$reset" = true ]; then
     sudo /bin/systemctl stop jwql.service
 fi
@@ -65,7 +79,7 @@ pip install -e ..
 # 4. Merge Any Migrations
 python ./website/manage.py migrate
 
-# 5. Bring the service back up
+# 5. Bring the service back up
 if [ "$reset" = true ]; then
     sudo /bin/systemctl start jwql.service
 fi
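Since patch 38 turns echo_format into de facto documentation for the release script, one usage note is worth spelling out: even the simplest invocation pulls the branch, reinstalls the package, and runs the migration and database steps; only the service restart and the notification email are optional. There is no dry-run mode, so a check of the argument parsing still has side effects. Assuming a disposable checkout (the path and branch below are illustrative):

    cd /path/to/jwql/jwql          # the directory containing pull_jwql_branch.sh
    bash pull_jwql_branch.sh develop
    # The first lines of output report the parsed state:
    #   Branch: develop
    #   Reset: false
    #   Notify: false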