diff --git a/drizzlepac/hapsequencer.py b/drizzlepac/hapsequencer.py index 0a65abf09..c340175af 100644 --- a/drizzlepac/hapsequencer.py +++ b/drizzlepac/hapsequencer.py @@ -138,18 +138,19 @@ def create_catalog_products(total_obj_list, log_level, diagnostic_mode=False, ph # Make sure this is re-initialized for the new total product phot_mode = input_phot_mode + # Generate an "n" exposure mask which has the image footprint set to the number + # of exposures which constitute each pixel. + total_product_obj.generate_footprint_mask() + # Instantiate filter catalog product object total_product_catalogs = HAPCatalogs(total_product_obj.drizzle_filename, total_product_obj.configobj_pars.get_pars('catalog generation'), total_product_obj.configobj_pars.get_pars('quality control'), + total_product_obj.mask, log_level, types=input_phot_mode, diagnostic_mode=diagnostic_mode) - # Generate an "n" exposure mask which has the image footprint set to the number - # of exposures which constitute each pixel. 
- total_product_obj.generate_footprint_mask() - # Identify sources in the input image and delay writing the total detection # catalog until the photometric measurements have been done on the filter # images and some of the measurements can be appended to the total catalog @@ -220,6 +221,7 @@ def create_catalog_products(total_obj_list, log_level, diagnostic_mode=False, ph filter_product_catalogs = HAPCatalogs(filter_product_obj.drizzle_filename, total_product_obj.configobj_pars.get_pars('catalog generation'), total_product_obj.configobj_pars.get_pars('quality control'), + total_product_obj.mask, log_level, types=phot_mode, diagnostic_mode=diagnostic_mode, diff --git a/drizzlepac/haputils/catalog_utils.py b/drizzlepac/haputils/catalog_utils.py index bb7d92a30..5215a7b0f 100644 --- a/drizzlepac/haputils/catalog_utils.py +++ b/drizzlepac/haputils/catalog_utils.py @@ -12,7 +12,7 @@ from astropy.convolution import RickerWavelet2DKernel from astropy.coordinates import SkyCoord import numpy as np -from scipy import ndimage +from scipy import ndimage, stats from photutils import CircularAperture, CircularAnnulus, DAOStarFinder, IRAFStarFinder from photutils import Background2D, SExtractorBackground, StdBackgroundRMS @@ -41,7 +41,7 @@ class CatalogImage: - def __init__(self, filename, log_level): + def __init__(self, filename, num_images_mask, log_level): # set logging level to user-specified level log.setLevel(log_level) @@ -52,6 +52,10 @@ def __init__(self, filename, log_level): self.imghdu = filename self.imgname = filename.filename() + # This is the "footprint_mask" of the total product object which indicates + # the number of images which comprise each individual pixel + self.num_images_mask = num_images_mask + # Get header information to annotate the output catalogs if "total" in self.imgname: self.ghd_product = "tdp" @@ -71,6 +75,8 @@ def __init__(self, filename, log_level): self.bkg_background_ra = None self.bkg_rms_ra = None self.bkg_rms_median = None + 
self.footprint_mask = None + self.inv_footprint_mask = None # Populated by self.build_kernel() self.kernel = None @@ -89,6 +95,7 @@ def close(self): def build_kernel(self, box_size, win_size, fwhmpsf, simple_bkg=False, bkg_skew_threshold=0.5, + zero_percent=25.0, negative_percent=15.0, negative_threshold=-0.5, nsigma_clip=3.0, @@ -99,6 +106,7 @@ def build_kernel(self, box_size, win_size, fwhmpsf, self.compute_background(box_size, win_size, simple_bkg=simple_bkg, bkg_skew_threshold=bkg_skew_threshold, + zero_percent=zero_percent, negative_percent=negative_percent, negative_threshold=negative_threshold, nsigma_clip=nsigma_clip, @@ -120,6 +128,7 @@ def compute_background(self, box_size, win_size, bkg_estimator=SExtractorBackground, rms_estimator=StdBackgroundRMS, simple_bkg=False, bkg_skew_threshold=0.5, + zero_percent=25.0, negative_percent=15.0, negative_threshold=0.0, nsigma_clip=3.0, @@ -151,6 +160,12 @@ def compute_background(self, box_size, win_size, will be computed for potential use for the background determination, otherwise the sigma_clipped_stats algorithm is used. + zero_percent : float, optional + Discriminator on the input image. The percentage of zero values in the illuminated portion + of the input image is determined - if there are more zero values than this lower limit, then + the background is set to an image of constant value zero and the background rms is computed + based on the pixels which are non-zero in the illuminated portion of the input image. + negative_percent : float, optional Discriminator on the background-subtracted image. 
The percentage of negative values in the background-subtracted image is determined - below this limit the Background2D algorithm stays in play, @@ -180,11 +195,19 @@ def compute_background(self, box_size, win_size, self.bkg_rms_median : float background rms value over entire 2D array + self.footprint_mask : bool 2D ndarray + Footprint of input image set to True for the illuminated portion and False for + the non-illuminated portion + + self.inv_footprint_mask : bool 2D ndarray + Inverse of the footprint_mask + """ # Report configuration values to log log.info("") log.info("Background Computation") log.info("File: {}".format(self.imgname)) + log.info("Zero threshold: {}".format(zero_percent)) log.info("Sigma-clipped Background Configuration Variables") log.info(" Negative threshold: {}".format(negative_threshold)) log.info(" Negative percent: {}".format(negative_percent)) @@ -197,69 +220,98 @@ def compute_background(self, box_size, win_size, # SExtractorBackground ans StdBackgroundRMS are the defaults bkg = None + is_zero_background_defined = False # Make a local copy of the data(image) being processed in order to reset any # data values which equal nan (e.g., subarrays) to zero. imgdata = self.data.copy() - imgdata[np.isnan(imgdata)] = 0 - - # Create mask to reject any sources located less than 10 pixels from a image/chip edge - # to be used for the sigma-clipping - binary_inverted_wht = np.where(imgdata == 0, 1, 0) - self.exclude_zones_mask = ndimage.binary_dilation(binary_inverted_wht, iterations=10) - # Compute a sigma-clipped background which returns only single values for mean, + # In order to compute the proper statistics on the input data, need to use the footprint + # mask to get the actual data - illuminated portion (True), non-illuminated (False).
+ self.footprint_mask = self.num_images_mask > 0 + self.inv_footprint_mask = np.invert(self.footprint_mask) + + # If the image contains a lot of values identically equal to zero (as in some SBC images), + # set the two-dimensional background image to a constant of zero and the background rms to + # the real rms of the non-zero values in the image. + num_of_illuminated_pixels = self.footprint_mask.sum() + num_of_zeros = np.count_nonzero(imgdata[self.footprint_mask] == 0) + non_zero_pixels = imgdata[self.footprint_mask] + + # BACKGROUND COMPUTATION 1 (unusual case) + # If there are too many background zeros in the image (> number_of_zeros_in_background_threshold), set the + # background median and background rms values + if num_of_zeros / float(num_of_illuminated_pixels) * 100.0 > zero_percent: + self.bkg_median = 0.0 + self.bkg_rms_median = stats.tstd(non_zero_pixels, limits=[0, None], inclusive=[False, True]) + self.bkg_background_ra = np.full_like(imgdata, 0.0) + self.bkg_rms_ra = np.full_like(imgdata, self.bkg_rms_median) + + is_zero_background_defined = True + log.info("Input image contains excessive zero values in the background. 
Median: {} RMS: {}".format(self.bkg_median, self.bkg_rms_median)) + + # BACKGROUND COMPUTATION 2 (sigma_clipped_stats) + # If the input data is not the unusual case of an "excessive zero background", compute + # a sigma-clipped background which returns only single values for mean, # median, and standard deviations - log.info("") - log.info("Computing the background using sigma-clipped statistics algorithm.") - bkg_mean, bkg_median, bkg_rms = sigma_clipped_stats(imgdata, - self.exclude_zones_mask, - sigma=nsigma_clip, - cenfunc='median', - maxiters=maxiters) - - log.info("Sigma-clipped Statistics - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms)) - log.info("") + if not is_zero_background_defined: + log.info("") + log.info("Computing the background using sigma-clipped statistics algorithm.") + bkg_mean, bkg_median, bkg_rms = sigma_clipped_stats(imgdata, + self.inv_footprint_mask, + sigma=nsigma_clip, + cenfunc='median', + maxiters=maxiters) + + log.info("Sigma-clipped Statistics - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms)) + log.info("") - # Compute Pearson’s second coefficient of skewness - this is a criterion - # for possibly computing a two-dimensional background fit - bkg_skew = 3.0 * (bkg_mean - bkg_median) / bkg_rms - log.info("Sigma-clipped computed skewness: {0:.2f}".format(bkg_skew)) - - # Compute a minimum rms value based upon information directly from the data - if self.keyword_dict["detector"] != "SBC": - minimum_rms = self.keyword_dict['atodgn'] * self.keyword_dict['readnse'] \ - * self.keyword_dict['ndrizim'] / self.keyword_dict['texpo_time'] - - # Compare a minimum rms based upon input characteristics versus the one computed and use - # the larger of the two values. 
- if (bkg_rms < minimum_rms): - bkg_rms = minimum_rms - log.info("Mimimum RMS of input based upon the readnoise, gain, number of exposures, and total exposure time: {}".format(minimum_rms)) - log.info("Sigma-clipped RMS has been updated - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms)) + # Compute Pearson’s second coefficient of skewness - this is a criterion + # for possibly computing a two-dimensional background fit + # Use the "raw" values generated by sigma_clipped_stats() + bkg_skew = 3.0 * (bkg_mean - bkg_median) / bkg_rms + log.info("Sigma-clipped computed skewness: {0:.2f}".format(bkg_skew)) + + # Ensure the computed values are not negative + if bkg_mean < 0.0 or bkg_median < 0.0 or bkg_rms < 0.0: + bkg_mean = max(0, bkg_mean) + bkg_median = max(0, bkg_median) + bkg_rms = max(0, bkg_rms) + log.info("UPDATED Sigma-clipped Statistics - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms)) log.info("") - # Generate two-dimensional background and rms images with the attributes of - # the input data, but the content based on the sigma-clipped statistics. - # bkg_median ==> background and bkg_rms ==> background rms - self.bkg_background_ra = np.full_like(imgdata, bkg_median) - self.bkg_rms_ra = np.full_like(imgdata, bkg_rms) - self.bkg_median = bkg_median - self.bkg_rms_median = bkg_rms - + # Compute a minimum rms value based upon information directly from the data + if self.keyword_dict["detector"].upper() != "SBC": + minimum_rms = self.keyword_dict['atodgn'] * self.keyword_dict['readnse'] \ + * self.keyword_dict['ndrizim'] / self.keyword_dict['texpo_time'] + + # Compare a minimum rms based upon input characteristics versus the one computed and use + # the larger of the two values. 
+ if (bkg_rms < minimum_rms): + bkg_rms = minimum_rms + log.info("Mimimum RMS of input based upon the readnoise, gain, number of exposures, and total exposure time: {}".format(minimum_rms)) + log.info("Sigma-clipped RMS has been updated - Background mean: {} median: {} rms: {}".format(bkg_mean, bkg_median, bkg_rms)) + log.info("") + + # Generate two-dimensional background and rms images with the attributes of + # the input data, but the content based on the sigma-clipped statistics. + # bkg_median ==> background and bkg_rms ==> background rms + self.bkg_background_ra = np.full_like(imgdata, bkg_median) + self.bkg_rms_ra = np.full_like(imgdata, bkg_rms) + self.bkg_median = bkg_median + self.bkg_rms_median = bkg_rms + + # BACKGROUND COMPUTATION 3 (Background2D) # The simple_bkg = True is the way to force the background to be computed with the # sigma-clipped algorithm, regardless of any other criterion. If simple_bkg == True, - # this routine is done, otherwise try to use Background2D to compute the background. - if not simple_bkg: + # the compute_background() is done, otherwise try to use Background2D to compute the background. + if not simple_bkg and not is_zero_background_defined: # If the sigma-clipped background image skew is less than the threshold, # compute a two-dimensional background fit. 
if bkg_skew < bkg_skew_threshold: log.info("Computing the background using the Background2D algorithm.") - # Create the mask to ignore pixels with the value of 0 - mask = (imgdata == 0) - exclude_percentiles = [10, 25, 50, 75] for percentile in exclude_percentiles: log.info("Percentile in use: {}".format(percentile)) @@ -268,10 +320,11 @@ def compute_background(self, box_size, win_size, bkg_estimator=bkg_estimator(), bkgrms_estimator=rms_estimator(), exclude_percentile=percentile, edge_method="pad", - mask=mask) + mask=self.inv_footprint_mask) - # Apply the coverage mask to the returned background image - bkg.background *= ~mask + # Apply the coverage mask to the returned background image to clear out + # any information in the non-illuminated portion of the image + bkg.background *= self.footprint_mask except Exception: bkg = None @@ -284,26 +337,20 @@ def compute_background(self, box_size, win_size, bkg_median = bkg.background_median break - del mask - # If computation of a two-dimensional background image were successful, compute the # background-subtracted image and evaluate it for the number of negative values. # # If bkg is None, use the sigma-clipped statistics for the background. - # If bkd is not None, but the background-subtracted image is too negative, use the + # If bkg is not None, but the background-subtracted image is too negative, use the # sigma-clipped computation for the background. 
if bkg is not None: imgdata_bkgsub = imgdata - bkg_background_ra # Determine how much of the illuminated portion of the background subtracted # image is negative - illum_mask = self.exclude_zones_mask < 1 - total_illum_mask = illum_mask.sum() - illum_data = imgdata_bkgsub * illum_mask - negative_mask = illum_data < negative_threshold - total_negative_mask = negative_mask.sum() - negative_ratio = total_negative_mask / total_illum_mask - del illum_data, illum_mask, negative_mask, imgdata_bkgsub + num_negative = np.count_nonzero(imgdata_bkgsub[self.footprint_mask] < 0) + negative_ratio = num_negative / num_of_illuminated_pixels + del imgdata_bkgsub # Report this information so the relative percentage and the threshold are known log.info("Percentage of negative values in the background subtracted image {0:.2f} vs low threshold of {1:.2f}.".format(100.0 * negative_ratio, negative_percent)) @@ -322,10 +369,11 @@ def compute_background(self, box_size, win_size, self.bkg_rms_ra = bkg_rms_ra.copy() self.bkg_rms_median = bkg_rms_median self.bkg_median = bkg_median - del bkg_background_ra, bkg_rms_ra, bkg_rms_median, bkg_median log.info("") log.info("*** Use the background image determined from the Background2D. ***") + del bkg_background_ra, bkg_rms_ra + # Skewness of sigma_clipped background exceeds threshold else: log.info("*** Use the background image determined from the sigma_clip algorithm based upon skewness. 
***") @@ -368,7 +416,7 @@ def _get_header_data(self): keyword_dict["texpo_time"] = self.imghdu[0].header["TEXPTIME"] keyword_dict["exptime"] = self.imghdu[0].header["EXPTIME"] keyword_dict["ndrizim"] = self.imghdu[0].header["NDRIZIM"] - if keyword_dict["detector"] != "SBC": + if keyword_dict["detector"].upper() != "SBC": keyword_dict["ccd_gain"] = self.imghdu[0].header["CCDGAIN"] keyword_dict["readnse"] = self._get_max_key_value(self.imghdu[0].header, 'READNSE') keyword_dict["atodgn"] = self._get_max_key_value(self.imghdu[0].header, 'ATODGN') @@ -430,7 +478,7 @@ class HAPCatalogs: """ crfactor = {'aperture': 300, 'segment': 150} # CRs / hr / 4kx4k pixels - def __init__(self, fitsfile, param_dict, param_dict_qc, log_level, diagnostic_mode=False, types=None, + def __init__(self, fitsfile, param_dict, param_dict_qc, num_images_mask, log_level, diagnostic_mode=False, types=None, tp_sources=None): # set logging level to user-specified level log.setLevel(log_level) @@ -458,11 +506,12 @@ def __init__(self, fitsfile, param_dict, param_dict_qc, log_level, diagnostic_mo # Get various configuration variables needed for the background computation # Compute the background for this image - self.image = CatalogImage(fitsfile, log_level) + self.image = CatalogImage(fitsfile, num_images_mask, log_level) self.image.compute_background(self.param_dict['bkg_box_size'], self.param_dict['bkg_filter_size'], simple_bkg=self.param_dict['simple_bkg'], bkg_skew_threshold=self.param_dict['bkg_skew_threshold'], + zero_percent=self.param_dict['zero_percent'], negative_percent=self.param_dict['negative_percent'], negative_threshold=self.param_dict['negative_threshold'], nsigma_clip=self.param_dict['nsigma_clip'], @@ -472,6 +521,7 @@ def __init__(self, fitsfile, param_dict, param_dict_qc, log_level, diagnostic_mo self.param_dict['dao']['TWEAK_FWHMPSF'], self.param_dict['simple_bkg'], self.param_dict['bkg_skew_threshold'], + self.param_dict['zero_percent'], self.param_dict['negative_percent'], 
self.param_dict['negative_threshold'], self.param_dict['nsigma_clip'], @@ -516,18 +566,18 @@ def verify_crthresh(self, n1_exposure_time): thresh = crfactor * n1_exposure_time**2 / texptime """ for cat_type in self.catalogs: - crthresh_mask = None - source_cat = self.catalogs[cat_type].sources if cat_type == 'aperture' else self.catalogs[cat_type].source_cat - - flag_cols = [colname for colname in source_cat.colnames if colname.startswith('Flag')] - for colname in flag_cols: - catalog_crmask = source_cat[colname] < 2 - if crthresh_mask is None: - crthresh_mask = catalog_crmask - else: - # Combine masks for all filters for this catalog type - crthresh_mask = np.bitwise_or(crthresh_mask, catalog_crmask) - source_cat.sources_num_good = len(np.where(crthresh_mask)[0]) + crthresh_mask = None + source_cat = self.catalogs[cat_type].sources if cat_type == 'aperture' else self.catalogs[cat_type].source_cat + + flag_cols = [colname for colname in source_cat.colnames if colname.startswith('Flag')] + for colname in flag_cols: + catalog_crmask = source_cat[colname] < 2 + if crthresh_mask is None: + crthresh_mask = catalog_crmask + else: + # Combine masks for all filters for this catalog type + crthresh_mask = np.bitwise_or(crthresh_mask, catalog_crmask) + source_cat.sources_num_good = len(np.where(crthresh_mask)[0]) reject_catalogs = False @@ -549,7 +599,6 @@ def verify_crthresh(self, n1_exposure_time): return reject_catalogs - def measure(self, filter_name, **pars): """Perform photometry and other measurements on sources for this image. 
@@ -622,7 +671,7 @@ def __init__(self, image, param_dict, param_dict_qc, diagnostic_mode, tp_sources self.gain = self.image.keyword_dict['exptime'] * np.mean(gain_values) # Set the gain for ACS/SBC and WFC3/IR to 1.0 - if self.image.keyword_dict["detector"] in ["IR", "SBC"]: + if self.image.keyword_dict["detector"].upper() in ["IR", "SBC"]: self.gain = 1.0 # Convert photometric aperture radii from arcsec to pixels @@ -656,16 +705,11 @@ def __init__(self, image, param_dict, param_dict_qc, diagnostic_mode, tp_sources self.source_cat = None # catalog of sources and their properties self.tp_sources = tp_sources - # create mask to reject any sources located less than 10 pixels from a image/chip edge - wht_image = np.nan_to_num(self.image.data, 0.0) - binary_inverted_wht = np.where(wht_image == 0, 1, 0) - self.exclusion_mask = ndimage.binary_dilation(binary_inverted_wht, iterations=10) - # Determine what regions we have for source identification # Regions are defined as sections of the image which has the same # max WHT within a factor of 2.0 (or so). 
# make_wht_masks(whtarr, maskarr, scale=1.5, sensitivity=0.95, kernel=(11,11)) - self.tp_masks = make_wht_masks(self.image.wht_image, self.exclusion_mask, + self.tp_masks = make_wht_masks(self.image.wht_image, self.image.inv_footprint_mask, scale=self.param_dict['scale'], sensitivity=self.param_dict['sensitivity'], kernel=(self.param_dict['region_size'], @@ -731,7 +775,7 @@ def annotate_table(self, data_table, param_dict_qc, product="tdp"): data_table.meta["Aperture PA"] = self.image.keyword_dict["aperture_pa"] data_table.meta["Exposure Start"] = self.image.keyword_dict["expo_start"] data_table.meta["Total Exposure Time"] = self.image.keyword_dict["texpo_time"] - if self.image.keyword_dict["detector"] != "SBC": + if self.image.keyword_dict["detector"].upper() != "SBC": data_table.meta["CCD Gain"] = self.image.keyword_dict["ccd_gain"] if product.lower() == "tdp" or self.image.keyword_dict["instrument"].upper() == "WFC3": data_table.meta["Filter 1"] = self.image.keyword_dict["filter1"] @@ -796,13 +840,9 @@ def __init__(self, image, param_dict, param_dict_qc, diagnostic_mode, tp_sources def identify_sources(self, **pars): """Create a master coordinate list of sources identified in the specified total detection product image """ - if pars and 'mask' in pars: - drc = 'drc.fits' if 'drc.fits' in self.imgname else 'drz.fits' - fits.PrimaryHDU(data=pars['mask'].astype(np.uint16)).writeto(self.imgname.replace(drc, 'footprint_mask.fits')) - source_fwhm = self.image.kernel_fwhm # read in sci, wht extensions of drizzled product - image = self.image.data.copy() + image = np.nan_to_num(self.image.data, 0.0) # Create the background-subtracted image image -= self.image.bkg_background_ra @@ -828,9 +868,6 @@ def identify_sources(self, **pars): log.info("") log.info("{}".format("=" * 80)) - drc = 'drc.fits' if 'drc.fits' in self.image.imgname else 'drz.fits' - fits.PrimaryHDU(data=self.exclusion_mask.astype(np.int16)).writeto(self.image.imgname.replace(drc, 'exclusion_mask.fits')) - 
sources = None for mask in self.tp_masks: # apply mask for each separate range of WHT values @@ -847,12 +884,12 @@ def identify_sources(self, **pars): reg_rms_median)) daofind = DAOStarFinder(fwhm=source_fwhm, threshold=self.param_dict['nsigma'] * reg_rms_median) - reg_sources = daofind(region, mask=self.exclusion_mask) + reg_sources = daofind(region, mask=self.image.inv_footprint_mask) elif self.param_dict["starfinder_algorithm"] == "iraf": log.info("IRAFStarFinder(fwhm={}, threshold={}*{})".format(source_fwhm, self.param_dict['nsigma'], reg_rms_median)) isf = IRAFStarFinder(fwhm=source_fwhm, threshold=self.param_dict['nsigma'] * reg_rms_median) - reg_sources = isf(region, mask=self.exclusion_mask) + reg_sources = isf(region, mask=self.image.inv_footprint_mask) else: err_msg = "'{}' is not a valid 'starfinder_algorithm' parameter input in the catalog_generation parameters json file. Valid options are 'dao' for photutils.detection.DAOStarFinder() or 'iraf' for photutils.detection.IRAFStarFinder().".format(self.param_dict["starfinder_algorithm"]) log.error(err_msg) @@ -1205,7 +1242,7 @@ def identify_sources(self, **pars): if self.diagnostic_mode: # Exclusion mask outname = self.imgname.replace(".fits", "_mask.fits") - fits.PrimaryHDU(data=self.exclusion_mask.astype(np.uint16)).writeto(outname) + fits.PrimaryHDU(data=self.image.inv_footprint_mask.astype(np.uint16)).writeto(outname) # Background image outname = self.imgname.replace(".fits", "_bkg.fits") @@ -1231,7 +1268,7 @@ def identify_sources(self, **pars): ncount, filter_kernel=self.image.kernel, source_box=self._size_source_box, - mask=self.exclusion_mask) + mask=self.image.inv_footprint_mask) # Check if custom_segm_image is None indicating there are no detectable sources in the # total detection image. If value is None, a warning has already been issued. 
Issue @@ -1278,7 +1315,7 @@ def identify_sources(self, **pars): ncount, filter_kernel=rw2d_kernel, source_box=self._size_source_box, - mask=self.exclusion_mask) + mask=self.image.inv_footprint_mask) # Check if rw2d_segm_image is None indicating there are no detectable sources in the # total detection image. If value is None, a warning has already been issued. Issue @@ -1397,13 +1434,16 @@ def compute_threshold(self, nsigma, bkg_rms): log.info("Computing the threshold value used for source detection.") - threshold = np.zeros_like(self.tp_masks[0]['rel_weight']) - log.info("Using WHT masks as a scale on the RMS to compute threshold detection limit.") - for wht_mask in self.tp_masks: - threshold_item = nsigma * bkg_rms * np.sqrt(wht_mask['mask'] / wht_mask['rel_weight'].max()) - threshold_item[np.isnan(threshold_item)] = 0.0 - threshold += threshold_item - del(threshold_item) + if not self.tp_masks: + threshold = nsigma * bkg_rms + else: + threshold = np.zeros_like(self.tp_masks[0]['rel_weight']) + log.info("Using WHT masks as a scale on the RMS to compute threshold detection limit.") + for wht_mask in self.tp_masks: + threshold_item = nsigma * bkg_rms * np.sqrt(wht_mask['mask'] / wht_mask['rel_weight'].max()) + threshold_item[np.isnan(threshold_item)] = 0.0 + threshold += threshold_item + del(threshold_item) return threshold diff --git a/drizzlepac/pars/hap_pars/default_parameters/acs/hrc/acs_hrc_catalog_generation_all.json b/drizzlepac/pars/hap_pars/default_parameters/acs/hrc/acs_hrc_catalog_generation_all.json index c9e5c85a6..ca3d969e0 100644 --- a/drizzlepac/pars/hap_pars/default_parameters/acs/hrc/acs_hrc_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/default_parameters/acs/hrc/acs_hrc_catalog_generation_all.json @@ -15,10 +15,11 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, + "maxiters": 3, "bkg_skew_threshold": 0.5, 
"dao": { "bkgsig_sf": 4.0, diff --git a/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json b/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json index 286e6b7d4..8287b252e 100644 --- a/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/default_parameters/acs/sbc/acs_sbc_catalog_generation_all.json @@ -15,10 +15,11 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, + "maxiters": 3, "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, diff --git a/drizzlepac/pars/hap_pars/default_parameters/acs/wfc/acs_wfc_catalog_generation_all.json b/drizzlepac/pars/hap_pars/default_parameters/acs/wfc/acs_wfc_catalog_generation_all.json index f913dad3d..6af5530e0 100644 --- a/drizzlepac/pars/hap_pars/default_parameters/acs/wfc/acs_wfc_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/default_parameters/acs/wfc/acs_wfc_catalog_generation_all.json @@ -15,10 +15,11 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, + "maxiters": 3, "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, diff --git a/drizzlepac/pars/hap_pars/default_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json b/drizzlepac/pars/hap_pars/default_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json index 1aa74fd8f..477bcb3bb 100644 --- a/drizzlepac/pars/hap_pars/default_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/default_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json @@ -15,10 +15,11 @@ "region_size": 11, "flag_trim_value": 65368, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, + 
"maxiters": 3, "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, diff --git a/drizzlepac/pars/hap_pars/default_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json b/drizzlepac/pars/hap_pars/default_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json index d7b97f07c..3ee379581 100644 --- a/drizzlepac/pars/hap_pars/default_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/default_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json @@ -15,10 +15,11 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, + "maxiters": 3, "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, diff --git a/drizzlepac/pars/hap_pars/user_parameters/acs/hrc/acs_hrc_catalog_generation_all.json b/drizzlepac/pars/hap_pars/user_parameters/acs/hrc/acs_hrc_catalog_generation_all.json index b4ff466f9..32ebaea21 100644 --- a/drizzlepac/pars/hap_pars/user_parameters/acs/hrc/acs_hrc_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/user_parameters/acs/hrc/acs_hrc_catalog_generation_all.json @@ -13,11 +13,12 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, - "bkg_skew_threshold":0.5, + "maxiters": 3, + "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, "kernel_sd_aspect_ratio": 0.8, diff --git a/drizzlepac/pars/hap_pars/user_parameters/acs/sbc/acs_sbc_catalog_generation_all.json b/drizzlepac/pars/hap_pars/user_parameters/acs/sbc/acs_sbc_catalog_generation_all.json index f54034f16..e02f12bec 100644 --- a/drizzlepac/pars/hap_pars/user_parameters/acs/sbc/acs_sbc_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/user_parameters/acs/sbc/acs_sbc_catalog_generation_all.json @@ -13,11 +13,12 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, 
"negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, - "bkg_skew_threshold":0.5, + "maxiters": 3, + "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, "kernel_sd_aspect_ratio": 0.8, diff --git a/drizzlepac/pars/hap_pars/user_parameters/acs/wfc/acs_wfc_catalog_generation_all.json b/drizzlepac/pars/hap_pars/user_parameters/acs/wfc/acs_wfc_catalog_generation_all.json index bf132d0af..fd3e9e532 100644 --- a/drizzlepac/pars/hap_pars/user_parameters/acs/wfc/acs_wfc_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/user_parameters/acs/wfc/acs_wfc_catalog_generation_all.json @@ -13,11 +13,12 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, - "bkg_skew_threshold":0.5, + "maxiters": 3, + "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, "kernel_sd_aspect_ratio": 0.8, diff --git a/drizzlepac/pars/hap_pars/user_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json b/drizzlepac/pars/hap_pars/user_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json index 91c3ba930..0bb343094 100644 --- a/drizzlepac/pars/hap_pars/user_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json +++ b/drizzlepac/pars/hap_pars/user_parameters/wfc3/ir/wfc3_ir_catalog_generation_all.json @@ -13,11 +13,12 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, - "bkg_skew_threshold":0.5, + "maxiters": 3, + "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, "kernel_sd_aspect_ratio": 0.8, diff --git a/drizzlepac/pars/hap_pars/user_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json b/drizzlepac/pars/hap_pars/user_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json index 84ce000dd..ffd7554c1 100644 --- a/drizzlepac/pars/hap_pars/user_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json 
+++ b/drizzlepac/pars/hap_pars/user_parameters/wfc3/uvis/wfc3_uvis_catalog_generation_all.json @@ -13,11 +13,12 @@ "region_size": 11, "flag_trim_value": 5, "simple_bkg": false, + "zero_percent": 25.0, "negative_threshold": 0.0, "negative_percent": 15.0, "nsigma_clip": 3.0, - "maxiters":3, - "bkg_skew_threshold":0.5, + "maxiters": 3, + "bkg_skew_threshold": 0.5, "dao": { "bkgsig_sf": 4.0, "kernel_sd_aspect_ratio": 0.8,