diff --git a/python/lsst/meas/algorithms/__init__.py b/python/lsst/meas/algorithms/__init__.py index 245b34a23..eff9e02ed 100644 --- a/python/lsst/meas/algorithms/__init__.py +++ b/python/lsst/meas/algorithms/__init__.py @@ -72,3 +72,7 @@ from .version import * import lsst.utils + +# adaptive_thresholds.py is intentionally not imported and lifted, to +# (belatedly) try to limit how much code is run when just importing +# lsst.meas.algorithms. diff --git a/python/lsst/meas/algorithms/adaptive_thresholds.py b/python/lsst/meas/algorithms/adaptive_thresholds.py new file mode 100644 index 000000000..06a8d069f --- /dev/null +++ b/python/lsst/meas/algorithms/adaptive_thresholds.py @@ -0,0 +1,588 @@ +# This file is part of meas_algorithms. +# +# Developed for the LSST Data Management System. +# This product includes software developed by the LSST Project +# (https://www.lsst.org). +# See the COPYRIGHT file at the top-level directory of this distribution +# for details of code ownership. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <https://www.gnu.org/licenses/>. 
+ +__all__ = [ + "AdaptiveThresholdDetectionConfig", + "AdaptiveThresholdDetectionTask", + "AdaptiveThresholdBackgroundConfig", + "AdaptiveThresholdBackgroundTask", +] + +from contextlib import contextmanager + +import numpy as np + +from lsst.pex.config import Field, Config, ConfigField, DictField, FieldValidationError, ListField, RangeField +from lsst.pipe.base import Task + +from lsst.afw.geom import SpanSet +from lsst.afw.image import Mask +from lsst.afw.math import BackgroundList +from .detection import SourceDetectionConfig, SourceDetectionTask +from .subtractBackground import SubtractBackgroundConfig, SubtractBackgroundTask + + +class AdaptiveThresholdDetectionConfig(Config): + """Configuration for AdaptiveThresholdDetectionTask + """ + maxAdaptiveDetIter = Field(dtype=int, default=20, + doc="Maximum number of adaptive threshold detection iterations.") + maxNumPeakPerBand = DictField( + keytype=str, + itemtype=int, + default={ + "u": 3000, + "g": 3000, + "r": 3000, + "i": 5000, + "z": 5000, + "y": 3000, + "fallback": 5000, + }, + doc="Maximum number of peaks per band. If the current band for the exposure " + "is not included as a key in this dict, the value associated with the " + "\"fallback\" key will be used.", + ) + minFootprint = Field(dtype=int, default=15, + doc="Minimum number of footprints considered sufficient to exit the " + "iteration loop. This should be larger than or equal to ``minIsolated`` as " + "it does not take into account whether any given footprint is multi-peaked " + "(i.e. blended thus not providing any isolated sources for PSF modeling.)") + maxPeakToFootRatio = Field(dtype=float, default=800.0, + doc="Maximum ratio of the number of peaks per footprint considered " + "sufficient to exit the iteration loop. This helps guard against " + "large contiguous footprints leaving no isolated sources for " + "inclusion in the PSF modeling.") + minIsolated = Field(dtype=int, default=6, + doc="Minimum number of single-peaked (i.e. 
isolated) footprints " + "considered sufficient to exit the iteration loop. This should be " + "larger than the minimum number of sources desired for PSF modeling " + "since some may be rejected by the source selector.") + sufficientIsolated = Field(dtype=int, default=100, + doc="Number of single-peaked (isolated) footprints considered " + "sufficient to exit the iteration loop. Must be larger than " + "``minIsolated``.") + sufficientFractionIsolated = Field(dtype=float, default=0.45, + doc="Fraction of single-peaked (isolated) footprints considered " + "sufficient to exit the iteration loop.") + baseline = ConfigField( + "Baseline configuration for SourceDetectionTask in the absence of any iteration. " + "All options other than thresholdPolarity, thresholdValue, and includeThresholdMultiplier " + "are held fixed at these values.", + SourceDetectionConfig, + ) + + def setDefaults(self): + self.baseline.reEstimateBackground = False + self.baseline.doTempWideBackground = True + self.baseline.tempWideBackground.binSize = 512 + self.baseline.thresholdPolarity = "positive" # for schema and final run. + self.baseline.includeThresholdMultiplier = 2.0 + + def validate(self): + super().validate() + if "fallback" not in self.maxNumPeakPerBand: + msg = ("Must include a \"fallback\" key in the config.maxNumPeakPerBand config dict. 
" + f"It is currently: {self.maxNumPeakPerBand}.") + raise FieldValidationError(self.__class__.maxNumPeakPerBand, self, msg) + if self.minFootprint < self.minIsolated: + msg = (f"The config.minFootprint (= {self.minFootprint}) must be >= that of " + f"config.minIsolated (= {self.minIsolated}).") + raise FieldValidationError(self.__class__.minFootprint, self, msg) + if self.sufficientIsolated < self.minIsolated: + msg = (f"The config.sufficientIsolated (= {self.sufficientIsolated}) must be >= that of " + f"config.minIsolated (= {self.minIsolated}).") + raise FieldValidationError(self.__class__.sufficientIsolated, self, msg) + if self.baseline.reEstimateBackground: + raise FieldValidationError( + self.__class__.baseline, self, + "Baseline detection configuration must not include background re-estimation." + ) + + +class AdaptiveThresholdDetectionTask(Task): + """Detection of sources on an image using an adaptive scheme for + the detection threshold. + """ + ConfigClass = AdaptiveThresholdDetectionConfig + _DefaultName = "detection" + + def __init__(self, schema=None, **kwargs): + super().__init__(**kwargs) + # We make a baseline SourceDetectionTask only to set up the schema. + if schema is not None: + SourceDetectionTask(config=self.config.baseline, schema=schema) + + def run(self, table, exposure, **kwargs): + """Perform detection with an adaptive threshold detection scheme + conditioned to maximize the likelihood of a successful PSF model fit + for any given "scene". + + In particular, we'd like to be able to handle different scenes, from + sparsely populated ones through very crowded ones, and possibly high + fill-factor nebulosity along the way, with a single pipeline (and set + of configs). This requires some flexibility in setting the detection + thresholds in order to detect enough sources suitable for PSF modeling + (e.g. 
crowded fields require higher thresholds to ensure the detections + don't end up overlapping into a single or very small number of blended + footprints. + + We first detect sources using the default threshold and multiplier. + Then, cycling through a series of criteria based on the DETECTED mask + planes (number of footprints, number of peaks, number of isolated + footprints, number of peaks-per-footrpint, etc.) conditioned to identify + a "Goldilocks Zone" where we have enough isolated peaks from which to + measure the PSF, we iterate while adjusting the detection thresholds + in the appropriate direction until all criteria are met (or the maximum + number of iterations is reached). + + Parameters + ---------- + table : `lsst.afw.table.SourceTable` + Table object that will be used to create the SourceCatalog. + exposure : `lsst.afw.image.Exposure` + Exposure to process; DETECTED mask plane will be set in-place. + **kwargs + Forwarded to internal runs of `SourceDetectionTask`. + + Returns + ------- + results : `lsst.pipe.base.Struct` + The adaptive threshold detection results. Most fields are directly + produced by `SourceDetectionTask.run`. Additional fields include: + + ``thresholdValue`` + The final threshold value used to the configure the final round + of detection (`float`). + ``includeThresholdMultiplier`` + The final multiplication factor applied to the configured detection + threshold. (`float`). + """ + band = "fallback" + if exposure.filter is not None: + if exposure.filter.hasBandLabel(): + band = exposure.filter.bandLabel + if band in self.config.maxNumPeakPerBand: + maxNumPeak = self.config.maxNumPeakPerBand[band] + else: + maxNumPeak = self.config.maxNumPeakPerBand["fallback"] + + # Set up and configure the adaptive detection task on first iteration. 
+ thresholdFactor = 1.0 + adaptiveDetectionConfig = self.config.baseline.copy() + adaptiveDetectionConfig.thresholdPolarity = "both" + self.log.info("Using adaptive detection with thresholdValue = %.2f and multiplier = %.1f", + adaptiveDetectionConfig.thresholdValue, + adaptiveDetectionConfig.includeThresholdMultiplier) + adaptiveDetectionTask = SourceDetectionTask(config=adaptiveDetectionConfig) + + maxNumNegFactor = 1.0 + # We use a 1-indexed iteration variable just to make logs intuitive. + for nAdaptiveDetIter in range(1, self.config.maxAdaptiveDetIter + 1): + detRes = adaptiveDetectionTask.run(table=table, exposure=exposure, doSmooth=True, **kwargs) + sourceCat = detRes.sources + nFootprint = len(sourceCat) + nPeak = 0 + nPosPeak = detRes.numPosPeaks + nNegPeak = detRes.numNegPeaks # Often detected in high nebulosity scenes. + maxNumNegPeak = max(15, int(0.025*nPosPeak))*maxNumNegFactor + nIsolated = 0 + nPeakPerSrcMax = 0 + maxNumPeakPerSrcMax = 0.2*maxNumPeak + for src in sourceCat: + nPeakSrc = len(src.getFootprint().getPeaks()) + if nPeakSrc == 1: + nIsolated += 1 + nPeak += nPeakSrc + nPeakPerSrcMax = max(nPeakPerSrcMax, nPeakSrc) + if nFootprint > 0.0: + fractionIsolated = nIsolated/nFootprint + avgPeakPerFoot = nPeak/nFootprint + else: + fractionIsolated = float("nan") + avgPeakPerFoot = float("nan") + self.log.info("In adaptive detection iter %d: nFootprints = %d, nPosPeak = %d (max is %d), " + "nNegPeak = %d (max is %d), nPeak/nFoot = %.1f (max is %.1f), " + "nPeakPerkSrcMax = %d (max is %d), nIsolated = %d, fractionIsolated = %.2f", + nAdaptiveDetIter, nFootprint, nPeak, maxNumPeak, nNegPeak, maxNumNegPeak, + avgPeakPerFoot, self.config.maxPeakToFootRatio, nPeakPerSrcMax, + maxNumPeakPerSrcMax, nIsolated, fractionIsolated) + if (nIsolated > self.config.sufficientIsolated + and fractionIsolated > self.config.sufficientFractionIsolated + and (nAdaptiveDetIter > 1 or self.config.maxAdaptiveDetIter == 1)): + if ((nIsolated > 
5.0*self.config.sufficientIsolated and nPeak < 2.5*maxNumPeak + and nNegPeak < 100.0*maxNumNegPeak) + or (nNegPeak < 5.0*maxNumNegPeak and nPeak < 1.2*maxNumPeak)): + self.log.info("Sufficient isolated footprints (%d > %d) and fraction of isolated " + "footprints (%.2f > %.2f) for PSF modeling. Exiting adaptive detection " + "at iter: %d.", + nIsolated, self.config.sufficientIsolated, fractionIsolated, + self.config.sufficientFractionIsolated, nAdaptiveDetIter) + break + + if nFootprint == 0 or nPosPeak == 0: + thresholdFactor = 0.25 + maxNumNegFactor *= 10 + self.log.warning("Adaptive threshold increase went too far (nFootprint = 0). " + "Decreasing threshold to %.2f and rerunning.", + thresholdFactor*adaptiveDetectionConfig.thresholdValue) + adaptiveDetectionConfig.thresholdValue = ( + thresholdFactor*adaptiveDetectionConfig.thresholdValue) + adaptiveDetectionTask = SourceDetectionTask(config=adaptiveDetectionConfig) + continue + + if ((nPeak/nFootprint > self.config.maxPeakToFootRatio and nIsolated < self.config.minIsolated) + or nNegPeak > maxNumNegPeak): + if nNegPeak > 2*maxNumNegPeak: + thresholdFactor = 1.5 + else: + thresholdFactor = 1.25 + thresholdFactor *= adaptiveDetectionConfig.includeThresholdMultiplier + adaptiveDetectionConfig.includeThresholdMultiplier = 1.0 + self.log.warning("Adaptive detection iter %d: catalog had nPeak/nFootprint = " + "%.1f (max is %.1f) and %d negative peaks (max is %d). 
" + "Increasing threshold to %.2f and setting multiplier " + "to %.1f and rerunning.", + nAdaptiveDetIter, nPeak/nFootprint, self.config.maxPeakToFootRatio, + nNegPeak, maxNumNegPeak, + thresholdFactor*adaptiveDetectionConfig.thresholdValue, + adaptiveDetectionConfig.includeThresholdMultiplier) + adaptiveDetectionConfig.thresholdValue = ( + thresholdFactor*adaptiveDetectionConfig.thresholdValue) + adaptiveDetectionTask = SourceDetectionTask(config=adaptiveDetectionConfig) + continue + + if (nPeak > maxNumPeak or nPeakPerSrcMax > maxNumPeakPerSrcMax + or nFootprint <= self.config.minFootprint): + if nPeak > maxNumPeak or nPeakPerSrcMax > 0.25*maxNumPeak: + if nAdaptiveDetIter < 0.5*self.config.maxAdaptiveDetIter: + if nPeak > 3*maxNumPeak or nPeakPerSrcMax > maxNumPeak: + thresholdFactor = 1.7 + elif nPeak > 2*maxNumPeak or nPeakPerSrcMax > 0.5*maxNumPeak: + thresholdFactor = 1.4 + else: + thresholdFactor = 1.2 + else: + thresholdFactor = 1.2 + thresholdFactor *= adaptiveDetectionConfig.includeThresholdMultiplier + newThresholdMultiplier = max(1.0, 0.5*adaptiveDetectionConfig.includeThresholdMultiplier) + adaptiveDetectionConfig.includeThresholdMultiplier = newThresholdMultiplier + adaptiveDetectionConfig.thresholdValue = ( + thresholdFactor*adaptiveDetectionConfig.thresholdValue) + self.log.warning("Adaptive detection iter %d catalog had nPeak = %d (max = %d) " + "and nPeakPerSrcMax = %d (max = %d). Increasing threshold to %.2f " + "and setting multiplier to %.1f and rerunning.", + nAdaptiveDetIter, nPeak, maxNumPeak, nPeakPerSrcMax, maxNumPeakPerSrcMax, + adaptiveDetectionConfig.thresholdValue, + adaptiveDetectionConfig.includeThresholdMultiplier) + adaptiveDetectionTask = SourceDetectionTask(config=adaptiveDetectionConfig) + continue + + if nFootprint <= self.config.minFootprint: + maxNumNegFactor *= 10 # Allow more -ve detections at this point. 
+ thresholdFactor = min(0.85, 0.4*np.log10(10*(nFootprint + 1))) + self.log.warning("Adaptive detection iter %d catalog had only %d footprints. " + "Lowering threshold to %.2f and increasing the allowance for " + "negative detections and rerunning.", + nAdaptiveDetIter, nFootprint, + thresholdFactor*adaptiveDetectionConfig.thresholdValue) + adaptiveDetectionTask = SourceDetectionTask(config=adaptiveDetectionConfig) + adaptiveDetectionConfig.thresholdValue = ( + thresholdFactor*adaptiveDetectionConfig.thresholdValue + ) + adaptiveDetectionTask = SourceDetectionTask(config=adaptiveDetectionConfig) + else: + break + # Final round of detection with positive polarity + adaptiveDetectionConfig.thresholdPolarity = "positive" + adaptiveDetectionTask = SourceDetectionTask(config=adaptiveDetectionConfig) + self.log.info("Performing final round of detection with threshold %.2f and multiplier %.1f", + adaptiveDetectionConfig.thresholdValue, + adaptiveDetectionConfig.includeThresholdMultiplier) + detections = adaptiveDetectionTask.run(table=table, exposure=exposure, doSmooth=True, **kwargs) + detections.thresholdValue = adaptiveDetectionConfig.thresholdValue + detections.includeThresholdMultiplier = adaptiveDetectionConfig.includeThresholdMultiplier + return detections + + + +class AdaptiveThresholdBackgroundConfig(SubtractBackgroundConfig): + detectedFractionBadMaskPlanes = ListField( + "Mask planes to ignore when computing the detected fraction.", dtype=str, + default=["BAD", "EDGE", "NO_DATA"], + ) + minDetFracForFinalBg = Field( + "Minimum detected fraction for the final background.", + dtype=float, default=0.02, + ) + maxDetFracForFinalBg = Field( + "Maximum detected fraction for the final background.", + dtype=float, default=0.93, + ) + initialDilateRadius = RangeField( + "Number of pixels to dilate the original mask by.", + dtype=int, default=10, min=1, + ) + doCheckPerAmpDetectionFraction = Field( + "Whether to check per-amplifier detection fractions.", + dtype=bool, 
default=True, + ) + maxDetectionIterationCount = Field( + "Maximum number of adaptive detection iterations.", + dtype=int, default=39, # Original code had 40, but iterated from [0, n] inclusive. + ) + detection = ConfigField( + "Baseline configuration for SourceDetectionTask prior to iteration.", + SourceDetectionConfig, + ) + + def setDefaults(self): + super().setDefaults() + self.detection.doTempLocalBackground = False + self.detection.nSigmaToGrow = 70.0 + self.detection.reEstimateBackground = False + self.detection.includeThresholdMultiplier = 1.0 + self.detection.thresholdValue = 2.0 # Actually used only as a floor. + self.detection.thresholdType = "pixel_stdev" + + +class AdaptiveThresholdBackgroundTask(SubtractBackgroundTask): + """A background subtraction task that does its own masking of detected + sources, using an adaptive scheme that iterates until bounds on the mask + fraction are satisfied. + + Notes + ----- + This task is only designed for use on detector images, as it is aware of + amplifier geometry (to deal with the fact that some amps have much higher + noise than others, and hence very different detected-mask fractions for the + same detection threshold. + """ + + ConfigClass = AdaptiveThresholdBackgroundConfig + _DETECTED_MASK_PLANES = ("DETECTED", "DETECTED_NEGATIVE") + + def run(self, exposure, background=None, stats=True, statsKeys=None, backgroundToPhotometricRatio=None): + # Restore the previously measured background and remeasure it + # using an adaptive threshold detection iteration to ensure a + # "Goldilocks Zone" for the fraction of detected pixels. + if not background: + background = BackgroundList() + median_background = 0.0 + else: + median_background = np.median(background.getImage().array) + self.log.warning("Original median_background = %.2f", median_background) + # TODO: apply backgroundToPhotometricRatio here! 
+ exposure.image.array += background.getImage().array + + with self._restore_mask_when_done(exposure) as original_mask: + dilated_mask = self._dilate_original_mask(exposure, original_mask) + self._set_adaptive_detection_mask(exposure, median_background, dilated_mask) + # Do not pass the original background in, since we want to wholly + # replace it. + return super().run(exposure=exposure, stats=stats, statsKeys=statsKeys, + backgroundToPhotometricRatio=backgroundToPhotometricRatio) + + def _dilate_original_mask(self, exposure, original_mask): + detected_fraction_orig = self._compute_mask_fraction(exposure.mask) + # Dilate the current detected mask planes and don't clear + # them in the detection step. + for n_pix_to_dilate in range(self.config.initialDilateRadius, 0, -1): + dilated_mask = original_mask.clone() + for mask_name in self._DETECTED_MASK_PLANES: + # Compute the grown detection mask plane using SpanSet + detected_mask_bit = dilated_mask.getPlaneBitMask(mask_name) + detected_mask_span_set = SpanSet.fromMask(dilated_mask, detected_mask_bit) + detected_mask_span_set = detected_mask_span_set.dilated(n_pix_to_dilate) + detected_mask_span_set = detected_mask_span_set.clippedTo(dilated_mask.getBBox()) + # Clear the detected mask plane + detectedMask = dilated_mask.getMaskPlane(mask_name) + dilated_mask.clearMaskPlane(detectedMask) + # Set the mask plane to the dilated one + detected_mask_span_set.setMask(dilated_mask, detected_mask_bit) + + detected_fraction_dilated = self._compute_mask_fraction(dilated_mask) + if detected_fraction_dilated < self.config.maxDetFracForFinalBg or n_pix_to_dilate == 1: + break + exposure.mask = dilated_mask + self.log.warning("detected_fraction_orig = %.3f detected_fraction_dilated = %.3f", + detected_fraction_orig, detected_fraction_dilated) + n_above_max_per_amp = -99 + highest_detected_fraction_per_amp = float("nan") + if self.config.doCheckPerAmpDetectionFraction: + n_above_max_per_amp, highest_detected_fraction_per_amp, 
no_zero_det_amps = \ + self._compute_per_amp_fraction(exposure, detected_fraction_dilated) + self.log.warning("Dilated mask: n_above_max_per_amp = %d, " + "highest_detected_fraction_per_amp = %.3f", + n_above_max_per_amp, highest_detected_fraction_per_amp) + return dilated_mask + + def _set_adaptive_detection_mask(self, exposure, median_background, dilated_mask): + detected_fraction = 1.0 + n_footprint_tmp = 1e12 + detection_config = self.config.detection.copy() + detection_config.thresholdValue = max(detection_config.thresholdValue, 0.2*median_background) + detection_config.thresholdType = "pixel_stdev" # "stdev" + + no_zero_det_amps = 0 + n_above_max_per_amp = -99 + highest_detected_fraction_per_amp = float("nan") + + for n_iter in range(0, self.config.maxDetectionIterationCount): + current_threshold = detection_config.thresholdValue + if detected_fraction > self.config.maxDetFracForFinalBg: + detection_config.thresholdValue = 1.07*current_threshold + if n_footprint_tmp < 3 and detected_fraction > 0.9*self.config.maxDetFracForFinalBg: + detection_config.thresholdValue = 1.2*current_threshold + if n_above_max_per_amp > 1: + detection_config.thresholdValue = 1.1*current_threshold + if detected_fraction < self.config.minDetFracForFinalBg: + detection_config.thresholdValue = 0.8*current_threshold + detection_task = SourceDetectionTask(config=detection_config) + tempDetections = detection_task.detectFootprints(exposure=exposure, clearMask=True) + exposure.mask |= dilated_mask + n_footprint_tmp = ( + (len(tempDetections.positive.getFootprints()) if tempDetections is not None else 0) + + (len(tempDetections.negative.getFootprints()) if tempDetections.negative is not None else 0) + ) + detected_fraction = self._compute_mask_fraction(exposure.mask) + self.log.info("nIter = %d, thresh = %.2f: Fraction of pixels marked as DETECTED or " + "DETECTED_NEGATIVE in star_background_detection = %.3f " + "(max is %.3f; min is %.3f)", + n_iter, detection_config.thresholdValue, + 
detected_fraction, self.config.maxDetFracForFinalBg, self.config.minDetFracForFinalBg) + + n_amp = len(exposure.detector.getAmplifiers()) + if self.config.doCheckPerAmpDetectionFraction: + n_above_max_per_amp, highest_detected_fraction_per_amp, no_zero_det_amps = \ + self._compute_per_amp_fraction(exposure, detected_fraction) + + if not no_zero_det_amps: + detection_config.thresholdValue = 0.95*current_threshold + + if (detected_fraction < self.config.maxDetFracForFinalBg and detected_fraction > self.config.minDetFracForFinalBg + and n_above_max_per_amp < int(0.75*n_amp) + and no_zero_det_amps): + if (n_above_max_per_amp < max(1, int(0.15*n_amp)) + or detected_fraction < 0.85*self.config.maxDetFracForFinalBg): + break + else: + self.log.warning("Making small tweak....") + detection_config.thresholdValue = 1.05*current_threshold + self.log.warning("n_above_max_per_amp = %d (abs max is %d)", n_above_max_per_amp, int(0.75*n_amp)) + + self.log.info("Fraction of pixels marked as DETECTED or DETECTED_NEGATIVE is now %.5f " + "(highest per amp section = %.5f)", + detected_fraction, highest_detected_fraction_per_amp) + + if detected_fraction > self.config.maxDetFracForFinalBg: + exposure.mask = dilated_mask + self.log.warning("Final fraction of pixels marked as DETECTED or DETECTED_NEGATIVE " + "was too large in star_background_detection = %.3f (max = %.3f). " + "Reverting to dilated mask from PSF detection...", + detected_fraction, self.config.maxDetFracForFinalBg) + + def _compute_mask_fraction(self, mask): + """Evaluate the fraction of masked pixels in a (set of) mask plane(s). + + Parameters + ---------- + mask : `lsst.afw.image.Mask` + The mask on which to evaluate the fraction. 
+ + Returns + ------- + detected_fraction : `float` + The calculated fraction of masked pixels + """ + bad_pixel_mask = Mask.getPlaneBitMask(self.config.detectedFractionBadMaskPlanes) + n_good_pix = np.sum(mask.array & bad_pixel_mask == 0) + if n_good_pix == 0: + detected_fraction = float("nan") + return detected_fraction + detected_pixel_mask = Mask.getPlaneBitMask(self._DETECTED_MASK_PLANES) + n_detected_pix = np.sum((mask.array & detected_pixel_mask != 0) + & (mask.array & bad_pixel_mask == 0)) + detected_fraction = n_detected_pix/n_good_pix + return detected_fraction + + def _compute_per_amp_fraction(self, exposure, detected_fraction): + """Evaluate the maximum per-amplifier fraction of masked pixels. + + Parameters + ---------- + exposure : `lsst.afw.image.ExposureF` + The exposure on which to compute the per-amp masked fraction. + detected_fraction : `float` + The current detected_fraction of the detected mask planes for the + full detector. + + Returns + ------- + n_above_max_per_amp : `int` + The number of amplifiers with masked fractions above a maximum + value (set by the current full-detector ``detected_fraction``). + highest_detected_fraction_per_amp : `float` + The highest value of the per-amplifier fraction of masked pixels. + no_zero_det_amps : `bool` + A boolean representing whether any of the amplifiers has zero + masked pixels. + """ + highest_detected_fraction_per_amp = -9.99 + n_above_max_per_amp = 0 + n_no_zero_det_amps = 0 + no_zero_det_amps = True + amps = exposure.detector.getAmplifiers() + if amps is not None: + for ia, amp in enumerate(amps): + amp_bbox = amp.getBBox() + exp_bbox = exposure.getBBox() + if not exp_bbox.contains(amp_bbox): + self.log.info("Bounding box of amplifier (%s) does not fit in exposure's " + "bounding box (%s). 
Skipping...", amp_bbox, exp_bbox) + continue + sub_image = exposure.subset(amp.getBBox()) + detected_fraction_amp = self._compute_mask_fraction(sub_image.mask) + self.log.debug("Current detected fraction for amplifier %s = %.3f", + amp.getName(), detected_fraction_amp) + if detected_fraction_amp < 0.002: + n_no_zero_det_amps += 1 + if n_no_zero_det_amps > 2: + no_zero_det_amps = False + break + highest_detected_fraction_per_amp = max(detected_fraction_amp, + highest_detected_fraction_per_amp) + if highest_detected_fraction_per_amp > min(0.998, max(0.8, 3.0*detected_fraction)): + n_above_max_per_amp += 1 + if n_above_max_per_amp > 2: + break + else: + self.log.info("No amplifier object for detector %d, so skipping per-amp " + "detection fraction checks.", exposure.detector.getId()) + return n_above_max_per_amp, highest_detected_fraction_per_amp, no_zero_det_amps + + @contextmanager + def _restore_mask_when_done(self, exposure): + original_mask = exposure.mask.clone() + try: + yield original_mask + finally: + exposure.mask = original_mask diff --git a/python/lsst/meas/algorithms/detection.py b/python/lsst/meas/algorithms/detection.py index ddb737794..9ce8c123a 100644 --- a/python/lsst/meas/algorithms/detection.py +++ b/python/lsst/meas/algorithms/detection.py @@ -600,7 +600,7 @@ def applyThreshold(self, middle, bbox, factor=1.0, factorNeg=None): return results - def finalizeFootprints(self, mask, results, sigma, factor=1.0, factorNeg=None): + def finalizeFootprints(self, mask, results, sigma, factor=1.0, factorNeg=None, growOverride=None): """Finalize the detected footprints. Grow the footprints, set the ``DETECTED`` and ``DETECTED_NEGATIVE`` @@ -629,6 +629,10 @@ def finalizeFootprints(self, mask, results, sigma, factor=1.0, factorNeg=None): for positive detection polarity) is assumed. Note that this is only used here for logging purposes. 
""" + if growOverride is not None: + self.log.warning("config.nSigmaToGrow is set to %.2f, but the caller has set " + "growOverride to %.2f, so the footprints will be grown by " + "%.2f sigma.", self.config.nSigmaToGrow, growOverride, growOverride) factorNeg = factor if factorNeg is None else factorNeg for polarity, maskName in (("positive", "DETECTED"), ("negative", "DETECTED_NEGATIVE")): fpSet = getattr(results, polarity) diff --git a/python/lsst/meas/algorithms/dynamicDetection.py b/python/lsst/meas/algorithms/dynamicDetection.py index cdb51e12b..27fc9f810 100644 --- a/python/lsst/meas/algorithms/dynamicDetection.py +++ b/python/lsst/meas/algorithms/dynamicDetection.py @@ -1,10 +1,13 @@ -__all__ = ["DynamicDetectionConfig", "DynamicDetectionTask", "InsufficientSourcesError"] +__all__ = [ + "DynamicDetectionConfig", + "DynamicDetectionTask", + "InsufficientSourcesError", +] import numpy as np from lsst.pex.config import Field, ConfigurableField -from lsst.pipe.base import Struct from .detection import SourceDetectionConfig, SourceDetectionTask from .skyObjects import SkyObjectsTask @@ -13,6 +16,7 @@ from lsst.afw.geom import makeCdMatrix, makeSkyWcs, SpanSet from lsst.afw.table import SourceCatalog, SourceTable from lsst.meas.base import ForcedMeasurementTask +from lsst.pipe.base import Struct import lsst.afw.image import lsst.afw.math @@ -99,6 +103,15 @@ class DynamicDetectionConfig(SourceDetectionConfig): "suitable locations to lay down sky objects. To allow for best effort " "sky source placement, if True, this allows for a slight erosion of " "the detection masks.") + maxPeakToFootRatio = Field(dtype=float, default=150.0, + doc="Maximum ratio of peak per footprint in the detection mask. " + "This is to help prevent single contiguous footprints that nothing " + "can be done with (i.e. deblending will be skipped). If the current " + "detection plane does not satisfy this constraint, the detection " + "threshold is increased iteratively until it is. 
This behaviour is " + "intended to be an effective no-op for most \"typical\" scenes/standard " + "quality observations, but can avoid total meltdown in, e.g. very " + "crowded regions.") def setDefaults(self): SourceDetectionConfig.setDefaults(self) @@ -139,7 +152,7 @@ def __init__(self, *args, **kwargs): # Set up forced measurement. config = ForcedMeasurementTask.ConfigClass() - config.plugins.names = ['base_TransformedCentroid', 'base_PsfFlux', 'base_LocalBackground'] + config.plugins.names = ["base_TransformedCentroid", "base_PsfFlux"] # We'll need the "centroid" and "psfFlux" slots for slot in ("shape", "psfShape", "apFlux", "modelFlux", "gaussianFlux", "calibFlux"): setattr(config.slots, slot, None) @@ -270,10 +283,7 @@ def calculateThreshold(self, exposure, seed, sigma=None, minFractionSourcesFacto # Calculate new threshold fluxes = catalog["base_PsfFlux_instFlux"] area = catalog["base_PsfFlux_area"] - bg = catalog["base_LocalBackground_instFlux"] - - good = (~catalog["base_PsfFlux_flag"] & ~catalog["base_LocalBackground_flag"] - & np.isfinite(fluxes) & np.isfinite(area) & np.isfinite(bg)) + good = (~catalog["base_PsfFlux_flag"] & np.isfinite(fluxes)) if good.sum() < minNumSources: if not isBgTweak: @@ -302,9 +312,9 @@ def calculateThreshold(self, exposure, seed, sigma=None, minFractionSourcesFacto else: self.log.info("Number of good sky sources used for dynamic detection background tweak:" " %d (of %d requested).", good.sum(), self.skyObjects.config.nSources) - bgMedian = np.median((fluxes/area)[good]) - lq, uq = np.percentile((fluxes - bg*area)[good], [25.0, 75.0]) + bgMedian = np.median((fluxes/area)[good]) + lq, uq = np.percentile(fluxes[good], [25.0, 75.0]) stdevMeas = 0.741*(uq - lq) medianError = np.median(catalog["base_PsfFlux_instFluxErr"][good]) if wcsIsNone: @@ -421,22 +431,59 @@ def detectFootprints(self, exposure, doSmooth=True, sigma=None, clearMask=True, # seed needs to fit in a C++ 'int' so pybind doesn't choke on it seed = (expId if expId is 
not None else int(maskedImage.image.array.sum())) % (2**31 - 1) threshResults = self.calculateThreshold(exposure, seed, sigma=sigma) - factor = threshResults.multiplicative - self.log.info("Modifying configured detection threshold by factor %f to %f", + minMultiplicative = 0.5 + if threshResults.multiplicative < minMultiplicative: + self.log.warning("threshResults.multiplicative = %.2f is less than minimum value (%.2f). " + "Setting to %.2f.", threshResults.multiplicative, minMultiplicative, + minMultiplicative) + factor = max(minMultiplicative, threshResults.multiplicative) + self.log.info("Modifying configured detection threshold by factor %.2f to %.2f", factor, factor*self.config.thresholdValue) - # Blow away preliminary (low threshold) detection mask - self.clearMask(maskedImage.mask) - if not clearMask: - maskedImage.mask.array |= oldDetected - - # Rinse and repeat thresholding with new calculated threshold - results = self.applyThreshold(middle, maskedImage.getBBox(), factor) - results.prelim = prelim - results.background = background if background is not None else lsst.afw.math.BackgroundList() - if self.config.doTempLocalBackground: - self.applyTempLocalBackground(exposure, middle, results) - self.finalizeFootprints(maskedImage.mask, results, sigma, factor=factor) + growOverride = None + inFinalize = True + while inFinalize: + inFinalize = False + # Blow away preliminary (low threshold) detection mask + self.clearMask(maskedImage.mask) + if not clearMask: + maskedImage.mask.array |= oldDetected + + # Rinse and repeat thresholding with new calculated threshold + results = self.applyThreshold(middle, maskedImage.getBBox(), factor) + results.prelim = prelim + results.background = background if background is not None else lsst.afw.math.BackgroundList() + if self.config.doTempLocalBackground: + self.applyTempLocalBackground(exposure, middle, results) + self.finalizeFootprints(maskedImage.mask, results, sigma, factor=factor, + growOverride=growOverride) + 
self.log.warning("nPeaks/nFootprint = %.2f (max is %.1f)", + results.numPosPeaks/results.numPos, + self.config.maxPeakToFootRatio) + if results.numPosPeaks/results.numPos > self.config.maxPeakToFootRatio: + if results.numPosPeaks/results.numPos > 3*self.config.maxPeakToFootRatio: + factor *= 1.4 + else: + factor *= 1.2 + if factor > 2.0: + if growOverride is None: + growOverride = 0.75*self.config.nSigmaToGrow + else: + growOverride *= 0.75 + self.log.warning("Decreasing nSigmaToGrow to %.2f", growOverride) + if factor >= 5: + self.log.warning("New theshold value would be > 5 times the initially requested " + "one (%.2f > %.2f). Leaving inFinalize iteration without " + "getting the number of peaks per footprint below %.1f", + factor*self.config.thresholdValue, self.config.thresholdValue, + self.config.maxPeakToFootRatio) + inFinalize = False + else: + inFinalize = True + self.log.warning("numPosPeaks/numPos (%d) > maxPeakPerFootprint (%.1f). " + "Increasing threshold factor to %.2f and re-running,", + results.numPosPeaks/results.numPos, self.config.maxPeakToFootRatio, + factor) self.clearUnwantedResults(maskedImage.mask, results) @@ -445,15 +492,22 @@ def detectFootprints(self, exposure, doSmooth=True, sigma=None, clearMask=True, self.display(exposure, results, middle) + # Re-do the background tweak after any temporary backgrounds have + # been restored. + # + # But we want to keep any large-scale background (e.g., scattered + # light from bright stars) from being selected for sky objects in + # the calculation, so do another detection pass without either the + # local or wide temporary background subtraction; the DETECTED + # pixels will mark the area to ignore. 
+ + # The following if/else is to work around the fact that it is + # currently not possible to persist an empty BackgroundList, so + # we instead set the value of the background tweak to 0.0 if + # doBackgroundTweak is False and call the tweakBackground function + # regardless to get at least one background into the list (do we + # need a TODO here?). if self.config.doBackgroundTweak: - # Re-do the background tweak after any temporary backgrounds have - # been restored. - # - # But we want to keep any large-scale background (e.g., scattered - # light from bright stars) from being selected for sky objects in - # the calculation, so do another detection pass without either the - # local or wide temporary background subtraction; the DETECTED - # pixels will mark the area to ignore. originalMask = maskedImage.mask.array.copy() try: self.clearMask(exposure.mask) @@ -464,7 +518,9 @@ def detectFootprints(self, exposure, doSmooth=True, sigma=None, clearMask=True, isBgTweak=True).additive finally: maskedImage.mask.array[:] = originalMask - self.tweakBackground(exposure, bgLevel, results.background) + else: + bgLevel = 0.0 + self.tweakBackground(exposure, bgLevel, results.background) return results @@ -485,7 +541,8 @@ def tweakBackground(self, exposure, bgLevel, bgList=None): bg : `lsst.afw.math.BackgroundMI` Constant background model. """ - self.log.info("Tweaking background by %f to match sky photometry", bgLevel) + if bgLevel != 0.0: + self.log.info("Tweaking background by %f to match sky photometry", bgLevel) exposure.image -= bgLevel bgStats = lsst.afw.image.MaskedImageF(1, 1) bgStats.set(bgLevel, 0, bgLevel) @@ -564,7 +621,7 @@ def _computeBrightDetectionMask(self, maskedImage, convolveResults): format(nPixDetNeg, 100*nPixDetNeg/nPix)) if nPixDetNeg/nPix > brightMaskFractionMax or nPixDet/nPix > brightMaskFractionMax: self.log.warn("Too high a fraction (%.1f > %.1f) of pixels were masked with current " - "\"bright\" detection round thresholds. 
Increasing by a factor of %f " + "\"bright\" detection round thresholds. Increasing by a factor of %.2f " "and trying again.", max(nPixDetNeg, nPixDet)/nPix, brightMaskFractionMax, self.config.bisectFactor) diff --git a/python/lsst/meas/algorithms/loadReferenceObjects.py b/python/lsst/meas/algorithms/loadReferenceObjects.py index 4db94fdc2..b65c0f57e 100644 --- a/python/lsst/meas/algorithms/loadReferenceObjects.py +++ b/python/lsst/meas/algorithms/loadReferenceObjects.py @@ -147,6 +147,22 @@ class LoadReferenceObjectsConfig(pexConfig.Config): dtype=bool, default=False, ) + maxRefObjects = pexConfig.Field( + doc="Maximum number of reference objects to send to the matcher. Setting " + "this to a reasonable value may be desirable for memory reasons " + "(particularly in very crowded field).", + dtype=int, + default=None, + optional=True, + ) + minRefMag = pexConfig.Field( + doc="Minimum (i.e. brightest) magnitude for reference catalog (the brightest " + "sources are typically saturated in the images, so may as well remove " + "them from the reference catalog).", + dtype=float, + default=None, + optional=True, + ) def validate(self): super().validate() @@ -232,7 +248,8 @@ def applyProperMotions(self, catalog, epoch): self.log.debug("No epoch provided: not applying proper motion corrections to refcat.") return - # Warn/raise for a catalog in an incorrect format, if epoch was specified. + # Warn/raise for a catalog in an incorrect format, if epoch was + # specified. if "pm_ra" in catalog.schema: pm_ra_radians = False field = catalog.schema["pm_ra"].asField() @@ -290,10 +307,10 @@ def _remapReferenceCatalogSchema(refCat, *, anyFilterMapsToThis=None, mapper.editOutputSchema().disconnectAliases() if centroids: - # Add and initialize centroid and hasCentroid fields (these are - # added after loading to avoid wasting space in the saved catalogs). - # The new fields are automatically initialized to (nan, nan) and - # False so no need to set them explicitly. 
+ # Add and initialize centroid and hasCentroid fields (these + # are added after loading to avoid wasting space in the saved + # catalogs). The new fields are automatically initialized to + # (nan, nan) and False so no need to set them explicitly. mapper.editOutputSchema().addField("centroid_x", type=float, doReplace=True) mapper.editOutputSchema().addField("centroid_y", type=float, doReplace=True) mapper.editOutputSchema().addField("hasCentroid", type="Flag", doReplace=True) @@ -665,11 +682,12 @@ class which intersect or are contained within the specified region. The regionLat.getA().asDegrees(), regionLat.getB().asDegrees()) if filtFunc is None: filtFunc = _FilterCatalog(region) - # filter out all the regions supplied by the constructor that do not overlap + # Filter out all the regions supplied by the constructor that do not + # overlap. overlapList = [] for dataId, refCat in zip(self.dataIds, self.refCats): - # SphGeom supports some objects intersecting others, but is not symmetric, - # try the intersect operation in both directions + # SphGeom supports some objects intersecting others, but is not + # symmetric, try the intersect operation in both directions. try: intersects = dataId.region.intersects(region) except TypeError: @@ -678,10 +696,27 @@ class which intersect or are contained within the specified region. The if intersects: overlapList.append((dataId, refCat)) - if len(overlapList) == 0: + nOverlap = len(overlapList) + if nOverlap == 0: raise RuntimeError("No reference tables could be found for input region") + if self.config.maxRefObjects is not None: + maxRefObjectsPerInput = int(self.config.maxRefObjects/nOverlap) + else: + maxRefObjectsPerInput = None + + if self.config.anyFilterMapsToThis is not None: + refFluxField = self.config.anyFilterMapsToThis + "_flux" + else: + refFluxField = None + firstCat = overlapList[0][1].get() + # Filter catalog if limits were imposed by the maxRefObjects and/or + # minRefMag config settings. 
+ if (refFluxField is not None + and (maxRefObjectsPerInput is not None or self.config.minRefMag is not None)): + firstCat = filterRefCat(firstCat, refFluxField, maxRefObjectsPerInput, + minRefMag=self.config.minRefMag, log=self.log) refCat = filtFunc(firstCat, overlapList[0][0].region) trimmedAmount = len(firstCat) - len(refCat) @@ -692,7 +727,13 @@ class which intersect or are contained within the specified region. The if tmpCat.schema != firstCat.schema: raise TypeError("Reference catalogs have mismatching schemas") - filteredCat = filtFunc(tmpCat, dataId.region) + if maxRefObjectsPerInput is not None or self.config.minRefMag is not None: + filteredCat = filterRefCat(tmpCat, refFluxField, maxRefObjectsPerInput, + minRefMag=self.config.minRefMag, log=self.log) + else: + filteredCat = tmpCat + filteredCat = filtFunc(filteredCat, dataId.region) + refCat.extend(filteredCat) trimmedAmount += len(tmpCat) - len(filteredCat) @@ -804,6 +845,86 @@ def loadSchema(self, filterName): return pipeBase.Struct(schema=expandedEmptyCat.schema, fluxField=fluxField) +def filterRefCat(refCat, refFluxField, maxRefObjects=None, minRefMag=None, log=None): + """Sub-select a number of reference objects starting from the brightest + and maxing out at the number specified by maxRefObjects. + + No further trimming is done if len(refCat) <= maxRefObjects after trimming + to minRefMag. + + Parameters + ---------- + refCat : `lsst.afw.table.SimpleCatalog` + Catalog of reference objects to trim. + refFluxField : `str` + Field of refCat to use for flux. + maxRefObjects : `int` or `None`, optional + Maximum number of reference objects (i.e. trim refCat down to + this number of objects). + minRefMag : `float` or `None`, optional + Minimum (i.e. brightest) magnitude to include in the reference + catalog. + log : `lsst.log.Log` or `logging.Logger` or `None`, optional + Logger object used to write out messages. If `None`, no messages + will be logged. 
+ + Returns + ------- + filteredCat : `lsst.afw.table.SimpleCatalog` + Catalog trimmed to the maximum brightness and/or maximum number set + in the task config from the brightest flux down. + """ + if maxRefObjects is None and minRefMag is None: + if log is not None: + log.debug("No filtering of the reference catalog has been actually requested " + "(i.e. maxRefObjects and minRefMag are both `None`). Returning " + "original catalog.") + return refCat + + filteredCat = refCat.copy(deep=True) + + if minRefMag is not None: + refFlux = filteredCat.get(refFluxField) + refMag = (refFlux*astropy.units.nJy).to_value(astropy.units.ABmag) + if numpy.nanmin(refMag) <= minRefMag: + filteredCat = filteredCat[(refMag > minRefMag)].copy(deep=True) + if log is not None: + log.info("Trimming the loaded reference catalog to %s > %.2f", refFluxField, minRefMag) + + if maxRefObjects is not None: + if len(filteredCat) <= maxRefObjects: + if log is not None: + log.debug("Number of reference objects in reference catalog = %d (max is %d). " + "Returning catalog without further filtering.", + len(filteredCat), maxRefObjects) + return filteredCat + if log is not None: + log.info("Trimming number of reference objects in refCat from %d down to %d. 
", + len(refCat), maxRefObjects) + if not filteredCat.isContiguous(): + filteredCat = filteredCat.copy(deep=True) + + refFlux = filteredCat.get(refFluxField) + sortedRefFlux = refFlux[refFlux.argsort()] + minRefFlux = sortedRefFlux[-(maxRefObjects + 1)] + + selected = (filteredCat.get(refFluxField) > minRefFlux) + filteredCat = filteredCat[selected] + + if not filteredCat.isContiguous(): + filteredCat = filteredCat.copy(deep=True) + + if log is not None: + if len(filteredCat) > 0: + refFlux = filteredCat[refFluxField] + refMag = (refFlux*astropy.units.nJy).to_value(astropy.units.ABmag) + log.info("Reference catalog magnitude range for filter %s after trimming: refMagMin = %.2f; " + "refMagMax = %.2f", refFluxField, numpy.nanmin(refMag), numpy.nanmax(refMag)) + else: + log.warning("Length of reference catalog after filtering is 0.") + return filteredCat + + def getRefFluxField(schema, filterName): """Get the name of a flux field from a schema. diff --git a/python/lsst/meas/algorithms/sourceSelector.py b/python/lsst/meas/algorithms/sourceSelector.py index 8888b1c17..e40f920d4 100644 --- a/python/lsst/meas/algorithms/sourceSelector.py +++ b/python/lsst/meas/algorithms/sourceSelector.py @@ -22,6 +22,7 @@ __all__ = ["BaseSourceSelectorConfig", "BaseSourceSelectorTask", "sourceSelectorRegistry", "ColorLimit", "MagnitudeLimit", "SignalToNoiseLimit", "MagnitudeErrorLimit", "RequireFlags", "RequireUnresolved", "RequireFiniteRaDec", "RequirePrimary", + "CullFromMaskedRegion", "CentroidErrorLimit", "ScienceSourceSelectorConfig", "ScienceSourceSelectorTask", "ReferenceSourceSelectorConfig", "ReferenceSourceSelectorTask", "NullSourceSelectorTask" @@ -639,6 +640,41 @@ def apply(self, catalog, exposure): return selected +class CentroidErrorLimit(BaseLimit): + """Select sources using a limit on the centroid errors. 
+ + This object can be used as a `lsst.pex.config.Config` for configuring + the limit, and then the `apply` method can be used to identify sources + in the catalog that match the configured limit. + """ + centroidField = pexConfig.Field(dtype=str, default="slot_Centroid", + doc="Name of the source centroid field to use.") + + def setDefaults(self): + self.maximum = 3.0 + + def apply(self, catalog): + """Apply the limit on source centroid errors to a catalog. + + Parameters + ---------- + catalog : `lsst.afw.table.SourceCatalog` + Catalog of sources to which the limit will be applied. + + Returns + ------- + selected : `numpy.ndarray` + Boolean array indicating for each source whether it is selected + (True means selected). + """ + xErrField = self.centroidField + "_xErr" + yErrField = self.centroidField + "_yErr" + selected = ((np.isfinite(catalog[xErrField])) & (np.isfinite(catalog[yErrField]))) + selected &= BaseLimit.apply(self, catalog[xErrField]) + selected &= BaseLimit.apply(self, catalog[yErrField]) + return selected + + class ScienceSourceSelectorConfig(pexConfig.Config): """Configuration for selecting science sources""" doFluxLimit = pexConfig.Field(dtype=bool, default=False, doc="Apply flux limit?") @@ -652,6 +688,7 @@ class ScienceSourceSelectorConfig(pexConfig.Config): doc="Apply source is primary check?") doSkySources = pexConfig.Field(dtype=bool, default=False, doc="Include sky sources, unioned with all other criteria?") + doCentroidErrorLimit = pexConfig.Field(dtype=bool, default=False, doc="Apply limit on centroid errors?") fluxLimit = pexConfig.ConfigField(dtype=FluxLimit, doc="Flux limit to apply") flags = pexConfig.ConfigField(dtype=RequireFlags, doc="Flags to require") unresolved = pexConfig.ConfigField(dtype=RequireUnresolved, doc="Star/galaxy separation to apply") @@ -662,6 +699,8 @@ class ScienceSourceSelectorConfig(pexConfig.Config): requirePrimary = pexConfig.ConfigField(dtype=RequirePrimary, doc="Primary source criteria to apply") skyFlag 
= pexConfig.ConfigField(dtype=RequireFlags, doc="Sky source flag to include") + centroidErrorLimit = pexConfig.ConfigField(dtype=CentroidErrorLimit, + doc="Limit to place on centroid errors.") def setDefaults(self): pexConfig.Config.setDefaults(self) @@ -728,6 +767,8 @@ def selectSources(self, sourceCat, matches=None, exposure=None): selected &= self.config.requirePrimary.apply(sourceCat) if self.config.doSkySources: selected |= self.config.skyFlag.apply(sourceCat) + if self.config.doCentroidErrorLimit: + selected &= self.config.centroidErrorLimit.apply(sourceCat) self.log.info("Selected %d/%d sources", selected.sum(), len(sourceCat)) diff --git a/tests/test_adaptiveThresholdDetection.py b/tests/test_adaptiveThresholdDetection.py new file mode 100644 index 000000000..69c2e53f1 --- /dev/null +++ b/tests/test_adaptiveThresholdDetection.py @@ -0,0 +1,88 @@ +import unittest + +import lsst.utils.tests +import numpy as np +from lsst.afw.geom import makeCdMatrix, makeSkyWcs +from lsst.afw.table import SourceTable +from lsst.geom import Box2I, Extent2I, Point2D, Point2I, SpherePoint, degrees +from lsst.meas.algorithms.adaptive_thresholds import AdaptiveThresholdDetectionTask +from lsst.meas.algorithms.testUtils import plantSources + + +class AdaptiveThresholdDetectionTest(lsst.utils.tests.TestCase): + def setUp(self): + xy0 = Point2I(12345, 67890) # xy0 for image + dims = Extent2I(2345, 2345) # Dimensions of image + self.box = Box2I(xy0, dims) # Bounding box of image + self.sigma = 3.21 # PSF sigma + self.nSigmaForKernel = 5.0 # Number of PSF sigmas for kernel + self.sky = 12345.6 # Sky level + noise = np.sqrt(self.sky)*np.pi*self.sigma**2 # Poisson noise per PSF + self.faint = 1.0*noise # Faintest level for star fluxes + self.bright = 100.0*noise # Brightest level for star fluxes + self.starBox = Box2I(self.box) # Area on image in which we can put star centers + starBoxBuffer = 4.0 # Buffer for star centers around edge + self.starBox.grow(-int(starBoxBuffer*self.sigma)) + 
self.scale = 1.0e-5*degrees # Pixel scale + self.config = AdaptiveThresholdDetectionTask.ConfigClass() + self.rtol = 1e-4 + + def makeMockExposure(self, numStars=300): + rng = np.random.Generator(np.random.MT19937(12345)) + stars = [(xx, yy, ff, self.sigma) for xx, yy, ff in + zip(rng.uniform(self.starBox.getMinX(), self.starBox.getMaxX(), numStars), + rng.uniform(self.starBox.getMinY(), self.starBox.getMaxY(), numStars), + np.linspace(self.faint, self.bright, numStars))] + exposure = plantSources(self.box, 2*int(self.nSigmaForKernel*self.sigma) + 1, self.sky, stars, True) + exposure.setWcs(makeSkyWcs(crpix=Point2D(0, 0), + crval=SpherePoint(0, 0, degrees), + cdMatrix=makeCdMatrix(scale=self.scale))) + return exposure + + def tearDown(self): + # del self.exposure + del self.config + + def check(self, expectFactor, initialThreshold=None): + if initialThreshold is None: + maxSn = float(np.nanmax(self.exposure.image.array/np.sqrt(self.exposure.variance.array))) + initialThreshold = min(maxSn, 5.0) + schema = SourceTable.makeMinimalSchema() + table = SourceTable.make(schema) + self.config.baseline.thresholdValue = initialThreshold + task = AdaptiveThresholdDetectionTask(config=self.config) + results = task.run(table, self.exposure) + self.assertFloatsAlmostEqual(results.thresholdValue, expectFactor, rtol=self.rtol) + + def testUncrowded(self): + """Add sparse-ish numbers of stars. + """ + self.exposure = self.makeMockExposure() + self.check(5.0) + self.exposure = self.makeMockExposure(numStars=100) + self.check(5.0) + + def testCrowded(self): + """Add enough stars to be considered crowded and test adjusting + initial detection threshold. 
+ """ + self.exposure = self.makeMockExposure(numStars=8000) + self.check(23.8) + self.exposure = self.makeMockExposure(numStars=8000) + self.check(50.0, initialThreshold=50.0) + self.exposure = self.makeMockExposure(numStars=25000) + self.check(173.4, initialThreshold=20.0) + + +class TestMemory(lsst.utils.tests.MemoryTestCase): + pass + + +def setup_module(module): + lsst.utils.tests.init() + + +if __name__ == "__main__": + import sys + setup_module(sys.modules['__main__']) + unittest.main() diff --git a/tests/test_dynamicDetection.py b/tests/test_dynamicDetection.py index 3ca6e5529..a5145da10 100644 --- a/tests/test_dynamicDetection.py +++ b/tests/test_dynamicDetection.py @@ -3,7 +3,6 @@ import lsst.utils.tests import numpy as np from lsst.afw.geom import makeCdMatrix, makeSkyWcs -from lsst.afw.image import PARENT from lsst.afw.table import SourceTable from lsst.geom import Box2I, Extent2I, Point2D, Point2I, SpherePoint, degrees from lsst.meas.algorithms import DynamicDetectionTask @@ -37,35 +36,8 @@ def setUp(self): crval=SpherePoint(0, 0, degrees), cdMatrix=makeCdMatrix(scale=scale))) - # Make a large area of extra background; we should be robust against - # it. Unfortunately, some tuning is required here to get something - # challenging but not impossible: - # * A very large box will cause failures because the "extra" and the - # "normal" are reversed. - # * A small box will not be challenging because it's simple to clip - # out. - # * A large value will cause failures because it produces large edges - # in background-subtrction that broaden flux distributions. - # * A small value will not be challenging because it has little effect. 
- extraBox = Box2I(xy0 + Extent2I(345, 456), Extent2I(1234, 1234)) # Box for extra background - extraValue = 0.5*noise # Extra background value to add in - self.exposure.image[extraBox, PARENT] += extraValue - self.config = DynamicDetectionTask.ConfigClass() - self.config.skyObjects.nSources = 300 - self.config.reEstimateBackground = False - self.config.doTempWideBackground = True - self.config.thresholdType = "pixel_stdev" - - # Relative tolerance for tweak factor. - # Not sure why this isn't smaller; maybe due to use of Poisson instead - # of Gaussian noise? - # It seems as if some sky objects are being placed in the extra - # background region, which is causing the offset between the expected - # factor and the measured factor to be larger than otherwise expected. - # This relative tolerance was increased from 0.1 to 0.15 on DM-23781 to - # account for this. - self.rtol = 0.15 + self.rtol = 0.1 def tearDown(self): del self.exposure