Commit 02d9d07

make sure background is subtracted before doing coaddition or subtraction
guynir42 committed Jun 14, 2024
1 parent bd5f51f commit 02d9d07
Showing 8 changed files with 98 additions and 54 deletions.
6 changes: 6 additions & 0 deletions improc/alignment.py
@@ -520,6 +520,12 @@ def run( self, source_image, target_image ):
if target_image == source_image:
warped_image = Image.copy_image( source_image )
warped_image.type = 'Warped'
if source_image.bg is None:
warnings.warn("No background image found. Using original image data.")
warped_image.data = source_image.data
else:
warped_image.data = source_image.data_bg

warped_image.psf = source_image.psf
warped_image.zp = source_image.zp
warped_image.wcs = source_image.wcs
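A minimal sketch of the fallback this hunk introduces, with a hypothetical helper standing in for the Image plumbing: prefer background-subtracted pixels, and fall back to the raw data with a warning when no Background is attached.

import warnings

def pixels_for_warping(image):
    # Mirrors the hunk above: use background-subtracted data when a
    # Background object is attached, otherwise warn and use raw pixels.
    if image.bg is None:
        warnings.warn("No background image found. Using original image data.")
        return image.data
    return image.data_bg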
10 changes: 8 additions & 2 deletions models/image.py
@@ -2028,14 +2028,20 @@ def data_bg(self):
"""The image data, after subtracting the background. If no Background object is loaded, will raise. """
if self.bg is None:
raise ValueError("No background is loaded for this image.")
return self.data - self.bg.counts
if self.bg.format == 'scalar':
return self.data - self.bg.value
else:
return self.data - self.bg.counts

@property
def nandata_bg(self):
"""The image data, after subtracting the background and masking with NaNs wherever the flag is not zero. """
if self.bg is None:
raise ValueError("No background is loaded for this image.")
return self.nandata - self.bg.counts
if self.bg.format == 'scalar':
return self.nandata - self.bg.value
else:
return self.nandata - self.bg.counts

def show(self, **kwargs):
"""
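Both properties above dispatch on the Background format. A hedged sketch of that dispatch, assuming a Background exposes a format string plus either a scalar value or a per-pixel counts array:

def subtract_background(data, bg):
    # data: 2D pixel array; bg: Background-like object (interface assumed).
    if bg is None:
        raise ValueError("No background is loaded for this image.")
    if bg.format == 'scalar':
        return data - bg.value   # a single number for the whole frame
    return data - bg.counts      # a per-pixel background map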
22 changes: 21 additions & 1 deletion models/reference.py
@@ -6,6 +6,7 @@
from models.provenance import Provenance
from models.source_list import SourceList
from models.psf import PSF
from models.background import Background
from models.world_coordinates import WorldCoordinates
from models.zero_point import ZeroPoint

@@ -138,6 +139,7 @@ class Reference(Base, AutoIDMixin):
def __init__(self, **kwargs):
self.sources = None
self.psf = None
self.bg = None
self.wcs = None
self.zp = None
super().__init__(**kwargs)
@@ -150,6 +152,7 @@ def __setattr__(self, key, value):
self.section_id = value.section_id
self.sources = value.sources
self.psf = value.psf
self.bg = value.bg
self.wcs = value.wcs
self.zp = value.zp

@@ -160,6 +163,7 @@ def init_on_load(self):
Base.init_on_load(self)
self.sources = None
self.psf = None
self.bg = None
self.wcs = None
self.zp = None
this_object_session = orm.Session.object_session(self)
@@ -169,7 +173,7 @@ def make_provenance(self):
def make_provenance(self):
"""Make a provenance for this reference image. """
upstreams = [self.image.provenance]
for att in ['image', 'sources', 'psf', 'wcs', 'zp']:
for att in ['image', 'sources', 'psf', 'bg', 'wcs', 'zp']:
if getattr(self, att) is not None:
upstreams.append(getattr(self, att).provenance)
else:
@@ -202,6 +206,8 @@ def get_upstream_provenances(self):
prov.append(self.sources.provenance)
if self.psf is not None and self.psf.provenance is not None and self.psf.provenance.id is not None:
prov.append(self.psf.provenance)
if self.bg is not None and self.bg.provenance is not None and self.bg.provenance.id is not None:
prov.append(self.bg.provenance)
if self.wcs is not None and self.wcs.provenance is not None and self.wcs.provenance.id is not None:
prov.append(self.wcs.provenance)
if self.zp is not None and self.zp.provenance is not None and self.zp.provenance.id is not None:
@@ -245,6 +251,20 @@ def load_upstream_products(self, session=None):
self.image.psf = psfs[0]
self.psf = psfs[0]

bgs = session.scalars(
sa.select(Background).where(
Background.image_id == self.image.id,
Background.provenance_id.in_(prov_ids),
)
).all()
if len(bgs) > 1:
raise ValueError(
f"Image {self.image_id} has more than one Background matching upstream provenance."
)
elif len(bgs) == 1:
self.image.bg = bgs[0]
self.bg = bgs[0]

if self.sources is not None:
wcses = session.scalars(
sa.select(WorldCoordinates).where(
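The new Background branch repeats the query shape already used for PSF, WCS, and ZeroPoint: fetch products for this image whose provenance is among the upstream provenances, and insist on at most one match. A sketch of the shared pattern (wrapper name hypothetical):

import sqlalchemy as sa

def load_unique_product(session, model, image_id, prov_ids):
    # At most one product of each type may match the upstream provenances.
    rows = session.scalars(
        sa.select(model).where(
            model.image_id == image_id,
            model.provenance_id.in_(prov_ids),
        )
    ).all()
    if len(rows) > 1:
        raise ValueError(
            f"Image {image_id} has more than one {model.__name__} matching upstream provenance."
        )
    return rows[0] if rows else None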
13 changes: 2 additions & 11 deletions pipeline/subtraction.py
@@ -155,21 +155,12 @@ def _subtract_zogy(self, new_image, ref_image):
ref_image_data = ref_image.data
new_image_psf = new_image.psf.get_clip()
ref_image_psf = ref_image.psf.get_clip()
new_image_noise = new_image.bkg_rms_estimate # TODO: improve this by using a Background object?
ref_image_noise = 1.0 # proper coaddition images have noise=1.0 by construction
new_image_noise = new_image.bkg_rms_estimate
ref_image_noise = ref_image.bkg_rms_estimate
new_image_flux_zp = 10 ** (0.4 * new_image.zp.zp)
ref_image_flux_zp = 10 ** (0.4 * ref_image.zp.zp)
# TODO: consider adding an estimate for the astrometric uncertainty dx, dy

# do some additional processing of the new image
mu, sigma = sigma_clipping(new_image_data)
new_image_data = (new_image_data - mu) / sigma # TODO: skip this if we already background subtracted
if new_image_noise is not None:
new_image_noise = new_image_noise / sigma
else:
new_image_noise = 1.0 # TODO: this can go away after we verify images always have background estimates!
new_image_flux_zp = new_image_flux_zp / sigma

new_image_data = self.inpainter.run(new_image_data, new_image.flags, new_image.weight)

output = zogy_subtract(
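With backgrounds subtracted upstream, the sigma-clipped renormalization block is gone and both images feed their own noise estimate into ZOGY; zero points are converted to flux scalings as F = 10**(0.4 * zp). A sketch of the inputs as now assembled (a hypothetical helper; attribute names follow the hunk above):

def zogy_noise_and_zp(new_image, ref_image):
    # Both images report their own background RMS instead of assuming the
    # reference coadd was renormalized to noise == 1.0.
    new_noise = new_image.bkg_rms_estimate
    ref_noise = ref_image.bkg_rms_estimate
    # Zero point -> flux scaling for each image.
    new_flux_zp = 10 ** (0.4 * new_image.zp.zp)
    ref_flux_zp = 10 ** (0.4 * ref_image.zp.zp)
    return new_noise, ref_noise, new_flux_zp, ref_flux_zp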
22 changes: 15 additions & 7 deletions tests/fixtures/ptf.py
@@ -20,6 +20,7 @@
from models.image import Image
from models.source_list import SourceList
from models.psf import PSF
from models.background import Background
from models.world_coordinates import WorldCoordinates
from models.zero_point import ZeroPoint
from models.reference import Reference
@@ -422,7 +423,7 @@ def ptf_ref(
is_testing=True,
)

cache_base_name = f'187/PTF_20090405_073932_11_R_ComSci_{im_prov.id[:6]}_u-wswtff'
cache_base_name = f'187/PTF_20090405_073932_11_R_ComSci_{im_prov.id[:6]}_u-iqxrjn'

# this provenance is used for sources, psf, bg, wcs, zp
sources_prov = Provenance(
@@ -434,8 +435,9 @@ )
)
extensions = [
'image.fits',
f'psf_{sources_prov.id[:6]}.fits',
f'sources_{sources_prov.id[:6]}.fits',
f'psf_{sources_prov.id[:6]}.fits',
f'bg_{sources_prov.id[:6]}.h5',
f'wcs_{sources_prov.id[:6]}.txt',
'zp'
]
@@ -452,18 +454,23 @@
coadd_image.ref_image_id = ptf_reference_images[-1].id # make sure to replace the ID with the new DB value
assert coadd_image.provenance_id == coadd_image.provenance.id

# get the PSF:
coadd_image.psf = copy_from_cache(PSF, ptf_cache_dir, cache_base_name + f'.psf_{sources_prov.id[:6]}.fits')
coadd_image.psf.provenance = sources_prov
assert coadd_image.psf.provenance_id == coadd_image.psf.provenance.id

# get the source list:
coadd_image.sources = copy_from_cache(
SourceList, ptf_cache_dir, cache_base_name + f'.sources_{sources_prov.id[:6]}.fits'
)
coadd_image.sources.provenance = sources_prov
assert coadd_image.sources.provenance_id == coadd_image.sources.provenance.id

# get the PSF:
coadd_image.psf = copy_from_cache(PSF, ptf_cache_dir, cache_base_name + f'.psf_{sources_prov.id[:6]}.fits')
coadd_image.psf.provenance = sources_prov
assert coadd_image.psf.provenance_id == coadd_image.psf.provenance.id

# get the background:
coadd_image.bg = copy_from_cache(Background, ptf_cache_dir, cache_base_name + f'.bg_{sources_prov.id[:6]}.h5')
coadd_image.bg.provenance = sources_prov
assert coadd_image.bg.provenance_id == coadd_image.bg.provenance.id

# get the WCS:
coadd_image.wcs = copy_from_cache(
WorldCoordinates, ptf_cache_dir, cache_base_name + f'.wcs_{sources_prov.id[:6]}.txt'
@@ -491,6 +498,7 @@ def ptf_ref(
copy_to_cache(pipe.datastore.image, ptf_cache_dir)
copy_to_cache(pipe.datastore.sources, ptf_cache_dir)
copy_to_cache(pipe.datastore.psf, ptf_cache_dir)
copy_to_cache(pipe.datastore.bg, ptf_cache_dir)
copy_to_cache(pipe.datastore.wcs, ptf_cache_dir)
copy_to_cache(pipe.datastore.zp, ptf_cache_dir, cache_base_name + '.zp.json')

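Each cached product above is restored with the same three steps: copy_from_cache, reattach its provenance, sanity-check the id. A hypothetical wrapper naming that pattern (helper names and signatures as used in the fixture):

def restore_product(model, cache_dir, base_name, ext, provenance):
    product = copy_from_cache(model, cache_dir, base_name + ext)
    product.provenance = provenance
    assert product.provenance_id == product.provenance.id
    return product

# e.g. the new background product:
# coadd_image.bg = restore_product(
#     Background, ptf_cache_dir, cache_base_name,
#     f'.bg_{sources_prov.id[:6]}.h5', sources_prov,
# )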
3 changes: 2 additions & 1 deletion tests/improc/test_zogy.py
@@ -428,7 +428,8 @@ def test_subtraction_seeing_background():
raise ValueError(
f'seeing: ({ref_seeing:.2f}, {new_seeing:.2f}), '
f'background: ({ref_bkg:.2f}, {new_bkg:.2f}), '
f'expected/measured: ({expected:.3f}, {measured:.3f})'
f'expected/measured: ({expected:.3f}, {measured:.3f}), '
f'loss: {(expected - measured) / expected:.3f}'
)


2 changes: 1 addition & 1 deletion tests/models/test_measurements.py
@@ -74,7 +74,7 @@ def test_measurements_attributes(measurer, ptf_datastore, test_config):
# TODO: add test for limiting magnitude (issue #143)


@pytest.mark.skip(reason="This test fails on GA but not locally, see issue #306")
# @pytest.mark.skip(reason="This test fails on GA but not locally, see issue #306")
# @pytest.mark.flaky(max_runs=3)
def test_filtering_measurements(ptf_datastore):
# print out the list of relevant environment variables:
74 changes: 43 additions & 31 deletions tests/pipeline/test_coaddition.py
@@ -21,8 +21,8 @@
from pipeline.photo_cal import PhotCalibrator


def estimate_psf_width(data, sz=15, upsampling=25):
"""Extract a bright star and estimate its FWHM.
def estimate_psf_width(data, sz=9, upsampling=100, num_stars=25):
"""Extract a few bright stars and estimate their median FWHM.
This is a very rough-and-dirty method used only for testing.
@@ -38,6 +38,9 @@ def estimate_psf_width(data, sz=15, upsampling=25):
upsampling: int
The factor by which to up-sample the PSF.
Default is 100.
num_stars: int
The number of stars to use to estimate the FWHM.
Default is 25.
Returns
-------
@@ -51,39 +54,48 @@
data[:, 0:sz] = np.nan
data[:, -sz:] = np.nan

psf = extract_psf_surrogate(data, sz=sz, upsampling=upsampling)
flux = []
area = []
radii = np.array(range(1, psf.shape[0], 2))
x, y = np.meshgrid(np.arange(psf.shape[0]), np.arange(psf.shape[1]))
rmap = np.sqrt((x - psf.shape[0] // 2) ** 2 + (y - psf.shape[1] // 2) ** 2)

for r in radii:
mask = (rmap <= r + 1) & (rmap > r - 1)
area.append(np.sum(mask))
flux.append(np.sum(psf[mask]))

flux = np.array(flux)
area = np.array(area, dtype=float)
area[area == 0] = np.nan
flux_n = flux / area # normalize by the area of the annulus

# go over the flux difference curve and find where it drops below half the peak flux:
peak = np.nanmax(flux_n)
idx = np.where(flux_n <= peak / 2)[0][0]

fwhm = radii[idx] * 2 / upsampling
fwhms = []
for i in range(num_stars):
psf = extract_psf_surrogate(data, sz=sz, upsampling=upsampling)
flux = []
area = []
radii = np.array(range(1, psf.shape[0] // 2, 2))
x, y = np.meshgrid(np.arange(psf.shape[0]), np.arange(psf.shape[1]))
rmap = np.sqrt((x - psf.shape[1] // 2) ** 2 + (y - psf.shape[0] // 2) ** 2)

for r in radii:
mask = (rmap <= r + 1) & (rmap > r - 1)
area.append(np.sum(mask))
flux.append(np.sum(psf[mask]))

flux = np.array(flux)
area = np.array(area, dtype=float)
area[area == 0] = np.nan
flux_n = flux / area # normalize by the area of the annulus

# go over the flux difference curve and find where it drops below half the peak flux:
peak = np.nanmax(flux_n)
idx = np.where(flux_n <= peak / 2)[0][0]

fwhm = radii[idx] * 2 / upsampling
fwhms.append(fwhm)

fwhm = np.nanmedian(fwhms)
# print(f'fwhm median= {fwhm}, fwhm_err= {np.std(fwhms)}')

return fwhm

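The loop above builds a radial profile of flux per unit annulus area and takes the first radius where it falls to half the peak; the FWHM is twice that radius, scaled back down by the up-sampling factor. A quick sanity check on a synthetic Gaussian star (all parameters here are illustrative):

import numpy as np

def synthetic_star(shape=(256, 256), sigma=2.0, flux=1000.0):
    y, x = np.mgrid[:shape[0], :shape[1]]
    r2 = (x - shape[1] / 2) ** 2 + (y - shape[0] / 2) ** 2
    return flux * np.exp(-0.5 * r2 / sigma ** 2)

# For a Gaussian, FWHM = 2 * sqrt(2 * ln 2) * sigma ~= 2.355 * sigma,
# so the estimate should land near 4.7 pixels:
fwhm = estimate_psf_width(synthetic_star(sigma=2.0), num_stars=1)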

def extract_psf_surrogate(data, sz=15, upsampling=25):
def extract_psf_surrogate(data, sz=9, upsampling=100):
"""Extract a rough estimate for the PSF from the brightest (non-flagged) star in the image.
This is a very rough-and-dirty method used only for testing.
Assumes the data array has NaNs at all masked pixel locations.
Will mask the area of the chosen star so that the same array can be
re-used to find progressively fainter stars.
Parameters
----------
data: ndarray
@@ -110,7 +122,8 @@ def extract_psf_surrogate(data, sz=15, upsampling=25):
edge_y1 = max(0, y - sz)
edge_y2 = min(data.shape[0], y + sz)

psf = data[edge_y1:edge_y2, edge_x1:edge_x2]
psf = data[edge_y1:edge_y2, edge_x1:edge_x2].copy()
data[edge_y1:edge_y2, edge_x1:edge_x2] = np.nan # can re-use this array to find other stars

# up-sample the PSF by the given factor:
psf = ifft2(fftshift(np.pad(fftshift(fft2(psf)), sz*upsampling))).real
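The last line up-samples by zero-padding in the Fourier domain: padding the centered spectrum of the (2*sz)-pixel cutout with sz*upsampling zeros on each side and transforming back resamples it onto a grid roughly upsampling times finer. The trick in isolation:

import numpy as np
from numpy.fft import fft2, ifft2, fftshift

def fourier_upsample(img, sz, upsampling):
    # Zero-pad the centered 2D spectrum, then invert: band-limited
    # interpolation onto a finer grid (same expression as the line above).
    return ifft2(fftshift(np.pad(fftshift(fft2(img)), sz * upsampling))).real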
@@ -260,18 +273,17 @@ def test_zogy_vs_naive(ptf_aligned_images, coadder):
# get the FWHM estimate for the regular images and for the coadd
fwhms = []
for im in ptf_aligned_images:
im_nan = im.data.copy()
im_nan[im.flags > 0] = np.nan
fwhms.append(estimate_psf_width(im_nan))
# choose an area in the middle of the image
fwhms.append(estimate_psf_width(im.nandata[1800:2600, 600:1400]))

fwhms = np.array(fwhms)

zogy_im_nans = zogy_im.copy()
zogy_im_nans[zogy_fl > 0] = np.nan
zogy_fwhm = estimate_psf_width(zogy_im_nans)
zogy_fwhm = estimate_psf_width(zogy_im_nans[1800:2600, 600:1400])
naive_im_nans = naive_im.copy()
naive_im_nans[naive_fl > 0] = np.nan
naive_fwhm = estimate_psf_width(naive_im_nans)
naive_fwhm = estimate_psf_width(naive_im_nans[1800:2600, 600:1400])

assert all(zogy_fwhm <= fwhms) # the ZOGY PSF should be narrower than original PSFs
assert zogy_fwhm < naive_fwhm
