Commit 6ca47fd

Merge branch 'decam_swarp_reference' of github.com:rknop/SeeChange into decam_swarp_reference
rknop committed Oct 17, 2024
2 parents 4071b86 + 6dd4108; commit 6ca47fd
Showing 8 changed files with 9 additions and 14 deletions.
2 changes: 1 addition & 1 deletion improc/scamp.py
@@ -122,7 +122,7 @@ def solve_wcs_scamp( sources, catalog, crossid_radius=2.,
 
 t0 = time.perf_counter()
 # Don't catch the timeout exception; assume that if it times out first time around,
-# it's not going to succeed with other paramters. This is maybe too strong of
+# it's not going to succeed with other parameters. This is maybe too strong of
 # an assumption, but it will often be true, and when it is, catching the exception
 # and retrying turns this into a multi-minute timewaster.
 res = subprocess.run( command, capture_output=True, timeout=timeout )
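The comment in this hunk encodes a deliberate failure policy: let one hard timeout kill the attempt rather than catch and retry with the same inputs. A minimal sketch of that pattern, assuming a generic external command (the function name here is illustrative, not SeeChange's API):

    import subprocess

    def run_solver_once(command, timeout):
        # subprocess.run raises subprocess.TimeoutExpired when the child
        # exceeds `timeout` seconds; deliberately not catching it means a
        # stalled run fails fast instead of burning several multi-minute
        # retries on parameters that are unlikely to help.
        return subprocess.run(command, capture_output=True, timeout=timeout)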
4 changes: 2 additions & 2 deletions models/calibratorfile.py
@@ -327,11 +327,11 @@ def acquire_lock( cls, instrument, section, calibset, calibtype, flattype=None,
 # each time, but that had a race condition of its own. When launching a
 # bunch of processes with a multiprocessing pool, they'd all be synchronized
 # enough that multiple processes would get to a long sleep at the same time,
-# and then all pool for the lock at clse enough to the same time that only
+# and then all pool for the lock at close enough to the same time that only
 # one would get it. The rest would all wait a very long time (while, for
 # most of it, no lock was being held) before trying again. They'd only have
 # a few tries left, and ultimately several would fail. So, instead, wait a
-# random amount of time, to prevent syncronization.
+# random amount of time, to prevent synchronization.
 tsleep = sleepmin + math.fabs( random.normalvariate( mu=0., sigma=sleepsigma ) )
 time.sleep( tsleep )
 totsleep += tsleep
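The jittered wait in the last three context lines is the heart of the fix this comment describes. A self-contained sketch of the same idea (the default sleepmin and sleepsigma values here are invented for illustration):

    import math
    import random
    import time

    def jittered_sleep(sleepmin=1.0, sleepsigma=3.0):
        # Floor plus half-normal jitter: every process waits at least
        # sleepmin seconds, and the random tail spreads out workers that a
        # multiprocessing pool launched at nearly the same instant, so they
        # stop contending for the lock in lockstep.
        tsleep = sleepmin + math.fabs(random.normalvariate(mu=0.0, sigma=sleepsigma))
        time.sleep(tsleep)
        return tsleep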
4 changes: 2 additions & 2 deletions models/instrument.py
@@ -2180,7 +2180,7 @@ def exposure_coords( self, index ):
 ra, dec : float, float
 """
-raise NotImplementedError( f"{self.__class__.__name__} has't implemetned exposure_coords" )
+raise NotImplementedError( f"{self.__class__.__name__} hasn't implemented exposure_coords" )
 
 def exposure_depth( self, index ):
 """Return a number in magnitudes that relates to depth of exposure.

@@ -2190,7 +2190,7 @@ def exposure_depth( self, index ):
 Parameters
 ----------
 index: int
-    Index into encapsulated expolsures
+    Index into encapsulated exposures
 
 Returns
 -------
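For context, exposure_coords is a hook that concrete instrument subclasses are expected to override. A hypothetical sketch of such an override (DemoInstrument and its _exposure_table attribute are invented; only the Instrument base class, the method name, and the ra/dec return contract come from the diff):

    class DemoInstrument(Instrument):  # Instrument: base class in models/instrument.py
        def exposure_coords(self, index):
            # Return the pointing of the exposure at `index` as (ra, dec)
            # floats, instead of falling through to the base class's
            # NotImplementedError.
            row = self._exposure_table[index]  # hypothetical per-instrument storage
            return row['ra'], row['dec']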
2 changes: 0 additions & 2 deletions pipeline/acquire_decam_refs.py
@@ -281,7 +281,6 @@ def extract_image_and_do_things( self, exposure, section_id ):
 'calibset': 'externally_supplied',
 'flattype': 'externally_supplied',
 'steps_required': [ 'overscan', 'linearity', 'flat', 'fringe' ]} )
-import pdb; pdb.set_trace()
 pipeline.run( ds, no_provtag=True, ok_no_ref_provs=True )
 ds.reraise()
 

@@ -318,7 +317,6 @@ def download_and_extract( self, origexps, usefuldexen ):
 """Download identified exposures; load them and their images into the database."""
 
 SCLogger.info( f"============ Downloading {len(usefuldexen)} reduced exposures." )
-# exposures = origexps.download_and_commit_exposures( list(usefuldexen) )
 exposures = origexps.download_and_commit_exposures( list(usefuldexen),
                                                     delete_downloads=False,
                                                     existing_ok=True )
1 change: 0 additions & 1 deletion pipeline/coaddition.py
@@ -616,7 +616,6 @@ def _coadd_swarp( self,
 ] )
 
 
-# SCLogger.debug( f"Running swarp to coadd {len(sumimgs)} images" )
 SCLogger.debug( f"Running swarp to coadd {len(sumimgs)} images; swarp command is {command}" )
 t0 = time.perf_counter()
 res = subprocess.run( command, capture_output=True, timeout=self.pars.swarp_timeout )
6 changes: 3 additions & 3 deletions pipeline/ref_maker.py
@@ -136,7 +136,7 @@ def __init__(self, **kwargs):
 'preprocessing_prov',
 None,
 (str, None),
-"Provenance ID of preprocessing provenacne to search for images for. Be careful using this! "
+"Provenance ID of preprocessing provenance to search for images for. Be careful using this! "
 "Do not use this if you have more than one instrument. If you don't specify this, it will "
 "be determined automatically using config, which is usually what you want.",
 critical=True

@@ -660,7 +660,7 @@ def run(self, *args, do_not_build=False, **kwargs ):
 """Look to see if there is an existing reference that matches the specs; if not, optionally build one.
 
 See parse_arguments for function call parameters. The remaining
-policy for which images to picdk, and what provenacne to use to
+policy for which images to pick, and what provenance to use to
 find references, is defined by the parameters object of self and
 self.pipeline.
 

@@ -717,7 +717,7 @@ def run(self, *args, do_not_build=False, **kwargs ):
 return None
 elif ( not self.pars.min_only_center ) and any( c < self.pars.min_number for c in match_count ):
 SCLogger.info( f"RefMaker didn't find enough references at at least one point on the image; "
-               f"match_count={match_count}, min_number={self.mars.min_number}" )
+               f"match_count={match_count}, min_number={self.pars.min_number}" )
 return None
 
 # Sort the images and create data stores for all of them
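The self.mars → self.pars fix in the last hunk is more than a cosmetic log change: f-strings evaluate their expressions eagerly, so with the old code, reaching that branch would raise AttributeError instead of logging and returning None. A minimal illustration (the Maker class here is invented):

    class Maker:
        def __init__(self):
            self.pars = type('Pars', (), {'min_number': 3})()

    m = Maker()
    print(f"min_number={m.pars.min_number}")  # fine: prints "min_number=3"
    # f"min_number={m.mars.min_number}"       # would raise AttributeError:
    #                                         # 'Maker' object has no attribute 'mars'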
2 changes: 0 additions & 2 deletions tests/pipeline/test_coaddition.py
@@ -607,5 +607,3 @@ def test_coadd_17_decam_images_swarp( decam_17_offset_refs, decam_four_refs_alig
 assert img.flags.shape == img.data.shape
 assert img.weight.shape == img.weight.shape
 
-# import pdb; pdb.set_trace()
-# pass
2 changes: 1 addition & 1 deletion util/logger.py
@@ -68,7 +68,7 @@ def replace( cls, midformat=None, datefmt=None, level=None ):
 
 @classmethod
 def multiprocessing_replace( cls, datefmt=None, level=None ):
-"""Shorthand for replace with midformat parsed from the current muiltiprocessing process."""
+"""Shorthand for replace with midformat parsed from the current multiprocessing process."""
 
 me = multiprocessing.current_process()
 # Usually processes are named things like ForkPoolWorker-{number}, or something
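The surrounding comment shows where the midformat comes from. A sketch of that idea, using only the multiprocessing facts visible in the diff (the prefix format and function name are invented):

    import multiprocessing
    import re

    def process_log_prefix():
        # Pool workers are usually named like "ForkPoolWorker-3"; pull out
        # the trailing number for a compact per-process log tag, falling back
        # to the full name (e.g. "MainProcess") when there is no number.
        name = multiprocessing.current_process().name
        match = re.search(r'(\d+)$', name)
        return f"worker {match.group(1)}" if match else name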
