diff --git a/models/base.py b/models/base.py
index 372dbbee..1ddec466 100644
--- a/models/base.py
+++ b/models/base.py
@@ -1936,8 +1936,8 @@ def _find_possibly_containing_temptable( cls, ra, dec, session, prov_id=None ):
         session : Session
            Required here, otherwise the temp table would be useless.
 
-        prov_id : str or None
-           If not None, search for objects with this provenance.
+        prov_id : str, list of str, or None
+           If not None, search for objects with this provenance, or any of these provenances if a list.
 
         """
         session.execute( sa.text( "DROP TABLE IF EXISTS temp_find_containing" ) )
@@ -1963,8 +1963,14 @@ def _find_possibly_containing_temptable( cls, ra, dec, session, prov_id=None ):
                  )
        subdict = { "ra": ra, "dec": dec }
        if prov_id is not None:
-           query += " AND provenance_id=:prov"
-           subdict['prov'] = prov_id
+           if isinstance( prov_id, str ):
+               query += " AND provenance_id=:prov"
+               subdict['prov'] = prov_id
+           elif isinstance( prov_id, list ):
+               query += " AND provenance_id IN :prov"
+               subdict['prov'] = tuple( prov_id )
+           else:
+               raise TypeError( "prov_id must be a str or a list of str" )
 
        session.execute( sa.text( query ), subdict )
 
@@ -1977,8 +1983,8 @@ def find_containing( cls, ra, dec, prov_id=None, session=None ):
        ----------
          ra, dec: float, decimal degrees
 
-         prov_id: str or None
-           id of the provenance of cls objects to search; if None, won't filter on provenance
+         prov_id : str, list of str, or None
+           If not None, search for objects with this provenance, or any of these provenances if a list.
 
        Returns
        -------
@@ -2001,12 +2007,13 @@ def find_containing( cls, ra, dec, prov_id=None, session=None ):
        #   we'll q3c_poly_query.
 
        with SmartSession( session ) as sess:
-           cls._find_possibly_containing_temptable( ra, dec, session, prov_id=prov_id )
-           query = sa.text( f"SELECT i._id FROM temp_find_containing i "
-                            f"WHERE q3c_poly_query( {ra}, {dec}, ARRAY[ i.ra_corner_00, i.dec_corner_00, "
-                            f"                                          i.ra_corner_01, i.dec_corner_01, "
-                            f"                                          i.ra_corner_11, i.dec_corner_11, "
-                            f"                                          i.ra_corner_10, i.dec_corner_10 ])" )
+           cls._find_possibly_containing_temptable( ra, dec, sess, prov_id=prov_id )
+           query = sa.text( f"SELECT i.* FROM {cls.__tablename__} i "
+                            f"INNER JOIN temp_find_containing t ON t._id=i._id "
+                            f"WHERE q3c_poly_query( {ra}, {dec}, ARRAY[ t.ra_corner_00, t.dec_corner_00, "
+                            f"                                          t.ra_corner_01, t.dec_corner_01, "
+                            f"                                          t.ra_corner_11, t.dec_corner_11, "
+                            f"                                          t.ra_corner_10, t.dec_corner_10 ])" )
            objs = sess.scalars( sa.select( cls ).from_statement( query ) ).all()
            sess.execute( sa.text( "DROP TABLE temp_find_containing" ) )
            return objs
@@ -2031,9 +2038,8 @@ def _find_potential_overlapping_temptable( cls, fcobj, session, prov_id=None ):
        session : Session
           required here; otherwise, the temp table wouldn't be useful
 
-       prov_id : str, default None
-          The id of the provenance of objects to look for; defaults to
-          not filtering on provenance (which is almost never what you want).
+       prov_id : str, list of str, or None
+          If not None, search for objects with this provenance, or any of these provenances if a list.
 
        """
@@ -2072,8 +2078,14 @@ def _find_potential_overlapping_temptable( cls, fcobj, session, prov_id=None ):
        subdict = { 'minra': fcobj.minra, 'maxra': fcobj.maxra,
                    'mindec': fcobj.mindec, 'maxdec': fcobj.maxdec }
        if prov_id is not None:
-           query += "AND provenance_id=:prov"
-           subdict['prov'] = prov_id
+           if isinstance( prov_id, str ):
+               query += " AND provenance_id=:prov"
+               subdict['prov'] = prov_id
+           elif isinstance( prov_id, list ):
+               query += " AND provenance_id IN :prov"
+               subdict['prov'] = tuple( prov_id )
+           else:
+               raise TypeError( "prov_id must be a str or a list of str" )
 
        session.execute( sa.text( query ), subdict )
 
diff --git a/models/image.py b/models/image.py
index 43e68137..5a2c0538 100644
--- a/models/image.py
+++ b/models/image.py
@@ -9,6 +9,7 @@
 
 import sqlalchemy as sa
 from sqlalchemy import orm
+import psycopg2.extras
 from sqlalchemy.dialects.postgresql import JSONB
 from sqlalchemy.dialects.postgresql import UUID as sqlUUID
@@ -16,6 +17,7 @@
 from sqlalchemy.ext.hybrid import hybrid_property
 from sqlalchemy.exc import IntegrityError
 from sqlalchemy.schema import CheckConstraint
+from sqlalchemy.sql import or_, and_
 
 from astropy.time import Time
 from astropy.wcs import WCS
@@ -861,7 +863,7 @@ def from_images(cls, images, index=0, alignment_target=None, set_is_coadd=True):
         #   want to save, or where we can control this.
         upstream_ids = [ i.id for i in images ]
 
-        if alignment_target is not None:
+        if alignment_target is None:
             alignment_target = images[index]
 
         output = Image( nofile=True, is_coadd=set_is_coadd )
@@ -1547,75 +1549,18 @@ def get_downstreams(self, session=None, only_images=False, siblings=False):
 
         return downstreams
 
-
    @staticmethod
    def find_images( ra=None,
                     dec=None,
-                    session=None,
-                    **kwargs
-                  ):
-       """Return a list of images that match criteria.
-
-       Similar to query_images (and **kwargs is forwarded there),
-       except that it returns the actual list rather than an SQLAlchemy
-       thingy, and ra/dec searching works.
-
-       Parameters
-       ----------
-       ra, dec: float (decimal degrees) or str (HH:MM:SS and dd:mm:ss) or None
-          Search for images that contain this point.  Must either provide both
-          or neither of ra and dec.
-
-       session: Session or None
-
-       *** See query_images for remaining parameters
-
-       Returns
-       -------
-       list of Image
-
-       """
-
-       if ( ra is None ) != ( dec is None ):
-           raise ValueError( "Must provide both or neither of ra/dec" )
-
-       stmt = Image.query_images( ra=ra, dec=dec, **kwargs )
-
-       with SmartSession( session ) as sess:
-           images = sess.scalars( stmt ).all()
-
-       if ( ra is not None ) and ( len(images) > 0 ):
-           if isinstance( ra, str ):
-               ra = parse_ra_hms_to_deg( ra )
-           if isinstance( dec, str ):
-               dec = parse_dec_dms_to_deg( dec )
-           # We selected by minra/maxra mindec/maxdec in query_images()
-           # because there are indexes on those fields.  (We could
-           # have just done a q3c_poly_query using the corners, but
-           # alas the q3c function will use an index on the ra/dec
-           # being searched, not the polygon, so it would not have
-           # used an index and would have been very slow.)  But, if
-           # images aren't square to the sky, that will be a superset
-           # of what we want.  Crop down here.
-           keptimages = []
-           for img in images:
-               poly = shapely.geometry.Polygon( [ ( img.ra_corner_00, img.dec_corner_00 ),
-                                                  ( img.ra_corner_01, img.dec_corner_01 ),
-                                                  ( img.ra_corner_11, img.dec_corner_11 ),
-                                                  ( img.ra_corner_10, img.dec_corner_10 ),
-                                                  ( img.ra_corner_00, img.dec_corner_00 ) ] )
-               if poly.contains( shapely.geometry.Point( ra, dec ) ):
-                   keptimages.append( img )
-           images = keptimages
-
-       return images
-
-
-   @staticmethod
-   def query_images(
-       ra=None,
-       dec=None,
+                    minra=None,
+                    maxra=None,
+                    mindec=None,
+                    maxdec=None,
+                    image=None,
+                    overlapfrac=None,
+                    provenance_ids=None,
+                    type=[1,2,3,4],
                     target=None,
                     section_id=None,
                     project=None,
@@ -1637,50 +1582,67 @@ def query_images(
                     max_background=None,
                     min_zero_point=None,
                     max_zero_point=None,
-                    order_by='latest',
+                    order_by=None,
                     seeing_quality_factor=3.0,
-                    provenance_ids=None,
-                    type=[1, 2, 3, 4],  # TODO: is there a smarter way to only get science images?
+                    max_number=None
                   ):
-       """Get a SQL alchemy statement object for Image objects, with some filters applied.
-
-       This is a convenience method to get a statement object that can be further filtered.
-       If no parameters are given, will happily return all images (be careful with this).
-
-       If you want to filter by ra/dec (which is often what you want to
-       do), you may want to use find_images() rather than this
-       function, because a query using the result of this function will
-       may return a superset of images.  For example, the following
-       image (lines) will be returned even though it doesn't include
-       the specified RA/dec (asterix):
-
-          *╱╲
-          ╱  ╲
-          ╲  ╱
-           ╲╱
-
-       The images are sorted either by MJD or by image quality.
-       Quality is defined as sum of the limiting magnitude and the seeing,
-       multiplied by the negative "seeing_quality_factor" parameter:
-          <quality> = <lim_mag> - <seeing_quality_factor> * <seeing FWHM>
-       This means that as the seeing FWHM is smaller, and the limiting magnitude
-       is bigger (fainter) the quality is higher.
-       Choose a higher seeing_quality_factor to give more weight to the seeing,
-       and less weight to the limiting magnitude.
+       """Return a list of images that match criteria.
+
+       For position searching, can operate in three modes:
+
+       * Not filtering on position
+
+       * Finding all images that include a point.  Specify ra/dec.
+
+       * Finding all images that overlap a rectangle.  Specify either
+         minra/maxra/mindec/maxdec or image.  If overlapfrac is not
+         None, only include images that overlap the desired area by at
+         least this fraction.  (Note: this isn't really right, as this
+         routine treats all images as N-S/E-W aligned rectangles on the
+         sky; see doc on overlap frac below.)
+
+       It will combine the position filters with all the other
+       conditions, ANDing together all the criteria.
 
        Parameters
        ----------
        ra, dec: float (decimal degrees) or (HH:MM:SS / dd:mm:ss) or None
-          If supplied, will find images that *might* contain this ra
-          and dec.  The images you get back will be a susperset of
-          images that actually contain this ra and dec.  For
-          efficiency, the filtering is done in the
-          minra/maxra/mindec/maxdec fields of the database (which have
-          indexes).  If the image is not square to the sky, it's
-          possible that the image doesn't actually contain the
-          requested ra/dec.  If you want to be (more) sure that the
-          image actually does contain the ra/dec, use
-          Image.find_images() instead of query_images().
+          If supplied, will find images that contain this ra and dec.
+          Must provide both or neither of ra and dec.
+
+       minra, maxra, mindec, maxdec: float (decimal degrees) or (HH:MM:SS / dd:mm:ss) or None
+          Specify a rectangle on the sky; find all images that overlap
+          this rectangle at all.  You can only give one of ra/dec or
+          minra/maxra/mindec/maxdec.  Note that minra is the West side
+          of the rectangle, and maxra is the East side, so if the
+          rectangle is 2° wide centered around 0°, minra will be 359
+          and maxra will be 1.
+
+       image: Image (really, any object that inherits the FourCorners mixin) or None
+          If supplied, pull the minra/maxra/mindec/maxdec from this image
+
+       overlapfrac: float or None
+          If supplied (which may only happen when min/max ra/dec are
+          supplied), only return images that overlap the target
+          rectangle by at least this much.  (Sort of.)  NOTE: this
+          doesn't do real overlap fractions of images!  Rather, it
+          does the overlap fraction of the NS/EW-aligned bounding
+          boxes of images!  See docstring for
+          FourCorners.get_overlap_frac().  If that is fixed, this should
+          be too.
+
+       provenance_ids: str or list of strings
+          Find images with these provenance IDs.
+
+       type: integer or string or list of integers or strings, or None
+          List of image types to search for; see
+          enums_and_bitflags.py::ImageTypeConverter for the values.
+          Use "Sci" or 1 to get regular (non-coadd, non-subtraction)
+          images.  This defaults to [1,2,3,4], which gets science,
+          coadded science, difference, and coadded difference images;
+          it omits calibration images (bias, flats, etc.) and warped
+          images.  Set this to None to get everything.
 
        target: str or list of strings (optional)
            Find images that have this target name (e.g., field ID or Object name).
@@ -1759,139 +1721,186 @@ def query_images(
        seeing_quality_factor: float, default 3.0
            The factor to multiply the seeing FWHM by in the quality calculation.
 
-       provenance_ids: str or list of strings
-           Find images with these provenance IDs.
-
-       type: integer or string or list of integers or strings, default [1,2,3,4]
-           List of integer converted types of images to search for.
-           This defaults to [1,2,3,4] which corresponds to the
-           science images, coadds and subtractions
-           (see enums_and_bitflags.ImageTypeConverter for more details).
-           Choose 1 to get only the regular (non-coadd, non-subtraction) images.
+       max_number: int or None
+          If not None, only return this many images.  Will return the
+          first max_number images returned from the database, ordered as
+          specified in order_by.
 
        Returns
        -------
-       stmt: SQL alchemy select statement
-           The statement to be executed to get the images.
-           Do session.scalars(stmt).all() to get the images.
-           Additional filtering can be done on the statement before executing it.
+         list of Image
 
        """
-       stmt = sa.select(Image)
-
-       if ( ra is None ) != ( dec is None ):
-           raise ValueError( "Must provide both or neither of ra/dec" )
-
-       # Filter by position
-       if ( ra is not None ):
-           if isinstance( ra, str ):
-               ra = parse_ra_hms_to_deg( ra )
-           if isinstance( dec, str ):
-               dec = parse_dec_dms_to_deg( dec )
-           # Select on minra/maxra/mindex/maxdec because there are
-           # indexes on those fields.  If the image isn't square to the
-           # sky, it's possible that it will be included here even
-           # though it doesn't actually contain ra/dec.
- stmt = stmt.where( Image.minra <= ra, - Image.maxra >= ra, - Image.mindec <= dec, - Image.maxdec >= dec ) - - # filter by target (e.g., field ID, object name) and possibly section ID and/or project - targets = listify(target) - if targets is not None: - stmt = stmt.where(Image.target.in_(targets)) - section_ids = listify(section_id) - if section_ids is not None: - stmt = stmt.where(Image.section_id.in_(section_ids)) - projects = listify(project) - if projects is not None: - stmt = stmt.where(Image.project.in_(projects)) - - # filter by filter and instrument - filters = listify(filter) - if filters is not None: - stmt = stmt.where(Image.filter.in_(filters)) - instruments = listify(instrument) - if instruments is not None: - stmt = stmt.where(Image.instrument.in_(instruments)) - - # filter by MJD or dateobs - if min_mjd is not None: - if min_dateobs is not None: - raise ValueError("Cannot filter by both minimal MJD and dateobs.") - stmt = stmt.where(Image.mjd >= min_mjd) - if max_mjd is not None: - if max_dateobs is not None: - raise ValueError("Cannot filter by both maximal MJD and dateobs.") - stmt = stmt.where(Image.mjd <= max_mjd) - if min_dateobs is not None: - min_dateobs = parse_dateobs(min_dateobs, output='mjd') - stmt = stmt.where(Image.mjd >= min_dateobs) - if max_dateobs is not None: - max_dateobs = parse_dateobs(max_dateobs, output='mjd') - stmt = stmt.where(Image.mjd <= max_dateobs) - - # filter by exposure time - if min_exp_time is not None: - stmt = stmt.where(Image.exp_time >= min_exp_time) - if max_exp_time is not None: - stmt = stmt.where(Image.exp_time <= max_exp_time) - - # filter by seeing FWHM - if min_seeing is not None: - stmt = stmt.where(Image.fwhm_estimate >= min_seeing) - if max_seeing is not None: - stmt = stmt.where(Image.fwhm_estimate <= max_seeing) - - # filter by limiting magnitude - if max_lim_mag is not None: - stmt = stmt.where(Image.lim_mag_estimate <= max_lim_mag) - if min_lim_mag is not None: - stmt = stmt.where(Image.lim_mag_estimate >= min_lim_mag) - - # filter by airmass - if max_airmass is not None: - stmt = stmt.where(Image.airmass <= max_airmass) - if min_airmass is not None: - stmt = stmt.where(Image.airmass >= min_airmass) - - # filter by background - if max_background is not None: - stmt = stmt.where(Image.bkg_rms_estimate <= max_background) - if min_background is not None: - stmt = stmt.where(Image.bkg_rms_estimate >= min_background) - - # filter by zero point - if max_zero_point is not None: - stmt = stmt.where(Image.zero_point_estimate <= max_zero_point) - if min_zero_point is not None: - stmt = stmt.where(Image.zero_point_estimate >= min_zero_point) - - # filter by provenances - provenance_ids = listify(provenance_ids) - if provenance_ids is not None: - stmt = stmt.where(Image.provenance_id.in_(provenance_ids)) - - # filter by image types - types = listify(type) - if types is not None: - int_types = [ImageTypeConverter.to_int(t) for t in types] - stmt = stmt.where(Image._type.in_(int_types)) - - # sort the images - if order_by == 'earliest': - stmt = stmt.order_by(Image.mjd) - elif order_by == 'latest': - stmt = stmt.order_by(sa.desc(Image.mjd)) - elif order_by == 'quality': - stmt = stmt.order_by( - sa.desc(Image.lim_mag_estimate - abs(seeing_quality_factor) * Image.fwhm_estimate) - ) - elif order_by is not None: - raise ValueError(f'Unknown order_by parameter: {order_by}. 
Use "earliest", "latest" or "quality".') - - return stmt + + # Name of the table we're going to search after position searches are done + searchtable = None + fcobj = None + + with SmartSession() as sess: + + # First: position filter (but not including overlapfrac). This may involve + # calling a FourCorners routine to build a temporary table. + + if ( ra is not None ) or ( dec is not None ): + # Filter by position + if any( i is not None for i in [ minra, maxra, mindec, maxdec ] ): + raise ValueError( "Cannot specify min/max ra/dec and ra/dec together" ) + if ( ra is None ) or ( dec is None ): + raise ValueError( "Must provide both or neither of ra/dec" ) + if overlapfrac is not None: + raise ValueError( "Can't provide overlap frac with ra/dec" ) + + if isinstance( ra, str ): + ra = parse_ra_hms_to_deg( ra ) + if isinstance( dec, str ): + dec = parse_dec_dms_to_deg( dec ) + + Image._find_possibly_containing_temptable( ra, dec, session=sess, prov_id=provenance_ids ) + searchtable = "temp_find_containing" + + elif any( i is not None for i in [ image, minra, maxra, mindec, maxdec ] ): + # Filter by rectangle + if image is not None: + if any( i is not None for i in [ minra, maxra, mindec, maxdec ] ): + raise ValueError( "May specify either image or min/max ra/dec, not both" ) + minra = image.minra + maxra = image.maxra + mindec = image.mindec + maxdec = image.maxdec + else: + if any( i is None for i in [ minra, maxra, mindec, maxdec ] ): + raise ValueError( "Must specify either all or none of minra/maxra/mindec/maxdec" ) + if isinstance( minra, str ): + minra = parse_ra_hms_to_deg( minra ) + if isinstance( maxra, str ): + maxra = parse_ra_hms_to_deg( maxra ) + if isinstance( mindec, str ): + mindec = parse_dec_dms_to_deg( mindec ) + if isinstance( maxdec, str ): + maxdec = parse_dec_dms_to_deg( maxdec ) + + fcobj = FourCorners() + fcobj.dec = (mindec + maxdec) / 2. + fcobj.ra_corner_00 = minra + fcobj.ra_corner_01 = minra + fcobj.minra = minra + fcobj.ra_corner_10 = maxra + fcobj.ra_corner_11 = maxra + fcobj.maxra = maxra + fcobj.dec_corner_00 = mindec + fcobj.dec_corner_10 = mindec + fcobj.mindec = mindec + fcobj.dec_corner_01 = maxdec + fcobj.dec_corner_11 = maxdec + fcobj.maxdec = maxdec + + Image._find_potential_overlapping_temptable( fcobj, session=sess, prov_id=provenance_ids ) + searchtable = "temp_find_overlapping" + else: + if overlapfrac is not None: + raise ValueError( "overlapfrac only makes sense with image or min/max ra/dec" ) + + # Second: all filtering other than position + + # Build the query that allows us to search either our temp + # table or the images table This is a little awkward, + # because we're using SQLAlchemy. I don't know an easy way + # to join to a table that SQLA doesn't know about (i.e. our + # temp tables) using SQLA constructs, so I'm going to just + # build a SQL query and hope that I understand SQLA + # from_statement well enough to do the right thing. (I'm + # also not sure how I'd do a q3c_poly_query with SQLA, and + # it's not worth the effort of trying to figure out the + # syntax.) 
+
+           subdict = {}
+           andtxt = "WHERE "
+           if searchtable is None:
+               q = "SELECT i.* FROM images i "
+           else:
+               q = f"SELECT i.* FROM {searchtable} t INNER JOIN images i ON t._id=i._id "
+               if searchtable == "temp_find_containing":
+                   q += ( "WHERE q3c_poly_query(:ra, :dec, ARRAY[ i.ra_corner_00, i.dec_corner_00, "
+                          "                                       i.ra_corner_01, i.dec_corner_01, "
+                          "                                       i.ra_corner_11, i.dec_corner_11, "
+                          "                                       i.ra_corner_10, i.dec_corner_10 ]) " )
+                   subdict[ 'ra' ] = ra
+                   subdict[ 'dec' ] = dec
+                   andtxt = " AND "
+
+           # A few fields need preprocessing before feeding into the code below
+           min_dateobs = None if min_dateobs is None else parse_dateobs(min_dateobs, output='mjd')
+           max_dateobs = None if max_dateobs is None else parse_dateobs(max_dateobs, output='mjd')
+           types = None if type is None else [ ImageTypeConverter.to_int(t) for t in listify(type) ]
+
+           fields = [ { 'field': 'project', 'val': project, 'type': 'list' },
+                      { 'field': 'target', 'val': target, 'type': 'list' },
+                      { 'field': 'section_id', 'val': section_id, 'type': 'list' },
+                      { 'field': 'filter', 'val': filter, 'type': 'list' },
+                      { 'field': 'instrument', 'val': instrument, 'type': 'list' },
+                      { 'field': 'provenance_id', 'val': provenance_ids, 'type': 'list' },
+                      { 'field': '_type', 'val': types, 'type': 'list' },
+                      { 'field': 'mjd', 'val': min_mjd, 'type': 'ge' },
+                      { 'field': 'mjd', 'val': min_dateobs, 'type': 'ge' },
+                      { 'field': 'mjd', 'val': max_mjd, 'type': 'le' },
+                      { 'field': 'mjd', 'val': max_dateobs, 'type': 'le' },
+                      { 'field': 'exp_time', 'val': min_exp_time, 'type': 'ge' },
+                      { 'field': 'exp_time', 'val': max_exp_time, 'type': 'le' },
+                      { 'field': 'fwhm_estimate', 'val': min_seeing, 'type': 'ge' },
+                      { 'field': 'fwhm_estimate', 'val': max_seeing, 'type': 'le' },
+                      { 'field': 'lim_mag_estimate', 'val': min_lim_mag, 'type': 'ge' },
+                      { 'field': 'lim_mag_estimate', 'val': max_lim_mag, 'type': 'le' },
+                      { 'field': 'airmass', 'val': min_airmass, 'type': 'ge' },
+                      { 'field': 'airmass', 'val': max_airmass, 'type': 'le' },
+                      { 'field': 'zero_point_estimate', 'val': min_zero_point, 'type': 'ge' },
+                      { 'field': 'zero_point_estimate', 'val': max_zero_point, 'type': 'le' },
+                      { 'field': 'bkg_rms_estimate', 'val': min_background, 'type': 'ge' },
+                      { 'field': 'bkg_rms_estimate', 'val': max_background, 'type': 'le' } ]
+           paramn = 0
+           for field in fields:
+               if field['val'] is not None:
+                   val = field['val']
+                   q += f"{andtxt} i.{field['field']} "
+                   if field['type'] == 'list':
+                       q += f" IN :param{paramn}"
+                       val = tuple( listify( val ) )
+                   elif field['type'] == 'ge':
+                       q += f" >= :param{paramn}"
+                   elif field['type'] == 'le':
+                       q += f" <= :param{paramn}"
+                   else:
+                       raise RuntimeError( f"Unknown field type {field['type']}; this should never happen." )
+                   subdict[ f"param{paramn}" ] = val
+                   paramn += 1
+                   andtxt = " AND "
+
+           # Third: Sort
+
+           if order_by == 'earliest':
+               q += " ORDER BY i.mjd "
+           elif order_by == 'latest':
+               q += " ORDER BY i.mjd DESC "
+           elif order_by == 'quality':
+               q += f" ORDER BY i.lim_mag_estimate - ({np.abs(seeing_quality_factor)}*i.fwhm_estimate) DESC "
+           elif order_by is not None:
+               raise ValueError(f'Unknown order_by parameter: {order_by}. Use "earliest", "latest" or "quality".')
+
+           # Apply max_number, if given, in the query itself so we don't pull
+           # back rows we're just going to throw away.
+           if max_number is not None:
+               q += f" LIMIT {int(max_number)}"
+
+           # Get the Image records
+           images = sess.scalars( sa.select( Image ).from_statement( sa.text( q ).bindparams( **subdict ) ) ).all()
+
+           # Should we delete temp tables?  They ought to get dropped automatically when the session closes.
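+           # (Note on the overlap cut below: FourCorners.get_overlap_frac()
+           # compares N-S/E-W aligned bounding boxes, so with, e.g.,
+           # overlapfrac=0.9, an image whose bounding box covers only half of
+           # the requested rectangle is dropped here even though the
+           # temp-table search kept it as a potential overlap.)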
+
+           # Fourth: remove things with too small overlap fraction if relevant
+
+           if overlapfrac is not None:
+               retimages = []
+               for im in images:
+                   if FourCorners.get_overlap_frac( fcobj, im ) >= overlapfrac:
+                       retimages.append( im )
+           else:
+               retimages = list( images )
+
+           return retimages
 
    @staticmethod
diff --git a/pipeline/ref_maker.py b/pipeline/ref_maker.py
index acb10a80..eb3ebf2f 100644
--- a/pipeline/ref_maker.py
+++ b/pipeline/ref_maker.py
@@ -74,6 +74,28 @@ def __init__(self, **kwargs):
            critical=True,
        )
 
+       self.corner_distance = self.add_par(
+           'corner_distance',
+           0.8,
+           (None, float),
+           ( 'When finding references, make sure that we have at least min_number references overlapping '
+             'nine positions on the rectangle we care about, specified by minra/maxra/mindec/maxdec passed '
+             'to run().  One is the center.  The other eight are in a rectangle around the center; '
+             'corner_distance is the fraction of the distance from the center to the edge along the '
+             'relevant direction.  If this is None, then only consider the center; in that case, pass '
+             'only ra and dec to run().' ),
+           critical=True,
+       )
+
+       self.overlap_fraction = self.add_par(
+           'overlap_fraction',
+           0.9,
+           (None, float),
+           ( "When looking for pre-existing references, only return ones whose area overlaps this "
+             "fraction of the desired rectangle's area.  Must be None if corner_distance is None." ),
+           critical=True,
+       )
+
        self.instruments = self.add_par(
            'instruments',
            None,
@@ -124,7 +146,8 @@ def __init__(self, **kwargs):
            'min_number',
            1,
            int,
-           'Construct a reference only if there are at least this many images that pass all other criteria. ',
+           ( 'Construct a reference only if there are at least this many images that pass '
+             'all other criteria.' ),
            critical=True,
        )
 
@@ -235,6 +258,10 @@ def __init__(self, **kwargs):
        maker_dict.update(maker_overrides)  # user can provide override arguments in kwargs
        self.pars = ParsRefMaker(**maker_dict)  # initialize without the pipeline/coaddition parameters
 
+       if ( self.pars.corner_distance is None ) != ( self.pars.overlap_fraction is None ):
+           raise ValueError( "Configuration error; for RefMaker, must have a float for both of "
+                             "corner_distance and overlap_fraction, or both must be None." )
+
        # first, make sure we can assemble the provenances up to extraction:
        self.im_provs = None  # the provenances used to make images going into the reference (these are coadds!)
        self.ex_provs = None  # the provenances used to make other products like SourceLists, that go into the reference
@@ -243,13 +270,27 @@ def __init__(self, **kwargs):
        self.ref_prov = None  # the provenance of the reference itself
        self.refset = None  # the RefSet object that was found / created
 
-       # these attributes tell us the place in the sky where we want to look for objects (given to run())
-       # optionally it also specifies which filter we want the reference to be in
+       self.reset()
+
+   # ======================================================================
+
+   def reset( self ):
+       # these attributes tell us the place in the sky (in degrees)
+       # where we want to look for objects (given to run()), and the
+       # filter we want to be in.  Optionally, it can also specify a
+       # target and section_id to limit images to.
+
+       self.minra = None
+       self.maxra = None
+       self.mindec = None
+       self.maxdec = None
        self.ra = None  # in degrees
        self.dec = None  # in degrees
        self.target = None  # the name of the target / field ID / Object ID
        self.section_id = None  # a string with the section ID
-       self.filter = None  # a string with the (short) name of the filter
+
+   # ======================================================================
 
    def setup_provenances(self, session=None):
        """Make the provenances for the images and all their products, including the coadd image.
@@ -312,6 +353,8 @@ def setup_provenances(self, session=None):
 
        self.ref_prov.insert_if_needed()
 
+   # ======================================================================
+
    def parse_arguments(self, *args, **kwargs):
        """Figure out if the input parameters are given as coordinates or as target + section ID pairs.
@@ -385,6 +428,8 @@ def parse_arguments(self, *args, **kwargs):
 
        return session
 
+   # ======================================================================
+
    def _append_provenance_to_refset_if_appropriate( self, existing, session ):
        """Used internally by make_refset."""
 
@@ -434,6 +479,8 @@ def _append_provenance_to_refset_if_appropriate( self, existing, session ):
            self.refset = None
            raise
 
+   # ======================================================================
+
    def make_refset(self, session=None):
        """Create or load an existing RefSet with the required name.
 
@@ -481,24 +528,120 @@ def make_refset(self, session=None):
 
            self._append_provenance_to_refset_if_appropriate( existing, dbsession )
 
-   def run(self, *args, **kwargs):
-       """Check if a reference exists for the given coordinates/field ID, and filter, and make it if it is missing.
+   # ======================================================================
 
-       Will check if a RefSet exists with the same provenance and name, and if it doesn't, will create a new
-       RefSet with these properties, to keep track of the reference provenances.
+   def parse_arguments( self, image=None, ra=None, dec=None,
+                        minra=None, maxra=None, mindec=None, maxdec=None,
+                        target=None, section_id=None,
+                        reset=True ):
+       """Parse arguments for the RefMaker.
+
+       There are two modes in which RefMaker can operate:
+
+       * If the corner_distance parameter is None, then we're making a
+         reference that covers a single point (useful for forced
+         photometry, for instance).  In this case, either specify an
+         image (in which case its central ra and dec are used), or
+         specify ra/dec.
 
-       Arguments specifying where in the sky to look for / create the reference are parsed by parse_arguments().
-       Same is true for the filter choice.
-       The remaining policy regarding which images to pick, and what provenance to use to find references,
-       is defined by the parameters object of self and of self.pipeline.
+       * If the corner_distance parameter is not None, we're making a
+         reference that covers a rectangle on the sky (covering at
+         least the fraction overlap_fraction of the rectangle).  In
+         this case, either specify an image that defines the rectangle
+         on the sky, or specify minra/maxra/mindec/maxdec.
+
+       Optionally, specify a target and section_id that images must
+       have to be considered for inclusion in a reference.  Only use
+       this if you're using a survey that's very careful about setting
+       its target names, and if you always go back to exactly the same
+       fields so you know that the same chip is always going to be in
+       the same place.
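+
+       As a sketch of the two calling modes (maker below stands for a
+       configured RefMaker; the coordinates are made-up values):
+
+           maker.parse_arguments( ra=210.3, dec=-3.75 )        # corner_distance is None
+           maker.parse_arguments( minra=210.1, maxra=210.45,
+                                  mindec=-3.9, maxdec=-3.6 )   # corner_distance is not None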
+
+       """
+       if ( image is not None ) and any( i is not None for i in [ ra, dec, minra, maxra, mindec, maxdec ] ):
+           raise ValueError( "If you pass image to RefMaker.run, you can't pass any coordinates." )
+
+       if self.pars.corner_distance is None:
+           if any( i is not None for i in [ minra, maxra, mindec, maxdec ] ):
+               raise ValueError( "For RefMaker corner_distance None, can't specify minra/maxra/mindec/maxdec" )
+           if image is not None:
+               if ( ra is not None ) or ( dec is not None ):
+                   raise ValueError( "For RefMaker corner_distance None, must specify image or ra/dec, not both" )
+               ra = image.ra
+               dec = image.dec
+           else:
+               if ( ra is None ) or ( dec is None ):
+                   raise ValueError( "For RefMaker corner_distance None, must provide either image or both ra & dec" )
+       else:
+           if ( ra is not None ) or ( dec is not None ):
+               raise ValueError( "For RefMaker corner_distance not None, can't specify ra/dec" )
+           if image is not None:
+               if any( i is not None for i in [ minra, maxra, mindec, maxdec ] ):
+                   raise ValueError( "For RefMaker corner_distance not None, must specify image or "
+                                     "minra/maxra/mindec/maxdec, not both" )
+               minra = image.minra
+               maxra = image.maxra
+               mindec = image.mindec
+               maxdec = image.maxdec
+           else:
+               if any ( i is None for i in [ minra, maxra, mindec, maxdec ] ):
+                   raise ValueError( "For RefMaker corner_distance not None, must specify image or "
+                                     "all of minra/maxra/mindec/maxdec" )
 
-       If one of the inputs is a session, will use that in the entire process.
-       Otherwise, will open internal sessions and close them whenever they are not needed.
+
+       self.minra = minra
+       self.maxra = maxra
+       self.mindec = mindec
+       self.maxdec = maxdec
+       self.ra = ra
+       self.dec = dec
+       self.target = target
+       self.section_id = section_id
+
+   # ======================================================================
+
+   def _identify_references_at_position( self ):
+       pass
+
+   # ======================================================================
+
+   def identify_references( self, *args, _do_not_parse_arguments=False, **kwargs ):
+       """Identify existing references in the database.
+
+       See parse_arguments for a description of the arguments.
+
+       (Parameter _do_not_parse_arguments is used internally, ignore it
+       if calling this from the outside.)
+
+       """
+       if not _do_not_parse_arguments:
+           self.parse_arguments( *args, **kwargs )
+
+   # ======================================================================
+
+   def run(self, *args, do_not_build=False, **kwargs ):
+       """Look to see if there is an existing reference that matches the specs; if not, optionally build one.
+
+       See parse_arguments for function call parameters.  The remaining
+       policy for which images to pick, and what provenance to use to
+       find references, is defined by the parameters object of self and
+       self.pipeline.
+
+       If do_not_build is true, this becomes a thin front-end for Reference.get_references().
+
+       Will check if a RefSet exists with the same provenance and name, and if it doesn't, will create a new
+       RefSet with these properties, to keep track of the reference provenances.
 
        Will return a Reference, or None in case it doesn't exist and cannot be created
        (e.g., because there are not enough images that pass the criteria).
+
        """
-       session = self.parse_arguments(*args, **kwargs)
+
+       self.parse_arguments( *args, **kwargs )
 
-       self.make_refset( session=session )
+       self.make_refset()
@@ -522,6 +665,9 @@
                raise RuntimeError( f'Found multiple references with the same provenance '
                                    f'{self.ref_prov.id} and location!'
) + if do_not_build: + return None + ############### no reference found, need to build one! ################ # first get all the images that could be used to build the reference diff --git a/pipeline/subtraction.py b/pipeline/subtraction.py index be29d334..71f15122 100644 --- a/pipeline/subtraction.py +++ b/pipeline/subtraction.py @@ -330,7 +330,7 @@ def run(self, *args, **kwargs): if self.has_recalculated: # Align the images - to_index = self.aligner.pars.to_index + to_index = self.pars.alignment_index if to_index == 'ref': SCLogger.error( "Aligning new to ref will violate assumptions in detection.py and measuring.py" ) raise RuntimeError( "Aligning new to ref not supported; align ref to new instead" ) @@ -379,7 +379,7 @@ def run(self, *args, **kwargs): ds.aligned_wcs = ds.wcs else: - raise ValueError( f"aligner to_index must be ref or new, not {to_index}" ) + raise ValueError( f"alignment_index must be ref or new, not {to_index}" ) ImageAligner.cleanup_temp_images() diff --git a/tests/conftest.py b/tests/conftest.py index dd466265..bb6423f5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -37,8 +37,8 @@ # at the end of tests. In general, we want this to be True, so we can make sure # that our tests are properly cleaning up after themselves. However, the errors # from this can hide other errors and failures, so when debugging, set it to False. -# verify_archive_database_empty = True -verify_archive_database_empty = False +verify_archive_database_empty = True +# verify_archive_database_empty = False pytest_plugins = [ diff --git a/tests/fixtures/datastore_factory.py b/tests/fixtures/datastore_factory.py index 513ab50f..bf6732e4 100644 --- a/tests/fixtures/datastore_factory.py +++ b/tests/fixtures/datastore_factory.py @@ -120,8 +120,16 @@ def make_datastore( """ - SCLogger.debug( f"make_datastore called with a {type(exporim).__name__}, " - f"overrides={overrides}, augments={augments}" ) + SCLogger.debug( f"make_datastore called with a {type(exporim).__name__};\n" + f" overrides={overrides}\n" + f" augments={augments}\n" + f" cache_dir={cache_dir}\n" + f" cache_base_name={cache_base_name}\n" + f" bad_pixel_map is a {type(bad_pixel_map)}\n" + f" save_original_image={save_original_image}\n" + f" skip_sub={skip_sub}\n" + f" through_step={through_step}\n" + f" provtag={provtag}" ) overrides = {} if overrides is None else overrides augments = {} if augments is None else augments @@ -193,8 +201,9 @@ def make_datastore( if 'preprocessing' in stepstodo: if ds.image is None and use_cache: # check if preprocessed image is in cache + SCLogger.debug( f'make_datastore searching cache for {cache_name}' ) if os.path.isfile(image_cache_path): - SCLogger.debug('make_datastore loading image from cache. ') + SCLogger.debug('make_datastore loading image from cache') img = copy_from_cache(Image, cache_dir, cache_name) # assign the correct exposure to the object loaded from cache if ds.exposure_id is not None: @@ -215,7 +224,7 @@ def make_datastore( ds.image.save(verify_md5=False) if ds.image is None: # make the preprocessed image - SCLogger.debug('make_datastore making preprocessed image. 
') + SCLogger.debug('make_datastore making preprocessed image') ds = p.preprocessor.run(ds) if bad_pixel_map is not None: ds.image.flags |= bad_pixel_map @@ -264,8 +273,9 @@ def make_datastore( # try to get the source list from cache cache_name = f'{cache_base_name}.sources_{filename_barf}.fits.json' sources_cache_path = os.path.join(cache_dir, cache_name) + SCLogger.debug( f'make_datastore searching cache for source list {cache_name}' ) if os.path.isfile(sources_cache_path): - SCLogger.debug('make_datastore loading source list from cache. ') + SCLogger.debug('make_datastore loading source list from cache') ds.sources = copy_from_cache(SourceList, cache_dir, cache_name) ds.sources.provenance_id = ds.prov_tree['extraction'].id ds.sources.image_id = ds.image.id @@ -276,8 +286,9 @@ def make_datastore( # try to get the PSF from cache cache_name = f'{cache_base_name}.psf_{filename_barf}.fits.json' psf_cache_path = os.path.join(cache_dir, cache_name) + SCLogger.debug( f'make_datastore searching cache for psf {cache_name}' ) if os.path.isfile(psf_cache_path): - SCLogger.debug('make_datastore loading PSF from cache. ') + SCLogger.debug('make_datastore loading PSF from cache') ds.psf = copy_from_cache(PSF, cache_dir, cache_name) ds.psf.sources_id = ds.sources.id # make sure this is saved to the archive as well @@ -315,6 +326,7 @@ def make_datastore( bg_cache_path = os.path.join(cache_dir, cache_name) if use_cache and found_sources_in_cache: # try to get the background from cache + SCLogger.debug( f'make_datastore searching cache for background {cache_name}' ) if os.path.isfile(bg_cache_path): SCLogger.debug('make_datastore loading background from cache. ') ds.bg = copy_from_cache( Background, cache_dir, cache_name, @@ -341,6 +353,7 @@ def make_datastore( wcs_cache_path = os.path.join(cache_dir, cache_name) if use_cache and found_sources_in_cache: # try to get the WCS from cache + SCLogger.debug( f'make_datastore searching cache for wcs {cache_name}' ) if os.path.isfile(wcs_cache_path): SCLogger.debug('make_datastore loading WCS from cache. ') ds.wcs = copy_from_cache(WorldCoordinates, cache_dir, cache_name) @@ -364,6 +377,7 @@ def make_datastore( zp_cache_path = os.path.join(cache_dir, cache_name) if use_cache and found_sources_in_cache: # try to get the ZP from cache + SCLogger.debug( f'make_datastore searching cache for zero point {cache_name}' ) if os.path.isfile(zp_cache_path): SCLogger.debug('make_datastore loading zero point from cache. ') ds.zp = copy_from_cache(ZeroPoint, cache_dir, cache_name) @@ -463,6 +477,7 @@ def make_datastore( # f = f[:-6] + prov_aligned_new.id[:6] # filename_aligned_new = f + SCLogger.debug( f'make_datastore searching for subtraction cache including {sub_cache_path}' ) if ( ( os.path.isfile(sub_cache_path) ) and ( os.path.isfile(zogy_score_cache_path) ) and ( os.path.isfile(zogy_alpha_cache_path) ) and @@ -516,6 +531,8 @@ def make_datastore( if 'detection' in stepstodo: cache_name = os.path.join(cache_dir, cache_sub_name + f'.sources_{ds.prov_tree["detection"].id[:6]}.npy.json') + if use_cache: + SCLogger.debug( f'make_datastore searching cache for detections {cache_name}' ) if use_cache and os.path.isfile(cache_name): SCLogger.debug( "make_datastore loading detections from cache." 
) ds.detections = copy_from_cache(SourceList, cache_dir, cache_name) @@ -534,6 +551,7 @@ def make_datastore( if 'cutting' in stepstodo: cache_name = os.path.join(cache_dir, cache_sub_name + f'.cutouts_{ds.prov_tree["cutting"].id[:6]}.h5') + SCLogger.debug( f'make_datastore searching cache for cutouts {cache_name}' ) if use_cache and ( os.path.isfile(cache_name) ): SCLogger.debug( 'make_datastore loading cutouts from cache.' ) ds.cutouts = copy_from_cache(Cutouts, cache_dir, cache_name) @@ -557,6 +575,8 @@ def make_datastore( measurements_cache_name = os.path.join(cache_dir, cache_sub_name + f'.measurements_{ds.prov_tree["measuring"].id[:6]}.json') + SCLogger.debug( f'make_datastore searching cache for all measurements {all_measurements_cache_name} ' + f'and measurements {measurements_cache_name}' ) if ( use_cache and os.path.isfile(measurements_cache_name) and os.path.isfile(all_measurements_cache_name) diff --git a/tests/fixtures/decam.py b/tests/fixtures/decam.py index 22257a4c..87cf73bb 100644 --- a/tests/fixtures/decam.py +++ b/tests/fixtures/decam.py @@ -367,7 +367,7 @@ def decam_datastore( decam_exposure, 'S3', cache_dir=decam_cache_dir, - cache_base_name='007/c4d_20230702_080904_S3_r_Sci_NBXRIO', + cache_base_name='007/c4d_20230702_080904_S3_r_Sci_PN4G4I', overrides={ 'subtraction': { 'refset': 'test_refset_decam' } }, save_original_image=True, provtag='decam_datastore' diff --git a/tests/fixtures/ptf.py b/tests/fixtures/ptf.py index 9a3ae028..cd28674a 100644 --- a/tests/fixtures/ptf.py +++ b/tests/fixtures/ptf.py @@ -395,7 +395,7 @@ def ptf_aligned_image_datastores(request, ptf_reference_image_datastores, ptf_ca ( os.path.isfile(os.path.join(cache_dir, 'manifest.txt')) ) ): - aligner = ImageAligner( method='swarp', to_index='last' ) + aligner = ImageAligner( method='swarp' ) # Going to assume that the upstream provenances are the same for all # of the images. That will be true here by construction... I think. 
            ds = ptf_reference_image_datastores[0]
@@ -426,7 +426,7 @@
        # ref: https://stackoverflow.com/a/75337251
        # ptf_reference_image_datastores = request.getfixturevalue('ptf_reference_image_datastores')
 
-       coadder = Coadder( alignment={ 'method': 'swarp', 'to_index': 'last' } )
+       coadder = Coadder( alignment_index='last', alignment={ 'method': 'swarp' } )
        coadder.run_alignment( ptf_reference_image_datastores, len(ptf_reference_image_datastores)-1 )
 
        for ds in coadder.aligned_datastores:
diff --git a/tests/models/test_image_querying.py b/tests/models/test_image_querying.py
index 8c73138f..35b51c17 100644
--- a/tests/models/test_image_querying.py
+++ b/tests/models/test_image_querying.py
@@ -5,7 +5,7 @@
 
 from astropy.time import Time
 
-from models.base import SmartSession
+from models.base import SmartSession, FourCorners
 from models.provenance import Provenance
 from models.image import Image, image_upstreams_association_table
@@ -348,471 +348,339 @@ def im_qual(im, factor=3.0):
    return im.lim_mag_estimate - factor * im.fwhm_estimate
 
 
-def test_image_query(ptf_ref, decam_reference, decam_datastore, decam_default_calibrators):
+def test_find_images(ptf_reference_image_datastores, ptf_ref,
+                     decam_reference, decam_datastore, decam_default_calibrators):
    # TODO: need to fix some of these values (of lim_mag and quality) once we get actual limiting magnitude measurements
-
-   # Note that (I believe) all of the images in the fixture are square to the sky,
-   # so find_images and query_images will return the same thing for an ra/dec
-   # search.  TODO: make an image that's not square to the sky.
+   # (...isn't that done now?  TODO: verify that the limiting magnitude estimates in the tests below come
+   # from Dan's code, and if so, remove these comments.)
with SmartSession() as session: - stmt = Image.query_images() - results = session.scalars(stmt).all() - total = len(results) - - # from pprint import pprint - # pprint(results) - # - # print(f'MJD: {[im.mjd for im in results]}') - # print(f'date: {[im.observation_time for im in results]}') - # print(f'RA: {[im.ra for im in results]}') - # print(f'DEC: {[im.dec for im in results]}') - # print(f'target: {[im.target for im in results]}') - # print(f'section_id: {[im.section_id for im in results]}') - # print(f'project: {[im.project for im in results]}') - # print(f'Instrument: {[im.instrument for im in results]}') - # print(f'Filter: {[im.filter for im in results]}') - # print(f'FWHM: {[im.fwhm_estimate for im in results]}') - # print(f'LIMMAG: {[im.lim_mag_estimate for im in results]}') - # print(f'B/G: {[im.bkg_rms_estimate for im in results]}') - # print(f'ZP: {[im.zero_point_estimate for im in results]}') - # print(f'EXPTIME: {[im.exp_time for im in results]}') - # print(f'AIRMASS: {[im.airmass for im in results]}') - # print(f'QUAL: {[im_qual(im) for im in results]}') - - # get only the science images - stmt = Image.query_images(type=1) - found = Image.find_images(type=1) - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im._type == 1 for im in results1) - assert all(im.type == 'Sci' for im in results1) - assert len(results1) < total - - # get the coadd and subtraction images - stmt = Image.query_images(type=[2, 3, 4]) - found = Image.find_images(type=[2, 3, 4]) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im._type in [2, 3, 4] for im in results2) - assert all(im.type in ['ComSci', 'Diff', 'ComDiff'] for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - # use the names of the types instead of integers, or a mixture of ints and strings - stmt = Image.query_images(type=['ComSci', 'Diff', 4]) - found = Image.find_images(type=['ComSci', 'Diff', 4]) - results3 = session.scalars(stmt).all() - assert [ i.id for i in results3 ] == [ i.id for i in found ] - assert results2 == results3 - - # filter by MJD and observation date - value = 57000.0 - stmt = Image.query_images(min_mjd=value) - found = Image.find_images(min_mjd=value) - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.mjd >= value for im in results1) - assert all(im.instrument == 'DECam' for im in results1) - assert len(results1) < total - - stmt = Image.query_images(max_mjd=value) - found = Image.find_images(max_mjd=value) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.mjd <= value for im in results2) - assert all(im.instrument == 'PTF' for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - stmt = Image.query_images(min_mjd=value, max_mjd=value) - found = Image.find_images(min_mjd=value, max_mjd=value) - results3 = session.scalars(stmt).all() - assert [ i.id for i in results3 ] == [ i.id for i in found ] - assert len(results3) == 0 - - # filter by observation date - t = Time(57000.0, format='mjd').datetime - stmt = Image.query_images(min_dateobs=t) - found = Image.find_images(min_dateobs=t) - results4 = session.scalars(stmt).all() - assert [ i.id for i in results4 ] == [ i.id for i in found ] - assert all(im.observation_time >= t for im in results4) - assert 
all(im.instrument == 'DECam' for im in results4) - assert set(results4) == set(results1) - assert len(results4) < total - - stmt = Image.query_images(max_dateobs=t) - found = Image.find_images(max_dateobs=t) - results5 = session.scalars(stmt).all() - assert [ i.id for i in results5 ] == [ i.id for i in found ] - assert all(im.observation_time <= t for im in results5) - assert all(im.instrument == 'PTF' for im in results5) - assert set(results5) == set(results2) - assert len(results5) < total - assert len(results4) + len(results5) == total - - # filter by images that contain this point (ELAIS-E1, chip S3) - ra = 7.449 - dec = -42.926 - - results1 = Image.find_containing( ra, dec, session=session ) - assert all(im.instrument == 'DECam' for im in results1) - assert all(im.target == 'ELAIS-E1' for im in results1) - assert len(results1) < total - - # filter by images that contain this point (ELAIS-E1, chip N16) - ra = 7.659 - dec = -43.420 - - stmt = Image.query_images(ra=ra, dec=dec) - found = Image.find_images(ra=ra, dec=dec) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.instrument == 'DECam' for im in results2) - assert all(im.target == 'ELAIS-E1' for im in results2) - assert len(results2) < total - - # # filter by images that contain this point (PTF field number 100014) - ra = 188.0 - dec = 4.5 - stmt = Image.query_images(ra=ra, dec=dec) - found = Image.find_images(ra=ra, dec=dec ) - results3 = session.scalars(stmt).all() - assert [ i.id for i in results3 ] == [ i.id for i in found ] - assert all(im.instrument == 'PTF' for im in results3) - assert all(im.target == '100014' for im in results3) - assert len(results3) < total - assert len(results1) + len(results2) + len(results3) == total - - # filter by section ID - stmt = Image.query_images(section_id='S3') - found = Image.find_images(section_id='S3') - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.section_id == 'S3' for im in results1) - assert all(im.instrument == 'DECam' for im in results1) - assert len(results1) < total - - stmt = Image.query_images(section_id='N16') - found = Image.find_images(section_id='N16') - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.section_id == 'N16' for im in results2) - assert all(im.instrument == 'DECam' for im in results2) - assert len(results2) < total - - stmt = Image.query_images(section_id='11') - found = Image.find_images(section_id='11') - results3 = session.scalars(stmt).all() - assert [ i.id for i in results3 ] == [ i.id for i in found ] - assert all(im.section_id == '11' for im in results3) - assert all(im.instrument == 'PTF' for im in results3) - assert len(results3) < total - assert len(results1) + len(results2) + len(results3) == total - - # filter by the PTF project name - stmt = Image.query_images(project='PTF_DyC_survey') - found = Image.find_images(project='PTF_DyC_survey') - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.project == 'PTF_DyC_survey' for im in results1) - assert all(im.instrument == 'PTF' for im in results1) - assert len(results1) < total - - # filter by the two different project names for DECam: - stmt = Image.query_images(project=['many', '2023A-716082']) - found = Image.find_images(project=['many', '2023A-716082']) - results2 = session.scalars(stmt).all() - assert [ i.id for i in 
results2 ] == [ i.id for i in found ] - assert all(im.project in ['many', '2023A-716082'] for im in results2) - assert all(im.instrument == 'DECam' for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - # filter by instrument - stmt = Image.query_images(instrument='PTF') - found = Image.find_images(instrument='PTF') - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.instrument == 'PTF' for im in results1) - assert len(results1) < total - - stmt = Image.query_images(instrument='DECam') - found = Image.find_images(instrument='DECam') - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.instrument == 'DECam' for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - stmt = Image.query_images(instrument=['PTF', 'DECam']) - found = Image.find_images(instrument=['PTF', 'DECam']) - results3 = session.scalars(stmt).all() - assert [ i.id for i in results3 ] == [ i.id for i in found ] - assert len(results3) == total - - stmt = Image.query_images(instrument=['foobar']) - found = Image.find_images(instrument=['foobar']) - results4 = session.scalars(stmt).all() - assert len(results4) == 0 - assert len(found) == 0 - - # filter by filter - stmt = Image.query_images(filter='R') - found = Image.find_images(filter='R') - results6 = session.scalars(stmt).all() - assert [ i.id for i in results6 ] == [ i.id for i in found ] - assert all(im.filter == 'R' for im in results6) - assert all(im.instrument == 'PTF' for im in results6) - assert set(results6) == set(results1) - - stmt = Image.query_images(filter='r DECam SDSS c0002 6415.0 1480.0') - found = Image.find_images(filter='r DECam SDSS c0002 6415.0 1480.0') - results7 = session.scalars(stmt).all() - assert [ i.id for i in results7 ] == [ i.id for i in found ] - assert all(im.filter == 'r DECam SDSS c0002 6415.0 1480.0' for im in results7) - assert all(im.instrument == 'DECam' for im in results7) - assert set(results7) == set(results2) - - # filter by seeing FWHM - value = 3.0 - stmt = Image.query_images(max_seeing=value) - found = Image.find_images(max_seeing=value) - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.instrument == 'DECam' for im in results1) - assert all(im.fwhm_estimate <= value for im in results1) - assert len(results1) < total - - stmt = Image.query_images(min_seeing=value) - found = Image.find_images(min_seeing=value) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.instrument == 'PTF' for im in results2) - assert all(im.fwhm_estimate >= value for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - stmt = Image.query_images(min_seeing=value, max_seeing=value) - found = Image.find_images(min_seeing=value, max_seeing=value) - results3 = session.scalars(stmt).all() - assert len(results3) == 0 # we will never have exactly that number - assert len(found) == 0 - - # filter by limiting magnitude - value = 21.0 - stmt = Image.query_images(min_lim_mag=value) - found = Image.find_images(min_lim_mag=value) - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.instrument == 'DECam' for im in results1) - assert all(im.lim_mag_estimate >= value for im in results1) - assert 
len(results1) < total - - stmt = Image.query_images(max_lim_mag=value) - found = Image.find_images(max_lim_mag=value) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.instrument == 'PTF' for im in results2) - assert all(im.lim_mag_estimate <= value for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - stmt = Image.query_images(min_lim_mag=value, max_lim_mag=value) - found = Image.find_images(min_lim_mag=value, max_lim_mag=value) - results3 = session.scalars(stmt).all() - assert len(results3) == 0 - assert len(found) == 0 - - # filter by background - value = 25.0 - stmt = Image.query_images(min_background=value) - found = Image.find_images(min_background=value) - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.bkg_rms_estimate >= value for im in results1) - assert len(results1) < total - - stmt = Image.query_images(max_background=value) - found = Image.find_images(max_background=value) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.bkg_rms_estimate <= value for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - stmt = Image.query_images(min_background=value, max_background=value) - found = Image.find_images(min_background=value, max_background=value) - results3 = session.scalars(stmt).all() - assert len(results3) == 0 - assert len(found) == 0 - - # filter by zero point - value = 27.0 - stmt = Image.query_images(min_zero_point=value) - found = Image.find_images(min_zero_point=value) - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.zero_point_estimate >= value for im in results1) - assert len(results1) < total - - stmt = Image.query_images(max_zero_point=value) - found = Image.find_images(max_zero_point=value) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.zero_point_estimate <= value for im in results2) - assert len(results2) < total - assert len(results1) + len(results2) == total - - stmt = Image.query_images(min_zero_point=value, max_zero_point=value) - found = Image.find_images(min_zero_point=value, max_zero_point=value) - results3 = session.scalars(stmt).all() - assert len(results3) == 0 - assert len(found) == 0 - - # filter by exposure time - value = 60.0 + 1.0 - stmt = Image.query_images(min_exp_time=value) - found = Image.find_images(min_exp_time=value) - results1 = session.scalars(stmt).all() - assert [ i.id for i in results1 ] == [ i.id for i in found ] - assert all(im.exp_time >= value for im in results1) - assert len(results1) < total - - stmt = Image.query_images(max_exp_time=value) - found = Image.find_images(max_exp_time=value) - results2 = session.scalars(stmt).all() - assert [ i.id for i in results2 ] == [ i.id for i in found ] - assert all(im.exp_time <= value for im in results2) - assert len(results2) < total - - stmt = Image.query_images(min_exp_time=60.0, max_exp_time=60.0) - found = Image.find_images(min_exp_time=60.0, max_exp_time=60.0) - results3 = session.scalars(stmt).all() - assert [ i.id for i in results3 ] == [ i.id for i in found ] - assert len(results3) == len(results2) # all those under 31s are those with exactly 30s - - # query based on airmass - value = 1.15 - total_with_airmass = len([im for im in results if 
-    stmt = Image.query_images(max_airmass=value)
-    found = Image.find_images(max_airmass=value)
-    results1 = session.scalars(stmt).all()
-    assert [ i.id for i in results1 ] == [ i.id for i in found ]
-    assert all(im.airmass <= value for im in results1)
-    assert len(results1) < total_with_airmass
-
-    stmt = Image.query_images(min_airmass=value)
-    found = Image.find_images(min_airmass=value)
-    results2 = session.scalars(stmt).all()
-    assert [ i.id for i in results2 ] == [ i.id for i in found ]
-    assert all(im.airmass >= value for im in results2)
-    assert len(results2) < total_with_airmass
-    assert len(results1) + len(results2) == total_with_airmass
-
-    # order the results by quality (lim_mag - 3 * fwhm)
-    # note that we cannot filter by quality, it is not a meaningful number
-    # on its own, only as a way to compare images and find which is better.
-    # sort all the images by quality and get the best one
-    stmt = Image.query_images(order_by='quality')
-    best = session.scalars(stmt).first()
-
-    # the best overall quality from all images
-    assert im_qual(best) == max([im_qual(im) for im in results])
-
-    # get the two best images from the PTF instrument (exp_time chooses the single images only)
-    stmt = Image.query_images(max_exp_time=60, order_by='quality')
-    found = Image.find_images(max_exp_time=60, order_by='quality')[:2]
-    results1 = session.scalars(stmt.limit(2)).all()
-    assert [ i.id for i in results1 ] == [ i.id for i in found ]
-    assert len(results1) == 2
-    assert all(im_qual(im) > 9.0 for im in results1)
-
-    # change the seeing factor a little:
-    factor = 2.8
-    stmt = Image.query_images(max_exp_time=60, order_by='quality', seeing_quality_factor=factor)
-    found = Image.find_images(max_exp_time=60, order_by='quality', seeing_quality_factor=factor)[:2]
-    results2 = session.scalars(stmt.limit(2)).all()
-    assert [ i.id for i in results2 ] == [ i.id for i in found ]
-
-    # quality will be a little bit higher, but the images are the same
-    assert results2 == results1
-    assert im_qual(results2[0], factor=factor) > im_qual(results1[0])
-    assert im_qual(results2[1], factor=factor) > im_qual(results1[1])
-
-    # change the seeing factor dramatically:
-    factor = 0.2
-    stmt = Image.query_images(max_exp_time=60, order_by='quality', seeing_quality_factor=factor)
-    found = Image.find_images(max_exp_time=60, order_by='quality', seeing_quality_factor=factor)[:2]
-    results3 = session.scalars(stmt.limit(2)).all()
-    assert [ i.id for i in results3 ] == [ i.id for i in found ]
-
-    # TODO -- assumptions that went into this test aren't right, come up with
-    # a test case where it will actually work
-    # quality will be a higher, but also a different image will now have the second-best quality
-    # assert results3 != results1
-    # assert im_qual(results3[0], factor=factor) > im_qual(results1[0])
-
-    # do a cross filtering of coordinates and background (should only find the PTF coadd)
-    ra = 188.0
-    dec = 4.5
-    background = 5
-
-    stmt = Image.query_images(ra=ra, dec=dec, max_background=background)
-    found = Image.find_images(ra=ra, dec=dec, max_background=background)
-    results1 = session.scalars(stmt).all()
-    assert [ i.id for i in results1 ] == [ i.id for i in found ]
-    assert len(results1) == 1
-    assert results1[0].instrument == 'PTF'
-    assert results1[0].type == 'ComSci'
-
-    # cross the DECam target and section ID with the exposure time that's of the S3 ref image
-    target = 'ELAIS-E1'
-    section_id = 'S3'
-    exp_time = 120.0
-
-    stmt = Image.query_images(target=target, section_id=section_id, min_exp_time=exp_time)
-    found = Image.find_images(target=target, section_id=section_id, min_exp_time=exp_time)
-    results2 = session.scalars(stmt).all()
-    assert [ i.id for i in results2 ] == [ i.id for i in found ]
-    assert len(results2) == 1
-    assert results2[0].instrument == 'DECam'
-    assert results2[0].type == 'ComSci'
-    assert results2[0].exp_time == 150.0
-
-    # cross filter on MJD and instrument in a way that has no results
-    mjd = 55000.0
-    instrument = 'PTF'
-
-    stmt = Image.query_images(min_mjd=mjd, instrument=instrument)
-    found = Image.find_images(min_mjd=mjd, instrument=instrument)
-    results3 = session.scalars(stmt).all()
-    assert len(results3) == 0
-    assert len(found) == 0
-
-    # cross filter MJD and sort by quality to get the coadd PTF image
-    mjd = 54926.31913
-
-    stmt = Image.query_images(max_mjd=mjd, order_by='quality')
-    found = Image.find_images(max_mjd=mjd, order_by='quality')
-    results4 = session.scalars(stmt).all()
-    assert [ i.id for i in results4 ] == [ i.id for i in found ]
-    assert len(results4) == 2
-    assert results4[0].mjd == results4[1].mjd # same time, as one is a coadd of the other images
-    assert results4[0].instrument == 'PTF'
-    # TODO : these next two tests don't work right; see Issue #343
-    # assert results4[0].type == 'ComSci' # the first one out is the high quality coadd
-    # assert results4[1].type == 'Sci' # the second one is the regular image
-
-    # check that the DECam difference and new image it is based on have the same limiting magnitude and quality
-    stmt = Image.query_images(instrument='DECam', type=3)
-    diff = session.scalars(stmt).first()
-    stmt = Image.query_images(instrument='DECam', type=1, min_mjd=diff.mjd, max_mjd=diff.mjd)
-    new = session.scalars(stmt).first()
-    assert diff.lim_mag_estimate == new.lim_mag_estimate
-    assert diff.fwhm_estimate == new.fwhm_estimate
-    assert im_qual(diff) == im_qual(new)
+    total_w_calibs = session.query( Image ).count()
+    total = session.query( Image ).filter( Image._type.in_([1,2,3,4]) ).count()
+
+    # try finding them all
+    all_images_w_calibs = Image.find_images( type=None )
+    assert len(all_images_w_calibs) == total_w_calibs
+
+    all_images = Image.find_images()
+    assert len(all_images) == total
+
+    results = Image.find_images( order_by='earliest' )
+    assert len(results) == total
+    assert all( results[i].mjd <= results[i+1].mjd for i in range(len(results)-1) )
+
+    results = Image.find_images( order_by='latest' )
+    assert len(results) == total
+    assert all( results[i].mjd >= results[i+1].mjd for i in range(len(results)-1) )
+
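+    # (judging from the counts above, find_images() with no type argument defaults to
+    #  the science/coadd/diff types 1-4, while type=None disables the type filter and
+    #  also returns calibration images)
+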
+    # get only the science images
+    found1 = Image.find_images(type=1)
+    assert all(im._type == 1 for im in found1)
+    assert all(im.type == 'Sci' for im in found1)
+    assert len(found1) < total
+
+    # get the coadd and subtraction images
+    found2 = Image.find_images(type=[2, 3, 4])
+    assert all(im._type in [2, 3, 4] for im in found2)
+    assert all(im.type in ['ComSci', 'Diff', 'ComDiff'] for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    # use the names of the types instead of integers, or a mixture of ints and strings
+    found3 = Image.find_images(type=['ComSci', 'Diff', 4])
+    assert [ f._id for f in found2 ] == [ f._id for f in found3 ]
+
+    # filter by MJD and observation date
+    value = 57000.0
+    found1 = Image.find_images(min_mjd=value)
+    assert all(im.mjd >= value for im in found1)
+    assert all(im.instrument == 'DECam' for im in found1)
+    assert len(found1) < total
+
+    found2 = Image.find_images(max_mjd=value)
+    assert all(im.mjd <= value for im in found2)
+    assert all(im.instrument == 'PTF' for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    found3 = Image.find_images(min_mjd=value, max_mjd=value)
+    assert len(found3) == 0
+
+    # filter by observation date
+    t = Time(57000.0, format='mjd').datetime
+    found4 = Image.find_images(min_dateobs=t)
+    assert all(im.observation_time >= t for im in found4)
+    assert all(im.instrument == 'DECam' for im in found4)
+    assert set( f._id for f in found4 ) == set( f._id for f in found1 )
+    assert len(found4) < total
+
+    found5 = Image.find_images(max_dateobs=t)
+    assert all(im.observation_time <= t for im in found5)
+    assert all(im.instrument == 'PTF' for im in found5)
+    assert set( f._id for f in found5 ) == set( f._id for f in found2 )
+    assert len(found5) < total
+    assert len(found4) + len(found5) == total
+
+    # filter by images that contain this point (ELAIS-E1, chip S3)
+    ra = 7.449
+    dec = -42.926
+    found1 = Image.find_containing( ra, dec ) # note: find_containing is a FourCorners method
+    found1a = Image.find_images( ra=ra, dec=dec )
+    assert set( i.id for i in found1 ) == set( i.id for i in found1a )
+    assert all(im.instrument == 'DECam' for im in found1)
+    assert all(im.target == 'ELAIS-E1' for im in found1)
+    assert len(found1) < total
+
+    # filter by images that contain this point (ELAIS-E1, chip N16)
+    ra = 7.659
+    dec = -43.420
+    found2 = Image.find_images(ra=ra, dec=dec)
+    assert all(im.instrument == 'DECam' for im in found2)
+    assert all(im.target == 'ELAIS-E1' for im in found2)
+    assert len(found2) < total
+
+    # filter by images that contain this point (PTF field number 100014)
+    ra = 188.0
+    dec = 4.5
+    found3 = Image.find_images(ra=ra, dec=dec )
+    assert all(im.instrument == 'PTF' for im in found3)
+    assert all(im.target == '100014' for im in found3)
+    assert len(found3) < total
+    assert len(found1) + len(found2) + len(found3) == total
+
+    # find images that overlap
+    ptfdses = ptf_reference_image_datastores
+    found1 = Image.find_images( image=ptfdses[0].image )
+    found1ids = set( f._id for f in found1 )
+    assert len(found1) == 6
+    assert set( d.image.id for d in ptfdses ).issubset( found1ids )
+    assert ptf_ref.image_id in found1ids
+
+    found2 = Image.find_images( minra=ptfdses[0].image.minra,
+                                maxra=ptfdses[0].image.maxra,
+                                mindec=ptfdses[0].image.mindec,
+                                maxdec=ptfdses[0].image.maxdec )
+    found2ids = set( f._id for f in found2 )
+    assert found1ids == found2ids
+
+    found3 = Image.find_images( image=ptfdses[0].image, overlapfrac=0.98 )
+    found3ids = set( f._id for f in found3 )
+    assert found3ids.issubset( found1ids )
+    assert all( FourCorners.get_overlap_frac( ptfdses[0].image, f ) >= 0.98 for f in found3 )
+    assert len(found3ids) == 2
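+    # (found1 == found2 above suggests that image= matching is a bounding-box overlap
+    #  test on minra/maxra/mindec/maxdec; overlapfrac then appears to additionally
+    #  require FourCorners.get_overlap_frac to be at least the given value)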
+
+    # filter by the PTF project name
+    found1 = Image.find_images(project='PTF_DyC_survey')
+    assert all(im.project == 'PTF_DyC_survey' for im in found1)
+    assert all(im.instrument == 'PTF' for im in found1)
+    assert len(found1) < total
+
+    # filter by the two different project names for DECam:
+    found2 = Image.find_images(project=['many', '2023A-716082'])
+    assert all(im.project in ['many', '2023A-716082'] for im in found2)
+    assert all(im.instrument == 'DECam' for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    # filter by instrument
+    found1 = Image.find_images(instrument='PTF')
+    assert all(im.instrument == 'PTF' for im in found1)
+    assert len(found1) < total
+
+    found2 = Image.find_images(instrument='DECam')
+    assert all(im.instrument == 'DECam' for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    found3 = Image.find_images(instrument=['PTF', 'DECam'])
+    assert len(found3) == total
+
+    found4 = Image.find_images(instrument=['foobar'])
+    assert len(found4) == 0
+
+    # filter by filter
+    found6 = Image.find_images(filter='R')
+    assert all(im.filter == 'R' for im in found6)
+    assert all(im.instrument == 'PTF' for im in found6)
+    assert set( f.id for f in found6 ) == set( f.id for f in found1 )
+
+    found7 = Image.find_images(filter='r DECam SDSS c0002 6415.0 1480.0')
+    assert all(im.filter == 'r DECam SDSS c0002 6415.0 1480.0' for im in found7)
+    assert all(im.instrument == 'DECam' for im in found7)
+    assert set( f.id for f in found7 ) == set( f.id for f in found2 )
+
+    # filter by seeing FWHM
+    value = 3.0
+    found1 = Image.find_images(max_seeing=value)
+    assert all(im.instrument == 'DECam' for im in found1)
+    assert all(im.fwhm_estimate <= value for im in found1)
+    assert len(found1) < total
+
+    found2 = Image.find_images(min_seeing=value)
+    assert all(im.instrument == 'PTF' for im in found2)
+    assert all(im.fwhm_estimate >= value for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    found3 = Image.find_images(min_seeing=value, max_seeing=value)
+    assert len(found3) == 0 # we will never have exactly that number
+
+    # filter by limiting magnitude
+    value = 21.0
+    found1 = Image.find_images(min_lim_mag=value)
+    assert all(im.instrument == 'DECam' for im in found1)
+    assert all(im.lim_mag_estimate >= value for im in found1)
+    assert len(found1) < total
+
+    found2 = Image.find_images(max_lim_mag=value)
+    assert all(im.instrument == 'PTF' for im in found2)
+    assert all(im.lim_mag_estimate <= value for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    found3 = Image.find_images(min_lim_mag=value, max_lim_mag=value)
+    assert len(found3) == 0
+
+    # filter by background
+    value = 28.0
+    found1 = Image.find_images(min_background=value)
+    assert all(im.bkg_rms_estimate >= value for im in found1)
+    assert len(found1) < total
+
+    found2 = Image.find_images(max_background=value)
+    assert all(im.bkg_rms_estimate <= value for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    found3 = Image.find_images(min_background=value, max_background=value)
+    assert len(found3) == 0
+
+    # filter by zero point
+    value = 28.0
+    found1 = Image.find_images(min_zero_point=value)
+    assert all(im.zero_point_estimate >= value for im in found1)
+    assert len(found1) < total
+
+    found2 = Image.find_images(max_zero_point=value)
+    assert all(im.zero_point_estimate <= value for im in found2)
+    assert len(found2) < total
+    assert len(found1) + len(found2) == total
+
+    found3 = Image.find_images(min_zero_point=value, max_zero_point=value)
+    assert len(found3) == 0
+
+    # filter by exposure time
+    value = 60.0 + 1.0
+    found1 = Image.find_images(min_exp_time=value)
+    assert all(im.exp_time >= value for im in found1)
+    assert len(found1) < total
+
+    found2 = Image.find_images(max_exp_time=value)
+    assert all(im.exp_time <= value for im in found2)
+    assert len(found2) < total
+
+    found3 = Image.find_images(min_exp_time=60.0, max_exp_time=60.0)
+    assert len(found3) == len(found2) # all those with exp_time under 61s are those with exactly 60s
+
+    # query based on airmass
+    value = 1.15
+    total_with_airmass = len([im for im in all_images if im.airmass is not None])
+    found1 = Image.find_images(max_airmass=value)
+    assert all(im.airmass <= value for im in found1)
+    assert len(found1) < total_with_airmass
+
+    found2 = Image.find_images(min_airmass=value)
+    assert all(im.airmass >= value for im in found2)
+    assert len(found2) < total_with_airmass
+    assert len(found1) + len(found2) == total_with_airmass
+
+    # order the results by quality (lim_mag - 3 * fwhm)
+    # note that we cannot filter by quality, it is not a meaningful number
+    # on its own, only as a way to compare images and find which is better.
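+    # (e.g. with the default seeing_quality_factor of 3.0, an image with lim_mag_estimate
+    #  21.0 and fwhm_estimate 3.0 would get quality 21.0 - 3.0 * 3.0 = 12.0)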
+    # sort all the images by quality and get the best one
+    found = Image.find_images(order_by='quality')
+    best = found[0]
+
+    # the best overall quality from all images
+    assert im_qual(best) == max([im_qual(im) for im in found])
+
+    # get the two best images from the PTF instrument (exp_time chooses the single images only)
+    found1 = Image.find_images(max_exp_time=60, order_by='quality')[:2]
+    assert len(found1) == 2
+    assert all(im_qual(im) > 9.0 for im in found1)
+
+    # change the seeing factor a little:
+    factor = 2.8
+    found2 = Image.find_images(max_exp_time=60, order_by='quality', seeing_quality_factor=factor)[:2]
+
+    # quality will be a little bit higher, but the images are the same
+    assert [ f._id for f in found2 ] == [ f._id for f in found1 ]
+    assert im_qual(found2[0], factor=factor) > im_qual(found1[0])
+    assert im_qual(found2[1], factor=factor) > im_qual(found1[1])
+
+    # change the seeing factor dramatically:
+    factor = 0.2
+    found3 = Image.find_images(max_exp_time=60, order_by='quality', seeing_quality_factor=factor)[:2]
+    assert [ i.id for i in found3 ] == [ i.id for i in found1 ]
+
+    # TODO -- assumptions that went into this test aren't right, come up with
+    # a test case where it will actually work
+    # quality will be a higher, but also a different image will now have the second-best quality
+    # assert [ f._id for f in found3 ] != [ f._id for f in found1 ]
+    # assert im_qual(found3[0], factor=factor) > im_qual(found1[0])
+
+    # do a cross filtering of coordinates and background (should only find the PTF coadd)
+    ra = 188.0
+    dec = 4.5
+    background = 5.
+
+    found1 = Image.find_images(ra=ra, dec=dec, max_background=background)
+    assert len(found1) == 1
+    assert found1[0].instrument == 'PTF'
+    assert found1[0].type == 'ComSci'
+
+    # cross the DECam target and section ID with the exposure time that's of the S3 ref image
+    target = 'ELAIS-E1'
+    section_id = 'S3'
+    exp_time = 120.0
+
+    found2 = Image.find_images(target=target, section_id=section_id, min_exp_time=exp_time)
+    assert len(found2) == 1
+    assert found2[0].instrument == 'DECam'
+    assert found2[0].type == 'ComSci'
+    assert found2[0].exp_time == 150.0
+
+    # cross filter on MJD and instrument in a way that has no results
+    mjd = 55000.0
+    instrument = 'PTF'
+
+    found3 = Image.find_images(min_mjd=mjd, instrument=instrument)
+    assert len(found3) == 0
+
+    # cross filter MJD and sort by quality to get the coadd PTF image
+    mjd = 54926.31913
+
+    found4 = Image.find_images(max_mjd=mjd, order_by='quality')
+    assert len(found4) == 2
+    assert found4[0].mjd == found4[1].mjd # same time, as one is a coadd of the other images
+    assert found4[0].instrument == 'PTF'
+    # TODO : these next two tests don't work right; see Issue #343
+    # assert found4[0].type == 'ComSci' # the first one out is the high quality coadd
+    # assert found4[1].type == 'Sci' # the second one is the regular image
+
+    # check that the DECam difference and new image it is based on have the same limiting magnitude and quality
+    # (...this check probably really belongs in a test of subtractions!)
+    diff = Image.find_images(instrument='DECam', type=3)
+    assert len(diff) == 1
+    diff = diff[0]
+    new = Image.find_images(instrument='DECam', type=1, min_mjd=diff.mjd, max_mjd=diff.mjd)
+    assert len(new) == 1
+    new = new[0]
+    assert new.id != diff.id
+    assert diff.lim_mag_estimate == new.lim_mag_estimate
+    assert diff.fwhm_estimate == new.fwhm_estimate
+    assert im_qual(diff) == im_qual(new)
 
 
 def test_image_get_upstream_images( ptf_ref, ptf_supernova_image_datastores, ptf_subtraction1_datastore ):
diff --git a/util/logger.py b/util/logger.py
index b6fb4b90..15731ccb 100644
--- a/util/logger.py
+++ b/util/logger.py
@@ -3,8 +3,8 @@
 import multiprocessing
 import logging
 
-# _default_log_level = logging.INFO
-_default_log_level = logging.DEBUG
+_default_log_level = logging.INFO
+# _default_log_level = logging.DEBUG
 
 _default_datefmt = '%Y-%m-%d %H:%M:%S'
 
 # Normally you don't want to show milliseconds, because it's additional gratuitous information
@@ -15,7 +15,7 @@
 
 class SCLogger:
-    """Holds the logging instance that we use throught SeeChange.
+    """Holds the logging instance that we use throughout SeeChange.
 
     Normal use: get the logger object with SCLogger.get(), which is a
     stander logging logger object.  Or, just call SCLogger.debug,