From 6d758d7080a6b95e7c940d320b6302177ae0cc9d Mon Sep 17 00:00:00 2001
From: Rob Knop
Date: Mon, 29 Jul 2024 14:01:54 -0700
Subject: [PATCH] Adapt the webap for cutouts files. Adapt the webap to use provenance tags. Webap tests.

---
 improc/tools.py           |   8 +-
 models/provenance.py      |  14 +-
 tests/conftest.py         |   4 +-
 tests/webap/test_webap.py | 153 +++++++++++++-
 util/radec.py             |   4 +-
 util/util.py              |   2 +-
 webap/seechange_webap.py  | 428 ++++++++++++++++++++++++++++----------
 webap/static/seechange.js | 124 ++++++-----
 8 files changed, 554 insertions(+), 183 deletions(-)

diff --git a/improc/tools.py b/improc/tools.py
index e37b8ebc..5f4c3226 100644
--- a/improc/tools.py
+++ b/improc/tools.py
@@ -239,10 +239,10 @@ def strip_wcs_keywords( hdr ):

     """

-    basematch = re.compile( "^C(RVAL|RPIX|UNIT|DELT|TYPE)[12]$" )
-    cdmatch = re.compile( "^CD[12]_[12]$" )
-    sipmatch = re.compile( "^[AB]P?_(ORDER|(\d+)_(\d+))$" )
-    tpvmatch = re.compile( "^P[CV]\d+_\d+$" )
+    basematch = re.compile( r"^C(RVAL|RPIX|UNIT|DELT|TYPE)[12]$" )
+    cdmatch = re.compile( r"^CD[12]_[12]$" )
+    sipmatch = re.compile( r"^[AB]P?_(ORDER|(\d+)_(\d+))$" )
+    tpvmatch = re.compile( r"^P[CV]\d+_\d+$" )

     tonuke = set()
     for kw in hdr.keys():
diff --git a/models/provenance.py b/models/provenance.py
index eb3a1e4f..bbd536ff 100644
--- a/models/provenance.py
+++ b/models/provenance.py
@@ -384,6 +384,14 @@ class ProvenanceTagExistsError(Exception):
     pass

 class ProvenanceTag(Base, AutoIDMixin):
+    """A human-readable tag to associate with provenances.
+
+    A well-defined provenance tag will have a provenance defined for every step, but there will
+    only be a *single* provenance for each step (except for referencing, where there could be
+    multiple provenances defined).  The validate class method can check a tag for duplicates.
+
+    """
+
     __tablename__ = "provenance_tags"

     __table_args__ = ( UniqueConstraint( 'tag', 'provenance_id', name='_provenancetag_prov_tag_uc' ), )
@@ -437,15 +445,15 @@ def newtag( cls, tag, provs, session=None ):
         with SmartSession( session ) as sess:
             # Get all the provenance IDs we're going to insert
-            provids = []
+            provids = set()
             for prov in provs:
                 if isinstance( prov, Provenance ):
-                    provids.append( prov.id )
+                    provids.add( prov.id )
                 elif isinstance( prov, str ):
                     provobj = sess.get( Provenance, prov )
                     if provobj is None:
                         raise ValueError( f"Unknown Provenance ID {prov}" )
-                    provids.append( provobj.id )
+                    provids.add( provobj.id )
                 else:
                     raise TypeError( f"Everything in the provs list must be Provenance or str, not {type(prov)}" )

diff --git a/tests/conftest.py b/tests/conftest.py
index c330742c..df900927 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -33,8 +33,8 @@
 # at the end of tests.  In general, we want this to be True, so we can make sure
 # that our tests are properly cleaning up after themselves.  However, the errors
 # from this can hide other errors and failures, so when debugging, set it to False.
-# verify_archive_database_empty = True
-verify_archive_database_empty = False
+verify_archive_database_empty = True
+# verify_archive_database_empty = False

 pytest_plugins = [
diff --git a/tests/webap/test_webap.py b/tests/webap/test_webap.py
index 6ca8aec4..2a376047 100644
--- a/tests/webap/test_webap.py
+++ b/tests/webap/test_webap.py
@@ -1,20 +1,157 @@
+import re
+import time
 import pytest
+import sqlalchemy as sa
+
 import selenium
 import selenium.webdriver
 from selenium.webdriver.common.by import By
 from selenium.webdriver.support import expected_conditions
 from selenium.webdriver.support.wait import WebDriverWait
+from selenium.webdriver.support.ui import Select
 from selenium.webdriver.remote.webelement import WebElement

+from models.base import SmartSession
+from models.provenance import CodeVersion, Provenance, ProvenanceTag
+
+from util.logger import SCLogger
+

 def test_webap( browser, webap_url, decam_datastore ):
-    import pdb; pdb.set_trace()
+    ds = decam_datastore
+    junkprov = None
+
+    try:
+        # Create a new provenance tag, tagging the provenances that are in decam_datastore
+        ProvenanceTag.newtag( 'test_webap',
+                              [ ds.exposure.provenance,
+                                ds.image.provenance,
+                                ds.sources.provenance,
+                                ds.reference.provenance,
+                                ds.sub_image.provenance,
+                                ds.detections.provenance,
+                                ds.cutouts.provenance,
+                                ds.measurements[0].provenance ] )
+
+        # Create a throwaway provenance and provenance tag so we can test
+        #   things *not* being found
+        with SmartSession() as session:
+            cv = session.query( CodeVersion ).first()
+            junkprov = Provenance( process='no_process', code_version=cv, is_testing=True )
+            session.add( junkprov )
+            session.commit()
+            ProvenanceTag.newtag( 'no_such_tag', [ junkprov ] )
+
+        browser.get( webap_url )
+        WebDriverWait( browser, timeout=10 ).until(
+            lambda d: d.find_element(By.ID, 'seechange_context_render_page_complete' ) )
+
+        # The "test_webap" option in the provtag_wid select widget won't necessarily
+        #   be there immediately, because it's filled in with a callback from a web request
+        tries = 5
+        while ( tries > 0 ):
+            provtag_wid = browser.find_element( By.ID, "provtag_wid" )
+            options = provtag_wid.find_elements( By.TAG_NAME, "option" )
+            if any( [ o.text == 'test_webap' for o in options ] ):
+                break
+            tries -= 1
+            if tries <= 0:
+                assert False, "Failed to find the test_webap option in the provenances select widget"
+            else:
+                SCLogger.debug( "Didn't find test_webap in the provtag_wid select, sleeping 1s and retrying" )
+                time.sleep( 1 )
+
+        buttons = browser.find_elements( By.XPATH, "//input[@type='button']" )
+        buttons = { b.get_attribute("value") : b for b in buttons }
+
+        # Make sure we get no exposures if we ask for the junk tag
+
+        select = Select( provtag_wid )
+        select.select_by_visible_text( 'no_such_tag' )
+        buttons['Show Exposures'].click()
+
+        WebDriverWait( browser, timeout=10 ).until(
+            lambda d: d.find_element( By.XPATH, "//h2[contains(.,'Exposures from')]" ) )
+
+        # Make sure that the "Exposure List" div is what's shown
+        # WARNING -- this absolute xpath might change if the page layout is changed!
+        tabcontentdiv = browser.find_element( By.XPATH, "html/body/div/div/div/div/div/div/div[2]" )
+        assert tabcontentdiv.text[:15] == 'Exposures from '
+        explist = browser.find_element( By.ID, "exposure_list_table" )
+        rows = explist.find_elements( By.TAG_NAME, "tr" )
+        assert len(rows) == 1    # Just the header row
+
+        # Now ask for the test_webap tag, see if we get the one exposure we expect
+
+        select.select_by_visible_text( 'test_webap' )
+        buttons['Show Exposures'].click()
+        # Give it half a second to at least get to the "loading" screen; that's
+        #   all javascript with no server communication, so should be fast.
+        time.sleep( 0.5 )
+        WebDriverWait( browser, timeout=10 ).until(
+            lambda d: d.find_element( By.XPATH, "//h2[contains(.,'Exposures from')]" ) )
+
+        tabcontentdiv = browser.find_element( By.XPATH, "html/body/div/div/div/div/div/div/div[2]" )
+        assert tabcontentdiv.text[:15] == 'Exposures from '
+        explist = browser.find_element( By.ID, "exposure_list_table" )
+        rows = explist.find_elements( By.TAG_NAME, "tr" )
+        assert len(rows) == 2    # Header row plus the one exposure we expect
+
+        cols = rows[1].find_elements( By.XPATH, "./*" )
+        assert cols[0].text == 'c4d_230702_080904_ori.fits.fz'
+        assert cols[2].text == 'ELAIS-E1'
+        assert cols[5].text == '1'      # n_images
+        assert cols[6].text == '172'    # detections
+        assert cols[7].text == '6'      # sources
+
+        # Try to click on the exposure name, make sure we get the exposure details
+        expnamelink = cols[0].find_element( By.TAG_NAME, 'a' )
+        expnamelink.click()
+        WebDriverWait( browser, timeout=10 ).until(
+            lambda d: d.find_element( By.XPATH, "//h2[contains(.,'Exposure c4d_230702_080904_ori.fits.fz')]" ) )
+
+        # OMG I nest a lot of divs
+        tabcontentdiv = browser.find_element( By.XPATH, "html/body/div/div/div/div/div/div/div[2]" )
+        imagesdiv = tabcontentdiv.find_element( By.XPATH, "./div/div/div/div[2]/div" )
+        assert re.search( r"^Exposure has 1 images and 1 completed subtractions.*"
+                          r"6 out of 172 detections pass preliminary cuts",
+                          imagesdiv.text, re.DOTALL ) is not None
+
+        imagestab = imagesdiv.find_element( By.TAG_NAME, 'table' )
+        rows = imagestab.find_elements( By.TAG_NAME, 'tr' )
+        assert len(rows) == 2
+        cols = rows[1].find_elements( By.XPATH, "./*" )
+        assert re.search( r'^c4d_20230702_080904_S3_r_Sci', cols[1].text ) is not None
+
+        # Find the sources tab and click on that
+        tabbuttonsdiv = tabcontentdiv.find_element( By.XPATH, "./div/div/div/div[1]" )
+        sourcestab = tabbuttonsdiv.find_element( By.XPATH, "//.[.='Sources']" )
+        sourcestab.click()
+        # Give it half a second to at least get to the "loading" screen; that's
+        #   all javascript with no server communication, so should be fast.
+        time.sleep( 0.5 )
+        WebDriverWait( browser, timeout=10 ).until(
+            lambda d: d.find_element( By.XPATH, "//p[contains(.,'Sources for all successfully completed chips')]" ) )
+
+        # Now imagesdiv should have information about the sources
+        tabcontentdiv = browser.find_element( By.XPATH, "html/body/div/div/div/div/div/div/div[2]" )
+        imagesdiv = tabcontentdiv.find_element( By.XPATH, "./div/div/div/div[2]/div" )
+
+        sourcestab = imagesdiv.find_element( By.TAG_NAME, 'table' )
+        rows = sourcestab.find_elements( By.TAG_NAME, 'tr' )
+        assert len(rows) == 7
+        # check stuff about the rows?
+
+        # There is probably more we should be testing here.  Definitely.
+
+    finally:
+        # Clean up the junk Provenance, and the ProvenanceTags we created
+        with SmartSession() as session:
+            session.execute( sa.text( "DELETE FROM provenance_tags "
+                                      "WHERE tag IN ('test_webap', 'no_such_tag')" ) )
+            if junkprov is not None:
+                session.delete( junkprov )
+            session.commit()

-    browser.get( webap_url )
-    WebDriverWait( browser, timeout=10 ).until(
-        lambda d: d.find_element(By.ID, 'seechange_context_render_page_complete' ) )
-
-
-    pass
-
diff --git a/util/radec.py b/util/radec.py
index 9740e352..11f07b69 100644
--- a/util/radec.py
+++ b/util/radec.py
@@ -5,8 +5,8 @@
 from astropy.coordinates import SkyCoord, BarycentricTrueEcliptic
 import astropy.units as u

-_radecparse = re.compile( '^ *(?P<sign>[\-\+])? *(?P<d>[0-9]{1,2}): *(?P<m>[0-9]{1,2}):'
-                          ' *(?P<s>[0-9]{1,2}(\.[0-9]*)?) *$' )
+_radecparse = re.compile( r'^ *(?P<sign>[\-\+])? *(?P<d>[0-9]{1,2}): *(?P<m>[0-9]{1,2}):'
+                          r' *(?P<s>[0-9]{1,2}(\.[0-9]*)?) *$' )

 def parse_sexigesimal_degrees( strval, hours=False, positive=None ):
diff --git a/util/util.py b/util/util.py
index dcaeb24d..979eb6c1 100644
--- a/util/util.py
+++ b/util/util.py
@@ -453,7 +453,7 @@ def as_datetime( string ):
     ---------
     string : str or datetime.datetime
       The string to convert.  If a datetime.datetime, the return
-      value is just this.  If None or an empty string ("^\s*$"), will
+      value is just this.  If None or an empty string ("^\\s*$"), will
       return None.  Otherwise, must be a string that
       dateutil.parser.parse can handle.

diff --git a/webap/seechange_webap.py b/webap/seechange_webap.py
index 048a8048..cce670eb 100644
--- a/webap/seechange_webap.py
+++ b/webap/seechange_webap.py
@@ -73,8 +73,8 @@ def provtags():
     try:
         conn = next( dbconn() )
         cursor = conn.cursor()
-        cursor.execute( 'SELECT DISCTINCT ON(tag) tag FROM provenance_tags ORDER BY tag' )
-        return { 'status': ok,
+        cursor.execute( 'SELECT DISTINCT ON(tag) tag FROM provenance_tags ORDER BY tag' )
+        return { 'status': 'ok',
                  'provenance_tags': [ row[0] for row in cursor.fetchall() ]
                }

     except Exception as ex:
@@ -102,30 +102,67 @@ def exposures():
     conn = next( dbconn() )
     cursor = conn.cursor()

-    # TODO : deal with provenance!
-    # (We need some kind of provenance tagging table, so that the user can specify
-    # a user-readable name (e.g. "default", "latest", "dr1", whatever) that specifies
-    # a set of provenances to search.  One of these names must be all the provenances
-    # we're using "right now" in the active pipeline; that will be the one that
-    # (by default) the webap uses.
-    q = ( 'SELECT m.id, m.filepath, m.mjd, m.target, m.filter, m.filter_array, m.exp_time, '
-          '       m.n_images, m.n_cutouts, m.n_measurements, '
-          '       SUM( CASE WHEN r.success THEN 1 ELSE 0 END ) AS n_successim, '
-          '       SUM( CASE WHEN r.error_message IS NOT NULL THEN 1 ELSE 0 END ) as n_errors '
-          'FROM ( '
-          '  SELECT e.id, e.filepath, e.mjd, e.target, e.filter, e.filter_array, e.exp_time, '
-          '         COUNT(DISTINCT(i.id)) AS n_images, COUNT(c.id) AS n_cutouts, COUNT(m.id) AS n_measurements '
-          '  FROM exposures e '
-          '  LEFT JOIN images i ON i.exposure_id=e.id '
-          '  LEFT JOIN image_upstreams_association ias ON ias.upstream_id=i.id '
-          '  LEFT JOIN images s ON s.id = ias.downstream_id AND s.is_sub '
-          '  LEFT JOIN source_lists sl ON sl.image_id=s.id '
-          '  LEFT JOIN cutouts c ON c.sources_id=sl.id '
-          '  LEFT JOIN measurements m ON m.cutouts_id=c.id '
-          '  LEFT JOIN reports r ON r.exposure_id=e.id ' )
+
+    # Gonna do this in three steps.
+    # First, get all the images with
+    # counts of source lists and counts of measurements in a temp
+    # table, then do the sums and things on that temp table.
+    # Filtering on provenance tags makes this more complicated, so
+    # we'll do a different query if we're doing that.  Truthfully,
+    # asking for all provenance tags is going to be a mess for the
+    # user... perhaps we should disable it?

     subdict = {}
+    if data['provenancetag'] is None:
+        q = ( 'SELECT e.id, e.filepath, e.mjd, e.target, e.filter, e.filter_array, e.exp_time, '
+              '       i.id AS imgid, s.id AS subid, sl.id AS slid, sl.num_sources, '
+              '       COUNT(m.id) AS num_measurements '
+              'INTO TEMP TABLE temp_imgs '
+              'FROM exposures e '
+              'LEFT JOIN images i ON i.exposure_id=e.id '
+              'LEFT JOIN ( '
+              '   SELECT su.id, ias.upstream_id '
+              '   FROM images su '
+              '   INNER JOIN image_upstreams_association ias ON ias.downstream_id=su.id '
+              '   WHERE su.is_sub '
+              ') s ON s.upstream_id=i.id '
+              'LEFT JOIN source_lists sl ON sl.image_id=s.id '
+              'LEFT JOIN cutouts cu ON cu.sources_id=sl.id '
+              'LEFT JOIN measurements m ON m.cutouts_id=cu.id '
+            )
+    else:
+        q = ( 'SELECT e.id, e.filepath, e.mjd, e.target, e.filter, e.filter_array, e.exp_time, '
+              '       i.id AS imgid, s.id AS subid, sl.id AS slid, sl.num_sources, '
+              '       COUNT(m.id) AS num_measurements '
+              'INTO TEMP TABLE temp_imgs '
+              'FROM exposures e '
+              'LEFT JOIN ( '
+              '   SELECT im.id, im.exposure_id FROM images im '
+              '   INNER JOIN provenance_tags impt ON impt.provenance_id=im.provenance_id AND impt.tag=%(provtag)s '
+              ') i ON i.exposure_id=e.id '
+              'LEFT JOIN ( '
+              '   SELECT su.id, ias.upstream_id FROM images su '
+              '   INNER JOIN image_upstreams_association ias ON ias.downstream_id=su.id AND su.is_sub '
+              '   INNER JOIN provenance_tags supt ON supt.provenance_id=su.provenance_id AND supt.tag=%(provtag)s '
+              ') s ON s.upstream_id=i.id '
+              'LEFT JOIN ( '
+              '   SELECT sli.id, sli.image_id, sli.num_sources FROM source_lists sli '
+              '   INNER JOIN provenance_tags slpt ON slpt.provenance_id=sli.provenance_id AND slpt.tag=%(provtag)s '
+              ') sl ON sl.image_id=s.id '
+              'LEFT JOIN ( '
+              '   SELECT cu.id, cu.sources_id FROM cutouts cu '
+              '   INNER JOIN provenance_tags cupt ON cu.provenance_id=cupt.provenance_id AND cupt.tag=%(provtag)s '
+              ') c ON c.sources_id=sl.id '
+              'LEFT JOIN ( '
+              '   SELECT meas.id, meas.cutouts_id FROM measurements meas '
+              '   INNER JOIN provenance_tags mept ON mept.provenance_id=meas.provenance_id AND mept.tag=%(provtag)s '
+              ') m ON m.cutouts_id=c.id '
+              'INNER JOIN provenance_tags ept ON ept.provenance_id=e.provenance_id AND ept.tag=%(provtag)s '
+            )
+        subdict['provtag'] = data['provenancetag']
     if ( t0 is not None ) or ( t1 is not None ):
-        q += " WHERE "
+        q += 'WHERE '
         if t0 is not None:
             q += 'e.mjd >= %(t0)s'
             subdict['t0'] = t0
@@ -133,15 +170,46 @@ def exposures():
         if t1 is not None:
             if t0 is not None: q += ' AND '
             q += 'e.mjd <= %(t1)s'
             subdict['t1'] = t1
-    q += ( ' GROUP BY e.id '   # ,e.filepath,e.mjd,e.target,e.filter,e.filter_array,e.exp_time '
-           ' ORDER BY e.mjd, e.filter, e.filter_array ' )
-
-    q += ( ') m '
-           'LEFT JOIN reports r ON m.id=r.exposure_id '
-           'GROUP BY m.id, m.filepath, m.mjd, m.target, m.filter, m.filter_array, m.exp_time, '
-           '         m.n_images, m.n_cutouts, m.n_measurements ' )
+    # The WHERE clause (if any) has to come before the GROUP BY.  (Grouping by
+    #   sl.num_sources is harmless when sl is the real source_lists table, and
+    #   required when it's a subquery.)
+    q += ' GROUP BY e.id, i.id, s.id, sl.id, sl.num_sources '

     cursor.execute( q, subdict )
+
+    # Now run a second query to count and sum those things
+    # These numbers will be wrong (double-counts) if not filtering on a provenance tag, or if the
+    # provenance tag includes
+    # multiple provenances for a given step!
+    q = ( 'SELECT t.id, t.filepath, t.mjd, t.target, t.filter, t.filter_array, t.exp_time, '
+          '       COUNT(t.subid) AS num_subs, SUM(t.num_sources) AS num_sources, '
+          '       SUM(t.num_measurements) AS num_measurements '
+          'INTO TEMP TABLE temp_imgs_2 '
+          'FROM temp_imgs t '
+          'GROUP BY t.id, t.filepath, t.mjd, t.target, t.filter, t.filter_array, t.exp_time '
+        )
+
+    cursor.execute( q )
+
+    # Run a third query to count reports
+    subdict = {}
+    q = ( 'SELECT t.id, t.filepath, t.mjd, t.target, t.filter, t.filter_array, t.exp_time, '
+          '       t.num_subs, t.num_sources, t.num_measurements, '
+          '       SUM( CASE WHEN r.success THEN 1 ELSE 0 END ) as n_successim, '
+          '       SUM( CASE WHEN r.error_message IS NOT NULL THEN 1 ELSE 0 END ) AS n_errors '
+          'FROM temp_imgs_2 t '
+        )
+    if data['provenancetag'] is None:
+        q += 'LEFT JOIN reports r ON r.exposure_id=t.id '
+    else:
+        q += ( 'LEFT JOIN ( '
+               '   SELECT re.exposure_id, re.success, re.error_message '
+               '   FROM reports re '
+               '   INNER JOIN provenance_tags rept ON rept.provenance_id=re.provenance_id AND rept.tag=%(provtag)s '
+               ') r ON r.exposure_id=t.id '
+             )
+        subdict['provtag'] = data['provenancetag']
+    # I wonder if making a primary key on the temp table would be more efficient than
+    #   all these columns in GROUP BY?  Investigate this.
+    q += ( 'GROUP BY t.id, t.filepath, t.mjd, t.target, t.filter, t.filter_array, t.exp_time, '
+           '         t.num_subs, t.num_sources, t.num_measurements ' )
+
     cursor.execute( q, subdict )
     columns = { cursor.description[i][0]: i for i in range(len(cursor.description)) }

     ids = []
@@ -150,9 +218,9 @@ def exposures():
     target = []
     filtername = []
     exp_time = []
-    n_images = []
-    n_cutouts = []
+    n_subs = []
     n_sources = []
+    n_measurements = []
     n_successim = []
     n_errors = []

@@ -170,15 +238,16 @@ def exposures():
                               f"filter_array={row[columns['filter_array']]} type {row[columns['filter_array']]}" )
         filtername.append( row[columns['filter']] )
         exp_time.append( row[columns['exp_time']] )
-        n_images.append( row[columns['n_images']] )
-        n_cutouts.append( row[columns['n_cutouts']] )
-        n_sources.append( row[columns['n_measurements']] )
+        n_subs.append( row[columns['num_subs']] )
+        n_sources.append( row[columns['num_sources']] )
+        n_measurements.append( row[columns['num_measurements']] )
         n_successim.append( row[columns['n_successim']] )
         n_errors.append( row[columns['n_errors']] )

     return { 'status': 'ok',
              'startdate': t0,
              'enddate': t1,
+             'provenance_tag': data['provenancetag'],
              'exposures': {
                  'id': ids,
                  'name': name,
@@ -186,9 +255,9 @@ def exposures():
                  'target': target,
                  'filter': filtername,
                  'exp_time': exp_time,
-                 'n_images': n_images,
-                 'n_cutouts': n_cutouts,
+                 'n_subs': n_subs,
                  'n_sources': n_sources,
+                 'n_measurements': n_measurements,
                  'n_successim': n_successim,
                  'n_errors': n_errors,
              }
@@ -204,47 +273,117 @@ def exposures():

 # **********************************************************************

-@app.route( "/exposure_images/<expid>", methods=['GET', 'POST'], strict_slashes=False )
-def exposure_images( expid ):
+@app.route( "/exposure_images/<expid>/<provtag>", methods=['GET', 'POST'], strict_slashes=False )
+def exposure_images( expid, provtag ):
     try:
         conn = next( dbconn() )
         cursor = conn.cursor()
-        # TODO : deal with provenance!
- q = ( 'SELECT i.id, i.filepath, i.ra, i.dec, i.gallat, i.section_id, i.fwhm_estimate, ' - ' i.zero_point_estimate, i.lim_mag_estimate, i.bkg_mean_estimate, i.bkg_rms_estimate, ' - ' s.id AS subid, COUNT(c.id) AS numcutouts, COUNT(m.id) AS nummeasurements, ' - ' r.error_step, r.error_type, r.error_message, r.warnings, ' - ' r.process_memory, r.process_runtime, r.progress_steps_bitflag, r.products_exist_bitflag ' + + # Going to do this in a few steps again. Might be able to write one + # bigass query, but it's probably more efficient to use temp tables. + # Easier to build the queries that way too. + + subdict = { 'expid': int(expid), 'provtag': provtag } + + # Step 1: collect image info into temp_exposure_images + q = ( 'SELECT i.id, i.filepath, i.ra, i.dec, i.gallat, i.exposure_id, i.section_id, i.fwhm_estimate, ' + ' i.zero_point_estimate, i.lim_mag_estimate, i.bkg_mean_estimate, i.bkg_rms_estimate ' + 'INTO TEMP TABLE temp_exposure_images ' 'FROM images i ' - 'LEFT JOIN image_upstreams_association ias ON ias.upstream_id=i.id ' - 'LEFT JOIN images s ON s.id = ias.downstream_id AND s.is_sub ' - 'LEFT JOIN source_lists sl ON sl.image_id=s.id ' - 'LEFT JOIN cutouts c ON c.sources_id=sl.id ' - 'LEFT JOIN measurements m ON c.id=m.cutouts_id ' - 'LEFT JOIN reports r ON r.exposure_id=i.exposure_id AND r.section_id=i.section_id ' - 'WHERE i.is_sub=false AND i.exposure_id=%(expid)s ' - 'GROUP BY i.id,s.id,r.id ' - 'ORDER BY i.section_id,s.id ' ) - app.logger.debug( f"Getting images for exposure {expid}; query = {cursor.mogrify(q, {'expid': int(expid)})}" ) - cursor.execute( q, { 'expid': int(expid) } ) + 'INNER JOIN provenance_tags ipt ON ipt.provenance_id=i.provenance_id ' + 'WHERE i.exposure_id=%(expid)s ' + ' AND ipt.tag=%(provtag)s ' + ) + # app.logger.debug( f"exposure_images finding images; query: {cursor.mogrify(q,subdict)}" ) + cursor.execute( q, subdict ) + cursor.execute( "ALTER TABLE temp_exposure_images ADD PRIMARY KEY(id)" ) + # **** + # cursor.execute( "SELECT COUNT(*) FROM temp_exposure_images" ) + # app.logger.debug( f"Got {cursor.fetchone()[0]} images" ) + # **** + + # Step 2: count measurements by joining temp_exposure_images to many things. + q = ( 'SELECT i.id, s.id AS subid, sl.num_sources AS numsources, COUNT(m.id) AS nummeasurements ' + 'INTO TEMP TABLE temp_exposure_images_counts ' + 'FROM temp_exposure_images i ' + 'INNER JOIN image_upstreams_association ias ON ias.upstream_id=i.id ' + 'INNER JOIN images s ON s.is_sub AND s.id=ias.downstream_id ' + 'INNER JOIN provenance_tags spt ON spt.provenance_id=s.provenance_id AND spt.tag=%(provtag)s ' + 'LEFT JOIN ( ' + ' SELECT sli.id, sli.image_id, sli.num_sources FROM source_lists sli ' + ' INNER JOIN provenance_tags slpt ON slpt.provenance_id=sli.provenance_id AND slpt.tag=%(provtag)s ' + ') sl ON sl.image_id=s.id ' + 'LEFT JOIN (' + ' SELECT cu.id, cu.sources_id FROM cutouts cu ' + ' INNER JOIN provenance_tags cupt ON cupt.provenance_id=cu.provenance_id AND cupt.tag=%(provtag)s ' + ') c ON c.sources_id=sl.id ' + 'LEFT JOIN (' + ' SELECT me.id, me.cutouts_id FROM measurements me ' + ' INNER JOIN provenance_tags mept ON mept.provenance_id=me.provenance_id AND mept.tag=%(provtag)s ' + ') m ON m.cutouts_id=c.id ' + 'GROUP BY i.id, s.id, sl.num_sources ' + ) + # app.logger.debug( f"exposure_images counting sources: query {cursor.mogrify(q,subdict)}" ) + cursor.execute( q, subdict ) + # We will get an error here if there are multiple rows for a given image. + # (Which is good; there shouldn't be multiple rows! 
+        # There should only be one (e.g.) source list child of the image for a given provenance tag, etc.)
+        cursor.execute( "ALTER TABLE temp_exposure_images_counts ADD PRIMARY KEY(id)" )
+        # ****
+        # cursor.execute( "SELECT COUNT(*) FROM temp_exposure_images_counts" )
+        # app.logger.debug( f"Got {cursor.fetchone()[0]} rows with counts" )
+        # ****
+
+        # Step 3: join to the report table.  This one is probably mergeable with step 1.
+        q = ( 'SELECT i.id, r.error_step, r.error_type, r.error_message, r.warnings, '
+              '       r.process_memory, r.process_runtime, r.progress_steps_bitflag, r.products_exist_bitflag '
+              'INTO TEMP TABLE temp_exposure_images_reports '
+              'FROM temp_exposure_images i '
+              'INNER JOIN ( '
+              '   SELECT re.exposure_id, re.section_id, '
+              '          re.error_step, re.error_type, re.error_message, re.warnings, '
+              '          re.process_memory, re.process_runtime, re.progress_steps_bitflag, re.products_exist_bitflag '
+              '   FROM reports re '
+              '   INNER JOIN provenance_tags rept ON rept.provenance_id=re.provenance_id AND rept.tag=%(provtag)s '
+              ') r ON r.exposure_id=i.exposure_id AND r.section_id=i.section_id '
+            )
+        # app.logger.debug( f"exposure_images getting reports; query {cursor.mogrify(q,subdict)}" )
+        cursor.execute( q, subdict )
+        # Again, we will get an error here if there are multiple rows for a given image
+        cursor.execute( "ALTER TABLE temp_exposure_images_reports ADD PRIMARY KEY(id)" )
+        # ****
+        # cursor.execute( "SELECT COUNT(*) FROM temp_exposure_images_reports" )
+        # app.logger.debug( f"Got {cursor.fetchone()[0]} rows with reports" )
+        # ****
+
+        cursor.execute( "SELECT t1.*, t2.*, t3.* "
+                        "FROM temp_exposure_images t1 "
+                        "LEFT JOIN temp_exposure_images_counts t2 ON t1.id=t2.id "
+                        "LEFT JOIN temp_exposure_images_reports t3 ON t1.id=t3.id "
+                        "ORDER BY t1.section_id" )
         columns = { cursor.description[i][0]: i for i in range(len(cursor.description)) }
-        app.logger.debug( f"Got {len(columns)} columns, {cursor.rowcount} rows" )
+        rows = cursor.fetchall()
+        # app.logger.debug( f"exposure_images got {len(rows)} rows from the final query." )

         fields = ( 'id', 'ra', 'dec', 'gallat', 'section_id', 'fwhm_estimate', 'zero_point_estimate',
                    'lim_mag_estimate', 'bkg_mean_estimate', 'bkg_rms_estimate',
-                   'numcutouts', 'nummeasurements', 'subid',
+                   'numsources', 'nummeasurements', 'subid',
                    'error_step', 'error_type', 'error_message', 'warnings',
                    'process_memory', 'process_runtime', 'progress_steps_bitflag', 'products_exist_bitflag' )

-        retval = { 'status': 'ok', 'name': [] }
+        retval = { 'status': 'ok',
+                   'provenancetag': provtag,
+                   'name': [] }
+
         for field in fields :
             retval[ field ] = []

         lastimg = -1
+        multiples = set()
         slashre = re.compile( '^.*/([^/]+)$' )
-        for row in cursor.fetchall():
+        for row in rows:
             if row[columns['id']] == lastimg:
-                app.logger.warning( f'Multiple subtractions for image {lastimg}, need to deal with provenance!' )
+                multiples.add( row[columns['id']] )
                 continue
             lastimg = row[columns['id']]
@@ -253,6 +392,13 @@ def exposure_images( expid ):
             for field in fields:
                 retval[field].append( row[columns[field]] )

+        if len(multiples) != 0:
+            return { 'status': 'error',
+                     'error': ( f'Some images had multiple rows in the query; this probably indicates '
+                                f'that the reports table is not well-formed.  Or maybe something else.  '
+                                f'offending images: {multiples}' ) }
+
+        app.logger.debug( f"exposure_images returning {retval}" )
         return retval

     except Exception as ex:
@@ -262,20 +408,21 @@ def exposure_images( expid ):

 # **********************************************************************

-@app.route( "/png_cutouts_for_sub_image/<exporsubid>/<int:issubid>/<int:nomeas>",
+@app.route( "/png_cutouts_for_sub_image/<exporsubid>/<provtag>/<int:issubid>/<int:nomeas>",
             methods=['GET', 'POST'], strict_slashes=False )
-@app.route( "/png_cutouts_for_sub_image/<exporsubid>/<int:issubid>/<int:nomeas>/<int:limit>",
+@app.route( "/png_cutouts_for_sub_image/<exporsubid>/<provtag>/<int:issubid>/<int:nomeas>/<int:limit>",
             methods=['GET', 'POST'], strict_slashes=False )
-@app.route( "/png_cutouts_for_sub_image/<exporsubid>/<int:issubid>/<int:nomeas>/<int:limit>/<int:offset>",
+@app.route( "/png_cutouts_for_sub_image/<exporsubid>/<provtag>/<int:issubid>/<int:nomeas>/<int:limit>"
+            "/<int:offset>",
             methods=['GET', 'POST'], strict_slashes=False )
-def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0 ):
+def png_cutouts_for_sub_image( exporsubid, provtag, issubid, nomeas, limit=None, offset=0 ):
     try:
         data = { 'sortby': 'fluxdesc_chip_index' }
         if flask.request.is_json:
             data.update( flask.request.json )

         app.logger.debug( f"Processing {flask.request.url}" )
-        if nomeas:
+        if issubid:
             app.logger.debug( f"Looking for cutouts from subid {exporsubid} ({'with' if nomeas else 'without'} "
                               f"missing-measurements)" )
         else:
@@ -284,7 +431,6 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0

         conn = next( dbconn() )
         cursor = conn.cursor()
-        # TODO : deal with provenance!
         # TODO : r/b and sorting

         # Figure out the subids, zeropoints, backgrounds, and apertures we need
@@ -300,23 +446,36 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0
         q = ( 'SELECT s.id AS subid, z.zp, z.dzp, z.aper_cor_radii, z.aper_cors, '
               '       i.id AS imageid, i.bkg_mean_estimate '
               'FROM images s '
-              'INNER JOIN image_upstreams_association ias ON ias.downstream_id=s.id '
-              '   AND s.ref_image_id != ias.upstream_id '
-              'INNER JOIN images i ON ias.upstream_id=i.id '
-              'INNER JOIN source_lists sl ON sl.image_id=i.id '
-              'INNER JOIN zero_points z ON sl.id=z.sources_id ' )
+            )
+        if not issubid:
+            # If we got an exposure id, make sure only to get subtractions of the requested provenance
+            q += 'INNER JOIN provenance_tags spt ON s.provenance_id=spt.provenance_id AND spt.tag=%(provtag)s '
+        q += ( 'INNER JOIN image_upstreams_association ias ON ias.downstream_id=s.id '
+               '   AND s.ref_image_id != ias.upstream_id '
+               'INNER JOIN images i ON ias.upstream_id=i.id '
+               'INNER JOIN source_lists sl ON sl.image_id=i.id '
+               'INNER JOIN provenance_tags slpt ON sl.provenance_id=slpt.provenance_id AND slpt.tag=%(provtag)s '
+               'INNER JOIN zero_points z ON sl.id=z.sources_id ' )
+        # (Don't need to check provenance tag of zeropoint since we have a
+        #   1:1 relationship between zeropoints and source lists.  Don't need
+        #   to check image provenance, because there will be a single image id
+        #   upstream of each sub id.)

         if issubid:
             q += 'WHERE s.id=%(subid)s '
-            cursor.execute( q, { 'subid': exporsubid } )
+            cursor.execute( q, { 'subid': exporsubid, 'provtag': provtag } )
             cols = { cursor.description[i][0]: i for i in range(len(cursor.description)) }
             rows = cursor.fetchall()
             if len(rows) > 1:
-                app.logger.warning( f"Multiple zeropoints for subid {exporsubid}, deal with provenance" )
+                app.logger.error( f"Multiple rows for subid {exporsubid}, provenance tag {provtag} "
+                                  f"is not well-defined, or something else is wrong." )
+                return { 'status': 'error',
+                         'error': ( f"Multiple rows for subid {exporsubid}, provenance tag {provtag} "
+                                    f"is not well-defined, or something else is wrong." ) }
             if len(rows) == 0:
                 app.logger.error( f"Couldn't find a zeropoint for subid {exporsubid}" )
-                zp = -99
-                dzp = -99
-                imageid = -99
+                return { 'status': 'error',
+                         'error': f"Couldn't find zeropoint for subid {exporsubid}" }
             subids.append( exporsubid )
             zps[exporsubid] = rows[0][cols['zp']]
             dzps[exporsubid] = rows[0][cols['dzp']]
@@ -328,13 +487,16 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0
         else:
             q += ( 'INNER JOIN exposures e ON i.exposure_id=e.id '
                    'WHERE e.id=%(expid)s ORDER BY i.section_id ' )
-            cursor.execute( q, { 'expid': exporsubid } )
+            # Don't need to verify provenance here, because there's just going to be one expid!
+            cursor.execute( q, { 'expid': exporsubid, 'provtag': provtag } )
             cols = { cursor.description[i][0]: i for i in range(len(cursor.description)) }
             rows = cursor.fetchall()
             for row in rows:
                 subid = row[cols['subid']]
                 if ( subid in subids ):
-                    app.logger.warning( f"subid {subid} showed up more than once in zp qury, deal with provenance" )
+                    app.logger.error( f"subid {subid} showed up more than once in zp query" )
+                    return { 'status': 'error',
+                             'error': f"subid {subid} showed up more than once in zp query" }
                 subids.append( subid )
                 zps[subid] = row[cols['zp']]
                 dzps[subid] = row[cols['dzp']]
@@ -342,45 +504,64 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0
                 newbkgs[subid] = row[cols['bkg_mean_estimate']]
                 aperradses[subid] = row[cols['aper_cor_radii']]
                 apercorses[subid] = row[cols['aper_cors']]
+        app.logger.debug( f'Got {len(subids)} subtractions.' )

-        app.logger.debug( f"Getting cutouts for sub images {subids}" )
-        q = ( 'SELECT c.id AS id, c.filepath, c.ra, c.dec, c.x, c.y, c.index_in_sources, m.best_aperture, '
-              '       m.flux, m.dflux, m.name, m.is_test, m.is_fake, '
-              '       m.ra AS measra, m.dec AS measdec, s.id AS subid, s.section_id '
+        app.logger.debug( f"Getting cutouts files for sub images {subids}" )
+        q = ( 'SELECT c.filepath,s.id AS subimageid,sl.filepath AS sources_path '
+              'FROM cutouts c '
+              'INNER JOIN provenance_tags cpt ON cpt.provenance_id=c.provenance_id AND cpt.tag=%(provtag)s '
+              'INNER JOIN source_lists sl ON c.sources_id=sl.id '
+              'INNER JOIN images s ON sl.image_id=s.id '
+              'WHERE s.id IN %(subids)s ' )
+        # Don't have to check the source_lists provenance tag because the cutouts provenance
+        #   tag cut will limit us to a single source list for each cutouts file
+        cursor.execute( q, { 'subids': tuple(subids), 'provtag': provtag } )
+        cols = { cursor.description[i][0]: i for i in range(len(cursor.description)) }
+        rows = cursor.fetchall()
+        cutoutsfiles = { c[cols['subimageid']]: c[cols['filepath']] for c in rows }
+        sourcesfiles = { c[cols['subimageid']]: c[cols['sources_path']] for c in rows }
+        app.logger.debug( f"Got: {cutoutsfiles}" )
+
+        app.logger.debug( f"Getting measurements for sub images {subids}" )
+        q = ( 'SELECT m.ra AS measra, m.dec AS measdec, m.index_in_sources, m.best_aperture, '
+              '       m.flux, m.dflux, m.psfflux, m.dpsfflux, m.is_bad, m.name, m.is_test, m.is_fake, '
+              '       s.id AS subid, s.section_id '
               'FROM cutouts c '
+              'INNER JOIN provenance_tags cpt ON cpt.provenance_id=c.provenance_id AND cpt.tag=%(provtag)s '
              'INNER JOIN source_lists sl ON c.sources_id=sl.id '
              'INNER JOIN images s ON sl.image_id=s.id '
              'LEFT JOIN '
-              ' ( SELECT meas.cutouts_id AS meascutid, meas.ra, meas.dec, meas.best_aperture, '
-              '   meas.flux_apertures[meas.best_aperture+1] AS flux, '
-              '   meas.flux_apertures_err[meas.best_aperture+1] AS dflux, obj.name, obj.is_test, obj.is_fake '
+              ' ( SELECT meas.cutouts_id AS meascutid, meas.index_in_sources, meas.ra, meas.dec, meas.is_bad, '
+              '   meas.best_aperture, meas.flux_apertures[meas.best_aperture+1] AS flux, '
+              '   meas.flux_apertures_err[meas.best_aperture+1] AS dflux, '
+              '   meas.flux_psf AS psfflux, meas.flux_psf_err AS dpsfflux, '
+              '   obj.name, obj.is_test, obj.is_fake '
               '   FROM measurements meas '
-              '   INNER JOIN objects obj ON meas.object_id=obj.id '
-              ' ) AS m ON m.meascutid=c.id '
-              'WHERE s.id IN %(subids)s ' )
+              '   INNER JOIN provenance_tags mpt ON meas.provenance_id=mpt.provenance_id AND mpt.tag=%(provtag)s '
+              '   INNER JOIN objects obj ON meas.object_id=obj.id ' )
         if not nomeas:
-            q += "AND m.best_aperture IS NOT NULL "
+            q += ' WHERE NOT meas.is_bad '
+        q += ( ' ) AS m ON m.meascutid=c.id '
+               'WHERE s.id IN %(subids)s ' )
         if data['sortby'] == 'fluxdesc_chip_index':
-            q += 'ORDER BY flux DESC NULLS LAST,s.section_id,c.index_in_sources '
+            q += 'ORDER BY flux DESC NULLS LAST,s.section_id,m.index_in_sources '
         else:
             raise RuntimeError( f"Unknown sort criterion {data['sortby']}" )
         if limit is not None:
             q += 'LIMIT %(limit)s OFFSET %(offset)s'
-        subdict = { 'subids': tuple(subids), 'limit': limit, 'offset': offset }
+        subdict = { 'subids': tuple(subids), 'provtag': provtag, 'limit': limit, 'offset': offset }
+        app.logger.debug( f"Sending query to get measurements: {cursor.mogrify(q,subdict)}" )
         cursor.execute( q, subdict );
         cols = { cursor.description[i][0]: i for i in range(len(cursor.description)) }
         rows = cursor.fetchall()
         app.logger.debug( f"Got {len(cols)} columns, {len(rows)} rows" )

-        hdf5files = {}
         retval = { 'status': 'ok',
                    'cutouts': { 'sub_id': [],
                                 'image_id': [],
                                 'section_id': [],
-                                'id': [],
-                                'ra': [],
-                                'dec': [],
+                                'source_index': [],
                                 'measra': [],
                                 'measdec': [],
                                 'flux': [],
@@ -388,6 +569,7 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0
                                 'aperrad': [],
                                 'mag': [],
                                 'dmag': [],
+                                'is_bad': [],
                                 'objname': [],
                                 'is_test': [],
                                 'is_fake': [],
@@ -403,11 +585,14 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0

         scaler = astropy.visualization.ZScaleInterval()

-        for row in rows:
-            subid = row[cols['subid']]
-            if row[cols['filepath']] not in hdf5files:
-                hdf5files[row[cols['filepath']]] = h5py.File( ARCHIVE_DIR / row[cols['filepath']], 'r' )
-            grp = hdf5files[row[cols['filepath']]][f'source_{row[cols["index_in_sources"]]}']
+        # Open all the hdf5 files
+
+        hdf5files = {}
+        for subid in cutoutsfiles.keys():
+            hdf5files[ subid ] = h5py.File( ARCHIVE_DIR / cutoutsfiles[subid], 'r' )
+
+        def append_to_retval( subid, index_in_sources, row ):
+            grp = hdf5files[ subid ][f'source_index_{row[cols["index_in_sources"]]}']
+            vmin, vmax = scaler.get_limits( grp['new_data'] )
+            scalednew = ( grp['new_data'] - vmin ) * 255. / ( vmax - vmin )
+            # TODO : there's an assumption here that the ref is background
@@ -447,25 +632,28 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0
             retval['cutouts']['new_png'].append( base64.b64encode( newim.getvalue() ).decode('ascii') )
             retval['cutouts']['ref_png'].append( base64.b64encode( refim.getvalue() ).decode('ascii') )
             retval['cutouts']['sub_png'].append( base64.b64encode( subim.getvalue() ).decode('ascii') )
-            retval['cutouts']['id'].append( row[cols['id']] )
-            retval['cutouts']['ra'].append( row[cols['ra']] )
-            retval['cutouts']['dec'].append( row[cols['dec']] )
-            retval['cutouts']['x'].append( row[cols['x']] )
-            retval['cutouts']['y'].append( row[cols['y']] )
+            # TODO : if we want to return x and y, we also have
+            #   to read the source list file...
+            #   We could also copy them to the cutouts file as attributes
+            # retval['cutouts']['x'].append( row[cols['x']] )
+            # retval['cutouts']['y'].append( row[cols['y']] )
             retval['cutouts']['w'].append( scalednew.shape[0] )
             retval['cutouts']['h'].append( scalednew.shape[1] )
+
+            retval['cutouts']['is_bad'].append( row[cols['is_bad']] )
             retval['cutouts']['objname'].append( row[cols['name']] )
             retval['cutouts']['is_test'].append( row[cols['is_test']] )
             retval['cutouts']['is_fake'].append( row[cols['is_fake']] )

-            # Measurements columns
-
-            # WARNING : assumption here that the aper cor radii list in the
-            # zero point is the same as was used in the measurements.
-            # (I think that's a good assumption, but still.)
+            if row[cols['psfflux']] is None:
+                flux = row[cols['flux']]
+                dflux = row[cols['dflux']]
+                aperrad = aperradses[subid][ row[cols['best_aperture']] ]
+            else:
+                flux = row[cols['psfflux']]
+                dflux = row[cols['dpsfflux']]
+                aperrad = 0.

-            flux = row[cols['flux']]
-            dflux = row[cols['dflux']]
             if flux is None:
                 for field in [ 'flux', 'dflux', 'aperrad', 'mag', 'dmag', 'measra', 'measdec' ]:
                     retval['cutouts'][field].append( None )
@@ -480,10 +668,20 @@ def png_cutouts_for_sub_image( exporsubid, issubid, nomeas, limit=None, offset=0
             retval['cutouts']['measdec'].append( row[cols['measdec']] )
             retval['cutouts']['flux'].append( flux )
             retval['cutouts']['dflux'].append( dflux )
-            retval['cutouts']['aperrad'].append( aperradses[subid][ row[cols['best_aperture']] ] )
+            retval['cutouts']['aperrad'].append( aperrad )
             retval['cutouts']['mag'].append( mag )
             retval['cutouts']['dmag'].append( dmag )

+        # First: put in all the measurements, in the order we got them
+
+        already_done = set()
+        for row in rows:
+            subid = row[cols['subid']]
+            index_in_sources = row[ cols['index_in_sources'] ]
+            append_to_retval( subid, index_in_sources, row )
+
+        # TODO : things that we don't have measurements of
+
         for f in hdf5files.values():
             f.close()

diff --git a/webap/static/seechange.js b/webap/static/seechange.js
index 5887d35e..da90814a 100644
--- a/webap/static/seechange.js
+++ b/webap/static/seechange.js
@@ -36,32 +36,27 @@ seechange.Context.prototype.render_page = function()
         this.frontpagediv = rkWebUtil.elemaker( "div", this.maindiv );

         p = rkWebUtil.elemaker( "p", this.frontpagediv, { "text": "Search provenance tag: " } );
-        this.provtag_wid = rkWebUtil.elemaker( "select", p );
-        rkWebUtil.elemaker( "option", this.provtag_wid,
-                            { "text": "",
-                              "attributes": { "value": "",
-                                              "selected": 1 } } );
+        this.provtag_wid = rkWebUtil.elemaker( "select", p, { "attributes": { "id": "provtag_wid" } } );
+        // rkWebUtil.elemaker( "option", this.provtag_wid,
+        //                     { "text": "",
+        //                       "attributes": { "value": "",
+        //                                       "selected": 1 } } );
         this.connector.sendHttpRequest( "provtags", {}, (data) => { self.populate_provtag_wid(data) } );
-        rkWebUtil.elemaker( "option", this.provtag_wid,
-                            { "text": "second",
-                              "attributes": { "value": "second" } } );
-        rkWebUtil.elemaker( "option", this.provtag_wid,
-                            { "text": "third",
-                              "attributes": { "value": "third" } } );

         p = rkWebUtil.elemaker( "p", this.frontpagediv );
         button = rkWebUtil.button( p, "Show Exposures", function() { self.show_exposures(); } );
         p.appendChild( document.createTextNode( " from " ) );
         this.startdatewid = rkWebUtil.elemaker( "input", p,
-                                                { "attributes": { "type": "text",
+                                                { "attributes": { "id": "show_exposures_from_wid",
+                                                                  "type": "text",
                                                                   "size": 20 } } );
         this.startdatewid.addEventListener( "blur", function(e) {
             rkWebUtil.validateWidgetDate( self.startdatewid );
         } );
         p.appendChild( document.createTextNode( " to " ) );
         this.enddatewid = rkWebUtil.elemaker( "input", p,
-                                              { "attributes": { "type": "text",
+                                              { "attributes": { "id": "show_exposures_to_wid",
+                                                                "type": "text",
                                                                 "size": 20 } } );
         this.enddatewid.addEventListener( "blur", function(e) {
             rkWebUtil.validateWidgetDate( self.enddatewid );
@@ -91,7 +86,7 @@ seechange.Context.prototype.populate_provtag_wid = function( data )
 seechange.Context.prototype.show_exposures = function()
 {
     var self = this;
-    var startdate, enddate;
+    var startdate, enddate, provtag;
     try {
         startdate = this.startdatewid.value.trim();
         if ( startdate.length > 0 )
@@ -108,12 +103,15 @@ seechange.Context.prototype.show_exposures = function()
         console.log( "Exception parsing dates: " + ex.toString() );
         return;
     }
+    provtag = this.provtag_wid.value;
+    if ( provtag == '' ) provtag = null;

     rkWebUtil.wipeDiv( this.subdiv );
     rkWebUtil.elemaker( "p", this.subdiv,
                         { "text": "Loading exposures...",
                           "classes": [ "warning", "bold", "italic" ] } );
-    this.connector.sendHttpRequest( "exposures", { "startdate": startdate, "enddate": enddate },
+    this.connector.sendHttpRequest( "exposures",
+                                    { "startdate": startdate, "enddate": enddate, "provenancetag": provtag },
                                     function( data ) { self.actually_show_exposures( data ); } );
 }

@@ -124,7 +122,11 @@ seechange.Context.prototype.actually_show_exposures = function( data )
         window.alert( "Unexpected response from server when looking for exposures." );
         return
     }
-    let exps = new seechange.ExposureList( this, this.subdiv, data["exposures"], data["startdate"], data["enddate"] );
+    let exps = new seechange.ExposureList( this, this.subdiv,
+                                           data["exposures"],
+                                           data["startdate"],
+                                           data["enddate"],
+                                           data["provenance_tag"] );
     exps.render_page();
 }

 // **********************************************************************
 // **********************************************************************

-seechange.ExposureList = function( context, parentdiv, exposures, fromtime, totime )
+seechange.ExposureList = function( context, parentdiv, exposures, fromtime, totime, provtag )
 {
     this.context = context;
     this.parentdiv = parentdiv;
     this.exposures = exposures;
     this.fromtime = fromtime;
     this.totime = totime;
+    this.provtag = provtag;
     this.masterdiv = null;
     this.listdiv = null;
     this.exposurediv = null;
@@ -189,16 +192,27 @@ seechange.ExposureList.prototype.render_page = function()
         h2.appendChild( document.createTextNode( " from " + this.fromtime + " to " + this.totime ) );
     }

-    table = rkWebUtil.elemaker( "table", this.listdiv, { "classes": [ "exposurelist" ] } );
+    if ( this.provtag == null ) {
+        h2.appendChild( document.createTextNode( " including all provenances" ) );
+    } else {
+        h2.appendChild( document.createTextNode( " with provenance tag " + this.provtag ) );
+    }
+
+    rkWebUtil.elemaker( "p", this.listdiv,
+                        { "text": '"Detections" are everything found on subtractions; '
+                                  + '"Sources" are things that passed preliminary cuts.' } )
+
+    table = rkWebUtil.elemaker( "table", this.listdiv, { "classes": [ "exposurelist" ],
+                                                         "attributes": { "id": "exposure_list_table" } } );
     tr = rkWebUtil.elemaker( "tr", table );
     th = rkWebUtil.elemaker( "th", tr, { "text": "Exposure" } );
     th = rkWebUtil.elemaker( "th", tr, { "text": "MJD" } );
     th = rkWebUtil.elemaker( "th", tr, { "text": "target" } );
     th = rkWebUtil.elemaker( "th", tr, { "text": "filter" } );
     th = rkWebUtil.elemaker( "th", tr, { "text": "t_exp (s)" } );
-    th = rkWebUtil.elemaker( "th", tr, { "text": "n_images" } );
-    th = rkWebUtil.elemaker( "th", tr, { "text": "n_cutouts" } );
-    th = rkWebUtil.elemaker( "th", tr, { "text": "n_sources" } );
+    th = rkWebUtil.elemaker( "th", tr, { "text": "subs" } );
+    th = rkWebUtil.elemaker( "th", tr, { "text": "detections" } );
+    th = rkWebUtil.elemaker( "th", tr, { "text": "sources" } );
     th = rkWebUtil.elemaker( "th", tr, { "text": "n_successim" } );
     th = rkWebUtil.elemaker( "th", tr, { "text": "n_errors" } );

@@ -226,9 +240,9 @@ seechange.ExposureList.prototype.render_page = function()
         td = rkWebUtil.elemaker( "td", row, { "text": exps["target"][i] } );
         td = rkWebUtil.elemaker( "td", row, { "text": exps["filter"][i] } );
         td = rkWebUtil.elemaker( "td", row, { "text": exps["exp_time"][i] } );
-        td = rkWebUtil.elemaker( "td", row, { "text": exps["n_images"][i] } );
-        td = rkWebUtil.elemaker( "td", row, { "text": exps["n_cutouts"][i] } );
+        td = rkWebUtil.elemaker( "td", row, { "text": exps["n_subs"][i] } );
         td = rkWebUtil.elemaker( "td", row, { "text": exps["n_sources"][i] } );
+        td = rkWebUtil.elemaker( "td", row, { "text": exps["n_measurements"][i] } );
         td = rkWebUtil.elemaker( "td", row, { "text": exps["n_successim"][i] } );
         td = rkWebUtil.elemaker( "td", row, { "text": exps["n_errors"][i] } );
         countdown -= 1;
@@ -252,7 +266,8 @@ seechange.ExposureList.prototype.show_exposure = function( id, name, mjd, filter
     rkWebUtil.wipeDiv( this.exposurediv );
     rkWebUtil.elemaker( "p", this.exposurediv,
                         { "text": "Loading...",
                           "classes": [ "warning", "bold", "italic" ] } );
-    this.context.connector.sendHttpRequest( "exposure_images/" + id, null,
+    this.context.connector.sendHttpRequest( "exposure_images/" + id + "/" + this.provtag,
+                                            null,
                                             (data) => {
                                                 self.actually_show_exposure( id, name, mjd, filter,
                                                                              target, exp_time, data );
@@ -343,6 +358,8 @@ seechange.Exposure.prototype.render_page = function()
     h2 = rkWebUtil.elemaker( "h2", this.div, { "text": "Exposure " + this.name } );
     ul = rkWebUtil.elemaker( "ul", this.div );
     li = rkWebUtil.elemaker( "li", ul );
+    li.innerHTML = "provenance tag: " + this.data.provenancetag;
+    li = rkWebUtil.elemaker( "li", ul );
     li.innerHTML = "target: " + this.target;
     li = rkWebUtil.elemaker( "li", ul );
     li.innerHTML = "mjd: " + this.mjd
@@ -359,15 +376,19 @@ seechange.Exposure.prototype.render_page = function()
     let totncutouts = 0;
     let totnsources = 0;
     for ( let i in this.data['id'] ) {
-        totncutouts += this.data['numcutouts'][i];
+        totncutouts += this.data['numsources'][i];
         totnsources += this.data['nummeasurements'][i];
     }
+    let numsubs = 0;
+    for ( let sid of this.data.subid ) if ( sid != null ) numsubs += 1;

     p = rkWebUtil.elemaker( "p", this.imagesdiv,
-                            { "text": "Exposure has " + this.data.id.length + " completed subtractions." } )
+                            { "text": ( "Exposure has " + this.data.id.length + " images and " + numsubs
+                                        + " completed subtractions" ) } )
     p = rkWebUtil.elemaker( "p", this.imagesdiv,
                             { "text": ( totnsources.toString() + " out of " +
-                                        totncutouts.toString() + " sources pass preliminary cuts." ) } );
+                                        totncutouts.toString() + " detections pass preliminary cuts "
+                                        + "(i.e. are \"sources\")." ) } );

     p = rkWebUtil.elemaker( "p", this.imagesdiv );
@@ -385,7 +406,7 @@ seechange.Exposure.prototype.render_page = function()
                                         { "type": "checkbox",
                                           "id": "cutouts_sans_measurements",
                                           "name": "cutouts_sans_measurements_checkbox" } } );
-    rkWebUtil.elemaker( "label", p, { "text": "Show cutouts that failed the preliminary cuts",
+    rkWebUtil.elemaker( "label", p, { "text": "Show detections that failed the preliminary cuts (i.e. aren't sources)",
                                       "attributes": { "for": "cutouts_sans_measurements_checkbox" } } );

@@ -400,8 +421,8 @@ seechange.Exposure.prototype.render_page = function()
     th = rkWebUtil.elemaker( "th", tr, { "text": "fwhm" } );
     th = rkWebUtil.elemaker( "th", tr, { "text": "zp" } );
    th = rkWebUtil.elemaker( "th", tr, { "text": "mag_lim" } );
-    th = rkWebUtil.elemaker( "th", tr, { "text": "n_cutouts" } );
-    th = rkWebUtil.elemaker( "th", tr, { "text": "n_sources" } );
+    th = rkWebUtil.elemaker( "th", tr, { "text": "detections" } );
+    th = rkWebUtil.elemaker( "th", tr, { "text": "sources" } );
    th = rkWebUtil.elemaker( "th", tr, { "text": "compl. step" } );
     th = rkWebUtil.elemaker( "th", tr, {} );   // products exist
     th = rkWebUtil.elemaker( "th", tr, {} );   // error
@@ -431,7 +452,7 @@ seechange.Exposure.prototype.render_page = function()
         td = rkWebUtil.elemaker( "td", tr, { "text": seechange.nullorfixed( this.data["zero_point_estimate"][i],
                                                                             2 ) } );
         td = rkWebUtil.elemaker( "td", tr, { "text": seechange.nullorfixed( this.data["lim_mag_estimate"][i],
                                                                             1 ) } );
-        td = rkWebUtil.elemaker( "td", tr, { "text": this.data["numcutouts"][i] } );
+        td = rkWebUtil.elemaker( "td", tr, { "text": this.data["numsources"][i] } );
         td = rkWebUtil.elemaker( "td", tr, { "text": this.data["nummeasurements"][i] } );

         td = rkWebUtil.elemaker( "td", tr );
@@ -503,7 +524,7 @@ seechange.Exposure.prototype.update_cutouts = function()

     if ( this.cutoutsallimages_checkbox.checked ) {
         rkWebUtil.elemaker( "p", this.cutoutsdiv,
-                            { "text": "Sources for all succesfully completed chips" } );
+                            { "text": "Sources for all successfully completed chips" } );

         let div = rkWebUtil.elemaker( "div", this.cutoutsdiv );
         rkWebUtil.elemaker( "p", div,
                             { "text": "...updating cutouts...",
@@ -517,7 +538,7 @@ seechange.Exposure.prototype.update_cutouts = function()
         }
         else {
             this.context.connector.sendHttpRequest(
-                "png_cutouts_for_sub_image/" + this.id + "/0/" + withnomeas,
+                "png_cutouts_for_sub_image/" + this.id + "/" + this.data.provenancetag + "/0/" + withnomeas,
                 {},
                 (data) => { self.show_cutouts_for_image( div, prop, data ); }
             );
@@ -544,7 +565,8 @@ seechange.Exposure.prototype.update_cutouts = function()
         }
         else {
             this.context.connector.sendHttpRequest(
-                "png_cutouts_for_sub_image/" + this.data['subid'][i] + "/1/" + withnomeas,
+                "png_cutouts_for_sub_image/" + this.data['subid'][i] + "/" + this.data.provenancetag
+                    + "/1/" + withnomeas,
                 {},
                 (data) => { self.show_cutouts_for_image( div, prop, data ); }
             );
@@ -625,19 +647,25 @@ seechange.Exposure.prototype.show_cutouts_for_image = function( div, dex, indata
             // TODO: use "warning" color for low r/b
             if ( data.cutouts['flux'][i] == null ) td.classList.add( 'bad' );
             else td.classList.add( 'good' );
-        subdiv.innerHTML = ( "chip: " + data.cutouts.section_id[i] + "<br>" +
-                             // "cutout (α, δ): (" + data.cutouts['ra'][i].toFixed(5) + " , "
-                             //    + data.cutouts['dec'][i].toFixed(5) + ")<br>" +
-                             "(α, δ): (" + seechange.nullorfixed( data.cutouts['measra'][i], 5 ) + " , "
-                                 + seechange.nullorfixed( data.cutouts['measdec'][i],5 ) + ")<br>" +
-                             "(x, y): (" + data.cutouts['x'][i].toFixed(2) + " , "
-                                 + data.cutouts['y'][i].toFixed(2) + ")<br>" +
-                             "Flux: " + seechange.nullorfixed( data.cutouts['flux'][i], 0 )
-                                 + " ± " + seechange.nullorfixed( data.cutouts['dflux'][i], 0 )
-                                 + "  (aper r=" + seechange.nullorfixed( data.cutouts['aperrad'][i], 2) + " px)"
-                                 + "<br>" + "Mag: " + seechange.nullorfixed( data.cutouts['mag'][i], 2 )
-                                 + " ± " + seechange.nullorfixed( data.cutouts['dmag'][i], 2 )
-                             );
+        let textblob = ( "chip: " + data.cutouts.section_id[i] + "<br>" +
+                         // "cutout (α, δ): (" + data.cutouts['ra'][i].toFixed(5) + " , "
+                         //    + data.cutouts['dec'][i].toFixed(5) + ")<br>" +
+                         "(α, δ): (" + seechange.nullorfixed( data.cutouts['measra'][i], 5 ) + " , "
+                             + seechange.nullorfixed( data.cutouts['measdec'][i],5 ) + ")<br>" +
+                         // TODO : put x, y back if the server ever starts returning it again! -- Issue #340
+                         // "(x, y): (" + data.cutouts['x'][i].toFixed(2) + " , "
+                         //    + data.cutouts['y'][i].toFixed(2) + ")<br>" +
+                         "Flux: " + seechange.nullorfixed( data.cutouts['flux'][i], 0 )
+                             + " ± " + seechange.nullorfixed( data.cutouts['dflux'][i], 0 )
+                       );
+        if ( ( data.cutouts['aperrad'][i] == null ) || ( data.cutouts['aperrad'][i] <= 0 ) )
+            textblob += "  (psf)";
+        else
+            textblob +=
+                "  (aper r=" + seechange.nullorfixed( data.cutouts['aperrad'][i], 2) + " px)";
+        textblob += ( "<br>" + "Mag: " + seechange.nullorfixed( data.cutouts['mag'][i], 2 )
+                      + " ± " + seechange.nullorfixed( data.cutouts['dmag'][i], 2 )
+                    );
+        subdiv.innerHTML = textblob;
     }
 }