diff --git a/.github/workflows/run_tests_cdash.yml b/.github/workflows/run_tests_cdash.yml index 57053172cd..b8d10ef118 100644 --- a/.github/workflows/run_tests_cdash.yml +++ b/.github/workflows/run_tests_cdash.yml @@ -6,7 +6,9 @@ name: Run CDash Ubuntu/Linux netCDF Tests on: workflow_dispatch - +concurrency: + group: ${{ github.workflow}}-${{ github.head_ref }} + cancel-in-progress: true jobs: diff --git a/.github/workflows/run_tests_osx.yml b/.github/workflows/run_tests_osx.yml index c887c6b1e6..8f2d9d62af 100644 --- a/.github/workflows/run_tests_osx.yml +++ b/.github/workflows/run_tests_osx.yml @@ -6,9 +6,12 @@ name: Run macOS-based netCDF Tests - on: [pull_request,workflow_dispatch] +concurrency: + group: ${{ github.workflow}}-${{ github.head_ref }} + cancel-in-progress: true + jobs: build-deps-osx: diff --git a/.github/workflows/run_tests_s3.yml b/.github/workflows/run_tests_s3.yml index 0a1c942460..ddbb353c8e 100644 --- a/.github/workflows/run_tests_s3.yml +++ b/.github/workflows/run_tests_s3.yml @@ -11,6 +11,10 @@ name: Run S3 netCDF Tests (under Ubuntu Linux) on: [workflow_dispatch] +concurrency: + group: ${{ github.workflow}}-${{ github.head_ref }} + cancel-in-progress: true + jobs: build-deps-serial: diff --git a/.github/workflows/run_tests_ubuntu.yml b/.github/workflows/run_tests_ubuntu.yml index 63afb906c0..64b91bb99a 100644 --- a/.github/workflows/run_tests_ubuntu.yml +++ b/.github/workflows/run_tests_ubuntu.yml @@ -6,6 +6,10 @@ name: Run Ubuntu/Linux netCDF Tests on: [pull_request, workflow_dispatch] +concurrency: + group: ${{ github.workflow}}-${{ github.head_ref }} + cancel-in-progress: true + jobs: build-deps-serial: diff --git a/.github/workflows/run_tests_win_cygwin.yml b/.github/workflows/run_tests_win_cygwin.yml index bc084a401f..6e124448ed 100644 --- a/.github/workflows/run_tests_win_cygwin.yml +++ b/.github/workflows/run_tests_win_cygwin.yml @@ -2,6 +2,10 @@ name: Run Cygwin-based tests on: [pull_request,workflow_dispatch] +concurrency: + 
group: ${{ github.workflow}}-${{ github.head_ref }} + cancel-in-progress: true + env: SHELLOPTS: igncr CHERE_INVOKING: 1 diff --git a/.github/workflows/run_tests_win_mingw.yml b/.github/workflows/run_tests_win_mingw.yml index 7bc6cde76a..978275cf6c 100644 --- a/.github/workflows/run_tests_win_mingw.yml +++ b/.github/workflows/run_tests_win_mingw.yml @@ -11,6 +11,10 @@ env: on: [pull_request,workflow_dispatch] +concurrency: + group: ${{ github.workflow}}-${{ github.head_ref }} + cancel-in-progress: true + jobs: build-and-test-autotools: diff --git a/Makefile.am b/Makefile.am index 64648d6180..55907f7ef5 100644 --- a/Makefile.am +++ b/Makefile.am @@ -211,7 +211,11 @@ install-data-hook: all-local: liblib/libnetcdf.la echo ${PACKAGE_VERSION} > VERSION if ENABLE_S3_TESTALL + rm -f ${abs_top_builddir}/tmp_@PLATFORMUID@.uids echo "@TESTUID@" >> ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids + cat ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids | sort | uniq > ${abs_top_builddir}/tmp_@PLATFORMUID@.uids + rm -f ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids + mv ${abs_top_builddir}/tmp_@PLATFORMUID@.uids ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids endif if ENABLE_S3_TESTALL diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 1575118a34..8d5eca680a 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -8,6 +8,7 @@ This file contains a high-level description of this package's evolution. Release ## 4.9.3 - TBD * Fix DAP2 proxy problems. See [Github #2764](https://github.com/Unidata/netcdf-c/pull/2764). +* Cleanup a number of misc issues. See [Github #2763](https://github.com/Unidata/netcdf-c/pull/2763). * Mitigate the problem of test interference. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755). * Extend NCZarr to support unlimited dimensions. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755). * Fix significant bug in the NCZarr cache management. See [Github #2737](https://github.com/Unidata/netcdf-c/pull/2737). 
diff --git a/include/ncrc.h b/include/ncrc.h index b3bb8c512d..ae858532e7 100644 --- a/include/ncrc.h +++ b/include/ncrc.h @@ -53,6 +53,9 @@ typedef struct NCRCinfo { NClist* s3profiles; /* NClist<struct AWSprofile*> */ } NCRCinfo; +/* Opaque structures */ +struct NCS3INFO; + #if defined(__cplusplus) extern "C" { #endif @@ -94,7 +97,7 @@ EXTERNL int NC_getactives3profile(NCURI* uri, const char** profilep); EXTERNL int NC_s3profilelookup(const char* profile, const char* key, const char** valuep); EXTERNL int NC_authgets3profile(const char* profile, struct AWSprofile** profilep); EXTERNL int NC_iss3(NCURI* uri); -EXTERNL int NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** newurlp); +EXTERNL int NC_s3urlrebuild(NCURI* url, struct NCS3INFO* s3, NCURI** newurlp); #if defined(__cplusplus) } diff --git a/include/ncs3sdk.h b/include/ncs3sdk.h index c06f0e39f1..771faa6666 100644 --- a/include/ncs3sdk.h +++ b/include/ncs3sdk.h @@ -6,6 +6,11 @@ #ifndef NCS3SDK_H #define NCS3SDK_H 1 +/* Track the server type, if known */ +typedef enum NCS3SVC {NCS3UNK=0, /* unknown */ + NCS3=1, /* s3.amazon.aws */ + NCS3GS=2 /* storage.googleapis.com */ +} NCS3SVC; typedef struct NCS3INFO { char* host; /* non-null if other*/ @@ -13,6 +18,7 @@ typedef struct NCS3INFO { char* bucket; /* bucket name */ char* rootkey; char* profile; + NCS3SVC svc; } NCS3INFO; #ifdef __cplusplus diff --git a/libdispatch/dinfermodel.c b/libdispatch/dinfermodel.c index 937fd8bd08..61941d1d71 100644 --- a/libdispatch/dinfermodel.c +++ b/libdispatch/dinfermodel.c @@ -136,6 +136,7 @@ static const struct MACRODEF { {"xarray","mode",{"zarr", NULL}}, {"noxarray","mode",{"nczarr", "noxarray", NULL}}, {"zarr","mode",{"nczarr","zarr", NULL}}, +{"gs3","mode",{"gs3","nczarr",NULL}}, /* Google S3 API */ {NULL,NULL,{NULL}} }; @@ -196,6 +197,7 @@ static struct NCPROTOCOLLIST { {"dods","http","mode=dap2"}, {"dap4","http","mode=dap4"}, {"s3","s3","mode=s3"}, + {"gs3","gs3","mode=gs3"}, {NULL,NULL,NULL} /* Terminate search 
*/ }; @@ -914,7 +916,7 @@ NC_infermodel(const char* path, int* omodep, int iscreate, int useparallel, void /* If s3, then rebuild the url */ if(NC_iss3(uri)) { NCURI* newuri = NULL; - if((stat = NC_s3urlrebuild(uri,NULL,NULL,&newuri))) goto done; + if((stat = NC_s3urlrebuild(uri,NULL,&newuri))) goto done; ncurifree(uri); uri = newuri; } else if(strcmp(uri->protocol,"file")==0) { diff --git a/libdispatch/drc.c b/libdispatch/drc.c index b896d90bfb..77922b752f 100644 --- a/libdispatch/drc.c +++ b/libdispatch/drc.c @@ -437,7 +437,9 @@ rccompile(const char* filepath) NCURI* uri = NULL; char* nextline = NULL; NCglobalstate* globalstate = NC_getglobalstate(); - char* bucket = NULL; + NCS3INFO s3; + + memset(&s3,0,sizeof(s3)); if((ret=NC_readfile(filepath,tmp))) { nclog(NCLOGWARN, "Could not open configuration file: %s",filepath); @@ -484,9 +486,8 @@ rccompile(const char* filepath) if(NC_iss3(uri)) { NCURI* newuri = NULL; /* Rebuild the url to S3 "path" format */ - nullfree(bucket); - bucket = NULL; - if((ret = NC_s3urlrebuild(uri,&bucket,NULL,&newuri))) goto done; + NC_s3clear(&s3); + if((ret = NC_s3urlrebuild(uri,&s3,&newuri))) goto done; ncurifree(uri); uri = newuri; newuri = NULL; @@ -546,6 +547,7 @@ rccompile(const char* filepath) rcorder(rc); done: + NC_s3clear(&s3); if(contents) free(contents); ncurifree(uri); ncbytesfree(tmp); diff --git a/libdispatch/ds3util.c b/libdispatch/ds3util.c index df001f8023..5091b1a24e 100644 --- a/libdispatch/ds3util.c +++ b/libdispatch/ds3util.c @@ -30,6 +30,7 @@ #undef AWSDEBUG #define AWSHOST ".amazonaws.com" +#define GOOGLEHOST "storage.googleapis.com" enum URLFORMAT {UF_NONE=0, UF_VIRTUAL=1, UF_PATH=2, UF_S3=3, UF_OTHER=4}; @@ -44,15 +45,12 @@ Rebuild an S3 url into a canonical path-style url. If region is not in the host, then use specified region if provided, otherwise us-east-1. 
@param url (in) the current url -@param region (in) region to use if needed; NULL => us-east-1 - (out) region from url or the input region -@param bucketp (in) bucket to use if needed - (out) bucket from url +@param s3 (in/out) NCS3INFO structure @param pathurlp (out) the resulting pathified url string */ int -NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** newurlp) +NC_s3urlrebuild(NCURI* url, NCS3INFO* s3, NCURI** newurlp) { int i,stat = NC_NOERR; NClist* hostsegments = NULL; @@ -63,6 +61,7 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne char* host = NULL; char* path = NULL; char* region = NULL; + NCS3SVC svc = NCS3UNK; if(url == NULL) {stat = NC_EURL; goto done;} @@ -83,14 +82,27 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne Path: https://s3..amazonaws.com// (3) or: https://s3.amazonaws.com// -- region defaults to us-east-1 (4) S3: s3:/// (5) - Other: https://// (6) + Google: https://storage.googleapis.com// (6) + or: gs3:/// (7) + Other: https://// (8) */ if(url->host == NULL || strlen(url->host) == 0) {stat = NC_EURL; goto done;} + + /* Reduce the host to standard form such as s3.amazonaws.com by pulling out the + region and bucket from the host */ if(strcmp(url->protocol,"s3")==0 && nclistlength(hostsegments)==1) { /* Format (5) */ bucket = nclistremove(hostsegments,0); /* region unknown at this point */ + /* Host will be set to canonical form later */ + svc = NCS3; + } else if(strcmp(url->protocol,"gs3")==0 && nclistlength(hostsegments)==1) { /* Format (7) */ + bucket = nclistremove(hostsegments,0); + /* region unknown at this point */ + /* Host will be set to canonical form later */ + svc = NCS3GS; } else if(endswith(url->host,AWSHOST)) { /* Virtual or path */ + svc = NCS3; /* If we find a bucket as part of the host, then remove it */ switch (nclistlength(hostsegments)) { default: stat = NC_EURL; goto done; @@ -99,11 +111,11 @@ NC_s3urlrebuild(NCURI* url, 
char** inoutbucketp, char** inoutregionp, NCURI** ne /* bucket unknown at this point */ break; case 4: /* Format (2) or (3) */ - if(strcasecmp(nclistget(hostsegments,1),"s3")==0) { /* Format (2) */ + if(strcasecmp(nclistget(hostsegments,0),"s3")!=0) { /* Presume format (2) */ /* region unknown at this point */ - bucket = nclistremove(hostsegments,0); /* Note removeal */ + bucket = nclistremove(hostsegments,0); /* Make canonical */ } else if(strcasecmp(nclistget(hostsegments,0),"s3")==0) { /* Format (3) */ - region = strdup(nclistget(hostsegments,1)); + region = nclistremove(hostsegments,1); /* Make canonical */ /* bucket unknown at this point */ } else /* ! Format (2) and ! Format (3) => error */ {stat = NC_EURL; goto done;} @@ -111,20 +123,27 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne case 5: /* Format (1) */ if(strcasecmp(nclistget(hostsegments,1),"s3")!=0) {stat = NC_EURL; goto done;} - region = strdup(nclistget(hostsegments,2)); - bucket = strdup(nclistremove(hostsegments,0)); + /* Make canonical */ + region = nclistremove(hostsegments,2); + bucket = nclistremove(hostsegments,0); break; } - } else { /* Presume Format (6) */ + } else if(strcasecmp(url->host,GOOGLEHOST)==0) { /* Google (6) */ + if((host = strdup(url->host))==NULL) + {stat = NC_ENOMEM; goto done;} + /* region is unknown */ + /* bucket is unknown at this point */ + svc = NCS3GS; + } else { /* Presume Format (8) */ if((host = strdup(url->host))==NULL) {stat = NC_ENOMEM; goto done;} /* region is unknown */ /* bucket is unknown */ } - /* region = (1) from url, (2) inoutregion, (3) default */ - if(region == NULL) - region = (inoutregionp?nulldup(*inoutregionp):NULL); + /* region = (1) from url, (2) s3->region, (3) default */ + if(region == NULL && s3 != NULL) + region = nulldup(s3->region); if(region == NULL) { const char* region0 = NULL; /* Get default region */ @@ -133,23 +152,30 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** 
ne } if(region == NULL) {stat = NC_ES3; goto done;} - /* bucket = (1) from url, (2) inoutbucket */ + /* bucket = (1) from url, (2) s3->bucket */ if(bucket == NULL && nclistlength(pathsegments) > 0) { bucket = nclistremove(pathsegments,0); /* Get from the URL path; will reinsert below */ } - if(bucket == NULL) - bucket = (inoutbucketp?nulldup(*inoutbucketp):NULL); + if(bucket == NULL && s3 != NULL) + bucket = nulldup(s3->bucket); if(bucket == NULL) {stat = NC_ES3; goto done;} - if(host == NULL) { /* Construct the revised host */ + if(svc == NCS3) { + /* Construct the revised host */ + ncbytesclear(buf); ncbytescat(buf,"s3."); ncbytescat(buf,region); ncbytescat(buf,AWSHOST); + nullfree(host); host = ncbytesextract(buf); + } else if(svc == NCS3GS) { + nullfree(host); + host = strdup(GOOGLEHOST); } - /* Construct the revised path */ ncbytesclear(buf); + + /* Construct the revised path */ if(bucket != NULL) { ncbytescat(buf,"/"); ncbytescat(buf,bucket); @@ -159,10 +185,13 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne ncbytescat(buf,nclistget(pathsegments,i)); } path = ncbytesextract(buf); + /* complete the new url */ if((newurl=ncuriclone(url))==NULL) {stat = NC_ENOMEM; goto done;} ncurisetprotocol(newurl,"https"); + assert(host != NULL); ncurisethost(newurl,host); + assert(path != NULL); ncurisetpath(newurl,path); /* Rebuild the url->url */ ncurirebuild(newurl); @@ -171,9 +200,11 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne fprintf(stderr,">>> NC_s3urlrebuild: final=%s bucket=%s region=%s\n",uri->uri,bucket,region); #endif if(newurlp) {*newurlp = newurl; newurl = NULL;} - if(inoutbucketp) {*inoutbucketp = bucket; bucket = NULL;} - if(inoutregionp) {*inoutregionp = region; region = NULL;} - + if(s3 != NULL) { + s3->bucket = bucket; bucket = NULL; + s3->region = region; region = NULL; + s3->svc = svc; + } done: nullfree(region); nullfree(bucket) @@ -218,7 +249,7 @@ NC_s3urlprocess(NCURI* url, 
NCS3INFO* s3, NCURI** newurlp) s3->profile = strdup(profile0); /* Rebuild the URL to path format and get a usable region and optional bucket*/ - if((stat = NC_s3urlrebuild(url,&s3->bucket,&s3->region,&url2))) goto done; + if((stat = NC_s3urlrebuild(url,s3,&url2))) goto done; s3->host = strdup(url2->host); /* construct the rootkey minus the leading bucket */ pathsegments = nclistnew(); @@ -268,7 +299,7 @@ NC_s3clear(NCS3INFO* s3) } /* -Check if a url has indicators that signal an S3 url. +Check if a url has indicators that signal an S3 or Google S3 url. */ int @@ -277,13 +308,17 @@ NC_iss3(NCURI* uri) int iss3 = 0; if(uri == NULL) goto done; /* not a uri */ - /* is the protocol "s3"? */ + /* is the protocol "s3" or "gs3" ? */ if(strcasecmp(uri->protocol,"s3")==0) {iss3 = 1; goto done;} - /* Is "s3" in the mode list? */ - if(NC_testmode(uri,"s3")) {iss3 = 1; goto done;} + if(strcasecmp(uri->protocol,"gs3")==0) {iss3 = 1; goto done;} + /* Is "s3" or "gs3" in the mode list? */ + if(NC_testmode(uri,"s3")) {iss3 = 1; goto done;} + if(NC_testmode(uri,"gs3")) {iss3 = 1; goto done;} /* Last chance; see if host looks s3'y */ - if(endswith(uri->host,AWSHOST)) {iss3 = 1; goto done;} - + if(uri->host != NULL) { + if(endswith(uri->host,AWSHOST)) {iss3 = 1; goto done;} + if(strcasecmp(uri->host,GOOGLEHOST)==0) {iss3 = 1; goto done;} + } done: return iss3; } diff --git a/libdispatch/nch5s3comms.c b/libdispatch/nch5s3comms.c index fd473f64e5..ba09b931b0 100644 --- a/libdispatch/nch5s3comms.c +++ b/libdispatch/nch5s3comms.c @@ -96,6 +96,7 @@ /*****************/ +#include "ncs3sdk.h" #include "nch5s3comms.h" /* S3 Communications */ /****************/ @@ -1063,7 +1064,7 @@ NCH5_s3comms_s3r_execute(s3r_t *handle, const char* url, *---------------------------------------------------------------------------- */ s3r_t * -NCH5_s3comms_s3r_open(const char* root, const char *region, const char *access_id, const char* access_key) +NCH5_s3comms_s3r_open(const char* root, NCS3SVC svc, const 
char *region, const char *access_id, const char* access_key) { int ret_value = SUCCEED; size_t tmplen = 0; @@ -1092,10 +1093,15 @@ NCH5_s3comms_s3r_open(const char* root, const char *region, const char *access_i * RECORD THE ROOT PATH *************************************/ - /* Verify that the region is a substring of root */ - if(region != NULL && region[0] != '\0') { - if(strstr(root,region) == NULL) - HGOTO_ERROR(H5E_ARGS, NC_EINVAL, NULL, "region not present in root path."); + switch (svc) { + case NCS3: + /* Verify that the region is a substring of root */ + if(region != NULL && region[0] != '\0') { + if(strstr(root,region) == NULL) + HGOTO_ERROR(H5E_ARGS, NC_EINVAL, NULL, "region not present in root path."); + } + break; + default: break; } handle->rootpath = nulldup(root); diff --git a/libdispatch/nch5s3comms.h b/libdispatch/nch5s3comms.h index acfffb2858..7cc482df66 100644 --- a/libdispatch/nch5s3comms.h +++ b/libdispatch/nch5s3comms.h @@ -502,7 +502,7 @@ EXTERNL hrb_t *NCH5_s3comms_hrb_init_request(const char *resource, const char *h * DECLARATION OF S3REQUEST ROUTINES * *************************************/ -EXTERNL s3r_t *NCH5_s3comms_s3r_open(const char* root, const char* region, const char* id, const char* access_key); +EXTERNL s3r_t *NCH5_s3comms_s3r_open(const char* root, NCS3SVC svc, const char* region, const char* id, const char* access_key); EXTERNL int NCH5_s3comms_s3r_close(s3r_t *handle); diff --git a/libdispatch/nclog.c b/libdispatch/nclog.c index 6f477c18d8..5f6b8be7d8 100644 --- a/libdispatch/nclog.c +++ b/libdispatch/nclog.c @@ -138,9 +138,11 @@ ncvlog(int level, const char* fmt, va_list ap) const char* prefix; if(!nclogginginitialized) ncloginit(); + if(nclog_global.loglevel < level || nclog_global.nclogstream == NULL) { return; } + prefix = nctagname(level); fprintf(nclog_global.nclogstream,"%s: ",prefix); if(fmt != NULL) { diff --git a/libdispatch/ncs3sdk_h5.c b/libdispatch/ncs3sdk_h5.c index e9da587f11..e51f8d8110 100644 --- 
a/libdispatch/ncs3sdk_h5.c +++ b/libdispatch/ncs3sdk_h5.c @@ -15,8 +15,8 @@ #include "ncrc.h" #include "ncxml.h" -#include "nch5s3comms.h" #include "ncs3sdk.h" +#include "nch5s3comms.h" #define NCTRACING #ifdef NCTRACING @@ -181,7 +181,7 @@ NC_s3sdkcreateclient(NCS3INFO* info) if((stat = NC_s3profilelookup(info->profile, "aws_secret_access_key", &accesskey))) goto done; } if((s3client->rooturl = makes3rooturl(info))==NULL) {stat = NC_ENOMEM; goto done;} - s3client->h5s3client = NCH5_s3comms_s3r_open(s3client->rooturl,info->region,accessid,accesskey); + s3client->h5s3client = NCH5_s3comms_s3r_open(s3client->rooturl,info->svc,info->region,accessid,accesskey); if(s3client->h5s3client == NULL) {stat = NC_ES3; goto done;} done: @@ -613,6 +613,10 @@ HTTP/1.1 200 string string +#ifdef GOOGLES3 + string + string +#endif ... ... @@ -679,6 +683,8 @@ parse_listbucketresult(char* xml, unsigned long long xmllen, struct LISTOBJECTSV result->nextcontinuationtoken = trim(ncxml_text(x),RECLAIM); } else if(strcmp(elem,"StartAfter")==0) { result->startafter = trim(ncxml_text(x),RECLAIM); + } else if(strcmp(elem,"StartAfter")==0) { + result->startafter = trim(ncxml_text(x),RECLAIM); } else { nclog(NCLOGERR,"Unexpected Element: <%s>",elem); stat = NC_ES3; @@ -711,7 +717,7 @@ parse_object(ncxml_t root, NClist* objects) for(x=ncxml_child_first(root);x != NULL;x=ncxml_child_next(x)) { const char* elem = ncxml_name(x); - if(strcmp(elem,"ChecksumAlorithm")==0) { + if(strcmp(elem,"ChecksumAlgorithm")==0) { if((stat = parse_checksumalgorithm(x,object->checksumalgorithms))) goto done; } else if(strcmp(elem,"ETag")==0) { object->etag = trim(ncxml_text(x),RECLAIM); @@ -725,6 +731,10 @@ parse_object(ncxml_t root, NClist* objects) object->size = trim(ncxml_text(x),RECLAIM); } else if(strcmp(elem,"StorageClass")==0) { object->storageclass = trim(ncxml_text(x),RECLAIM); + } else if(strcmp(elem,"Generation")==0) { + /* Ignore */ + } else if(strcmp(elem,"MetaGeneration")==0) { + /* Ignore */ } else { 
nclog(NCLOGERR,"Unexpected Element: <%s>",elem); stat = NC_ES3; diff --git a/libdispatch/ncuri.c b/libdispatch/ncuri.c index 7bca11a228..efd6fa4492 100644 --- a/libdispatch/ncuri.c +++ b/libdispatch/ncuri.c @@ -1237,7 +1237,7 @@ removedups(NClist* list) /* look for dups for this entry */ for(j=nclistlength(list)-2;j>i;j-=2) { if(strcasecmp(nclistget(list,i),nclistget(list,j))==0 - && strcasecmp(nclistget(list,i+1),nclistget(list,j+1))) { + && strcasecmp(nclistget(list,i+1),nclistget(list,j+1))==0) { nclistremove(list,j+1); nclistremove(list,j); } } diff --git a/libhdf5/hdf5open.c b/libhdf5/hdf5open.c index 514d1e2ffb..c63e649d9e 100644 --- a/libhdf5/hdf5open.c +++ b/libhdf5/hdf5open.c @@ -25,6 +25,7 @@ #ifdef ENABLE_HDF5_ROS3 #include +#include "ncs3sdk.h" #endif /*Nemonic */ @@ -883,12 +884,11 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid) #ifdef ENABLE_BYTERANGE else if(h5->byterange) { /* Arrange to use the byte-range drivers */ char* newpath = NULL; - char* awsregion0 = NULL; #ifdef ENABLE_HDF5_ROS3 H5FD_ros3_fapl_t fa; - const char* profile0 = NULL; const char* awsaccessid0 = NULL; const char* awssecretkey0 = NULL; + const char* profile0 = NULL; int iss3 = NC_iss3(h5->uri); fa.version = H5FD_CURR_ROS3_FAPL_T_VERSION; @@ -898,9 +898,11 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid) fa.secret_key[0] = '\0'; if(iss3) { - /* Rebuild the URL */ + NCS3INFO s3; NCURI* newuri = NULL; - if((retval = NC_s3urlrebuild(h5->uri,NULL,&awsregion0,&newuri))) goto exit; + /* Rebuild the URL */ + memset(&s3,0,sizeof(s3)); + if((retval = NC_s3urlrebuild(h5->uri,&s3,&newuri))) goto exit; if((newpath = ncuribuild(newuri,NULL,NULL,NCURISVC))==NULL) {retval = NC_EURL; goto exit;} ncurifree(h5->uri); @@ -909,22 +911,23 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid) BAIL(retval); if((retval = NC_s3profilelookup(profile0,AWS_ACCESS_KEY_ID,&awsaccessid0))) BAIL(retval); - if((retval = 
NC_s3profilelookup(profile0,AWS_SECRET_ACCESS_KEY,&awssecretkey0))) + if((retval = NC_s3profilelookup(profile0,AWS_SECRET_ACCESS_KEY,&awssecretkey0))) BAIL(retval); - if(awsregion0 == NULL) - awsregion0 = strdup(S3_REGION_DEFAULT); + if(s3.region == NULL) + s3.region = strdup(S3_REGION_DEFAULT); if(awsaccessid0 == NULL || awssecretkey0 == NULL ) { /* default, non-authenticating, "anonymous" fapl configuration */ fa.authenticate = (hbool_t)0; } else { fa.authenticate = (hbool_t)1; - assert(awsregion0 != NULL && strlen(awsregion0) > 0); + assert(s3.region != NULL && strlen(s3.region) > 0); assert(awsaccessid0 != NULL && strlen(awsaccessid0) > 0); assert(awssecretkey0 != NULL && strlen(awssecretkey0) > 0); - strlcat(fa.aws_region,awsregion0,H5FD_ROS3_MAX_REGION_LEN); + strlcat(fa.aws_region,s3.region,H5FD_ROS3_MAX_REGION_LEN); strlcat(fa.secret_id, awsaccessid0, H5FD_ROS3_MAX_SECRET_ID_LEN); strlcat(fa.secret_key, awssecretkey0, H5FD_ROS3_MAX_SECRET_KEY_LEN); } + NC_s3clear(&s3); /* create and set fapl entry */ if(H5Pset_fapl_ros3(fapl_id, &fa) < 0) BAIL(NC_EHDFERR); @@ -938,7 +941,6 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid) if ((h5->hdfid = nc4_H5Fopen((newpath?newpath:path), flags, fapl_id)) < 0) BAIL(NC_EHDFERR); nullfree(newpath); - nullfree(awsregion0); } #endif else { diff --git a/libnczarr/zclose.c b/libnczarr/zclose.c index b70e86f794..fdef3f5c16 100644 --- a/libnczarr/zclose.c +++ b/libnczarr/zclose.c @@ -179,7 +179,7 @@ NCZ_zclose_var1(NC_VAR_INFO_T* var) } /** - * @internal Close resources for vars in a group. + * @internal Close nczarr resources for vars in a group. * * @param grp Pointer to group info struct. * diff --git a/libnczarr/zdebug.c b/libnczarr/zdebug.c index 2a1ad4efaf..3b1e0e9217 100644 --- a/libnczarr/zdebug.c +++ b/libnczarr/zdebug.c @@ -34,7 +34,7 @@ int zreport(int err, const char* msg, const char* file, const char* fcn, int line) { if(err == 0) return err; - ZLOG(NCLOGWARN,"!!! 
zreport: err=%d msg=%s",err,msg); + ZLOG(NCLOGWARN,"!!! zreport: err=%d msg=%s @ %s#%s:%d",err,msg,file,fcn,line); ncbacktrace(); return zbreakpoint(err); } diff --git a/libnczarr/zsync.c b/libnczarr/zsync.c index 0d2596890c..8a14cdc694 100644 --- a/libnczarr/zsync.c +++ b/libnczarr/zsync.c @@ -1448,29 +1448,11 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) { int stat = NC_NOERR; int i,j; - char* varpath = NULL; - char* key = NULL; NCZ_FILE_INFO_T* zinfo = NULL; - NC_VAR_INFO_T* var = NULL; - NCZ_VAR_INFO_T* zvar = NULL; NCZMAP* map = NULL; - NCjson* jvar = NULL; - NCjson* jncvar = NULL; - NCjson* jdimrefs = NULL; - NCjson* jvalue = NULL; int purezarr = 0; int xarray = 0; int formatv1 = 0; - nc_type vtype; - int vtypelen; - size64_t* shapes = NULL; - int rank = 0; - int zarr_rank = 1; /* Need to watch out for scalars */ - NClist* dimnames = nclistnew(); -#ifdef ENABLE_NCZARR_FILTERS - NCjson* jfilter = NULL; - int chainindex; -#endif ZTRACE(3,"file=%s grp=%s |varnames|=%u",file->controller->path,grp->hdr.name,nclistlength(varnames)); @@ -1483,7 +1465,32 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) /* Load each var in turn */ for(i = 0; i < nclistlength(varnames); i++) { - const char* varname = nclistget(varnames,i); + /* per-variable info */ + NC_VAR_INFO_T* var = NULL; + NCZ_VAR_INFO_T* zvar = NULL; + NCjson* jvar = NULL; + NCjson* jncvar = NULL; + NCjson* jdimrefs = NULL; + NCjson* jvalue = NULL; + char* varpath = NULL; + char* key = NULL; + const char* varname = NULL; + size64_t* shapes = NULL; + NClist* dimnames = NULL; + int varsized = 0; + int suppress = 0; /* Abort processing of this variable */ + nc_type vtype = NC_NAT; + int vtypelen = 0; + int rank = 0; + int zarr_rank = 0; /* Need to watch out for scalars */ +#ifdef ENABLE_NCZARR_FILTERS + NCjson* jfilter = NULL; + int chainindex = 0; +#endif + + dimnames = nclistnew(); + varname = nclistget(varnames,i); + if((stat = nc4_var_list_add2(grp, 
varname, &var))) goto done; @@ -1522,6 +1529,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) if(version != zinfo->zarr.zarr_version) {stat = (THROW(NC_ENCZARR)); goto done;} } + /* Set the type and endianness of the variable */ { int endianness; @@ -1609,23 +1617,6 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) jdimrefs = NULL; } - /* shape */ - { - if((stat = NCJdictget(jvar,"shape",&jvalue))) goto done; - if(NCJsort(jvalue) != NCJ_ARRAY) {stat = (THROW(NC_ENCZARR)); goto done;} - if(zvar->scalar) { - rank = 0; - zarr_rank = 1; /* Zarr does not support scalars */ - } else - rank = (zarr_rank = NCJlength(jvalue)); - /* Save the rank of the variable */ - if((stat = nc4_var_set_ndims(var, rank))) goto done; - /* extract the shapes */ - if((shapes = (size64_t*)malloc(sizeof(size64_t)*zarr_rank)) == NULL) - {stat = (THROW(NC_ENOMEM)); goto done;} - if((stat = decodeints(jvalue, shapes))) goto done; - } - /* Capture dimension_separator (must precede chunk cache creation) */ { NCglobalstate* ngs = NC_getglobalstate(); @@ -1661,6 +1652,36 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) } } + /* shape */ + { + if((stat = NCJdictget(jvar,"shape",&jvalue))) goto done; + if(NCJsort(jvalue) != NCJ_ARRAY) {stat = (THROW(NC_ENCZARR)); goto done;} + + /* Process the rank */ + zarr_rank = NCJlength(jvalue); + if(zarr_rank == 0) { + /* suppress variable */ + ZLOG(NCLOGWARN,"Empty shape for variable %s suppressed",var->hdr.name); + suppress = 1; + goto suppressvar; + } + + if(zvar->scalar) { + rank = 0; + zarr_rank = 1; /* Zarr does not support scalars */ + } else + rank = (zarr_rank = NCJlength(jvalue)); + + if(zarr_rank > 0) { + /* Save the rank of the variable */ + if((stat = nc4_var_set_ndims(var, rank))) goto done; + /* extract the shapes */ + if((shapes = (size64_t*)malloc(sizeof(size64_t)*zarr_rank)) == NULL) + {stat = (THROW(NC_ENOMEM)); goto done;} + if((stat = decodeints(jvalue, shapes))) 
goto done; + } + } + /* chunks */ { size64_t chunks[NC_MAX_VAR_DIMS]; @@ -1668,8 +1689,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) if(jvalue != NULL && NCJsort(jvalue) != NCJ_ARRAY) {stat = (THROW(NC_ENCZARR)); goto done;} /* Verify the rank */ - assert (zarr_rank == NCJlength(jvalue)); - if(zvar->scalar) { + if(zvar->scalar || zarr_rank == 0) { if(var->ndims != 0) {stat = (THROW(NC_ENCZARR)); goto done;} zvar->chunkproduct = 1; @@ -1746,37 +1766,47 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) if((stat = NCZ_filter_build(file,var,jfilter,chainindex++))) goto done; } } + /* Suppress variable if there are filters and var is not fixed-size */ + if(varsized && nclistlength((NClist*)var->filters) > 0) + suppress = 1; #endif - if((stat = computedimrefs(file, var, purezarr, xarray, rank, dimnames, shapes, var->dim))) - goto done; - - if(!zvar->scalar) { - /* Extract the dimids */ - for(j=0;jdimids[j] = var->dim[j]->hdr.id; + if(zarr_rank > 0) { + if((stat = computedimrefs(file, var, purezarr, xarray, rank, dimnames, shapes, var->dim))) + goto done; + if(!zvar->scalar) { + /* Extract the dimids */ + for(j=0;jdimids[j] = var->dim[j]->hdr.id; + } } #ifdef ENABLE_NCZARR_FILTERS - /* At this point, we can finalize the filters */ - if((stat = NCZ_filter_setup(var))) goto done; + if(!suppress) { + /* At this point, we can finalize the filters */ + if((stat = NCZ_filter_setup(var))) goto done; + } #endif +suppressvar: + if(suppress) { + /* Reclaim NCZarr variable specific info */ + (void)NCZ_zclose_var1(var); + /* Remove from list of variables and reclaim the top level var object */ + (void)nc4_var_list_del(grp, var); + var = NULL; + } + /* Clean up from last cycle */ - nclistfreeall(dimnames); dimnames = nclistnew(); + nclistfreeall(dimnames); dimnames = NULL; nullfree(varpath); varpath = NULL; nullfree(shapes); shapes = NULL; + nullfree(key); key = NULL; if(formatv1) {NCJreclaim(jncvar); jncvar = NULL;} 
NCJreclaim(jvar); jvar = NULL; var = NULL; } done: - nullfree(shapes); - nullfree(varpath); - nullfree(key); - nclistfreeall(dimnames); - NCJreclaim(jvar); - if(formatv1) NCJreclaim(jncvar); return ZUNTRACE(THROW(stat)); } diff --git a/nc_test4/tst_bloscfail.sh b/nc_test4/tst_bloscfail.sh index a74a3f63e5..14dfb42d1c 100755 --- a/nc_test4/tst_bloscfail.sh +++ b/nc_test4/tst_bloscfail.sh @@ -60,7 +60,6 @@ ${NCCOPY} -4 -V three_dmn_rec_var -F *,32001,0,0,0,0,1,1,0 ./tmp_bloscx3.nc ./tm # This should fail because shuffle is off if ${NCCOPY} -4 -V three_dmn_rec_var -F *,32001,0,0,0,0,1,0,0 ./tmp_bloscx3.nc ./tmp_bloscx4_fail.nc ; then echo "*** not xfail: nccopy " - exit 1; else echo "*** xfail: nccopy " fi diff --git a/ncdap_test/tst_encode.sh b/ncdap_test/tst_encode.sh index e0de50b77a..54e0a20bed 100755 --- a/ncdap_test/tst_encode.sh +++ b/ncdap_test/tst_encode.sh @@ -8,6 +8,5 @@ set -e echo "" echo "*** Testing #encode=" mechanism -#${NCDUMP} -h 'http://opendap2.oceanbrowser.net/thredds/dodsC/data/emodnet1-domains/tmp%20test.nc?lon[0:8]#encode=none' # raw: http://iridl.ldeo.columbia.edu/SOURCES/.Indices/.soi/.c8110/.anomaly/T/(Jan 1979)/VALUE/dods ${NCDUMP} -h 'http://iridl.ldeo.columbia.edu/SOURCES/.Indices/.soi/.c8110/.anomaly/T/%28Jan%201979%29/VALUE/dods?anomaly[0]' diff --git a/nczarr_test/CMakeLists.txt b/nczarr_test/CMakeLists.txt index 09b013823b..c38b86fe81 100644 --- a/nczarr_test/CMakeLists.txt +++ b/nczarr_test/CMakeLists.txt @@ -193,6 +193,8 @@ IF(ENABLE_TESTS) if(ENABLE_NCZARR_ZIP) add_sh_test(nczarr_test run_newformat) + # Test various corrupted files + ADD_SH_TEST(nczarr_test run_corrupt.sh) endif() IF(FALSE) # Obsolete tests diff --git a/nczarr_test/Makefile.am b/nczarr_test/Makefile.am index a42248da01..d8cb5d4a54 100644 --- a/nczarr_test/Makefile.am +++ b/nczarr_test/Makefile.am @@ -161,6 +161,9 @@ endif # ISMINGW endif #ENABLE_FILTER_TESTING endif #ENABLE_NCZARR_FILTERS +# Test various corrupted files +TESTS += run_corrupt.sh + endif 
#BUILD_UTILITIES # These programs are used by the test cases @@ -200,7 +203,8 @@ run_purezarr.sh run_interop.sh run_misc.sh \ run_filter.sh \ run_newformat.sh run_nczarr_fill.sh run_quantize.sh \ run_jsonconvention.sh run_nczfilter.sh run_unknown.sh \ -run_scalar.sh run_strings.sh run_nulls.sh run_notzarr.sh run_external.sh run_unlim_io.sh +run_scalar.sh run_strings.sh run_nulls.sh run_notzarr.sh run_external.sh \ +run_unlim_io.sh run_corrupt.sh EXTRA_DIST += \ ref_ut_map_create.cdl ref_ut_map_writedata.cdl ref_ut_map_writemeta2.cdl ref_ut_map_writemeta.cdl \ @@ -228,6 +232,9 @@ ref_nulls_nczarr.baseline ref_nulls_zarr.baseline ref_nulls.cdl ref_notzarr.tar. EXTRA_DIST += ref_power_901_constants_orig.zip ref_power_901_constants.cdl ref_quotes_orig.zip ref_quotes.cdl \ ref_zarr_test_data.cdl.gz ref_zarr_test_data_2d.cdl.gz +# Additional Files +EXTRA_DIST += ref_noshape.file.zip + CLEANFILES = ut_*.txt ut*.cdl tmp*.nc tmp*.cdl tmp*.txt tmp*.dmp tmp*.zip tmp*.nc tmp*.dump tmp*.tmp tmp*.zmap tmp_ngc.c ref_zarr_test_data.cdl tst_*.nc.zip ref_quotes.zip ref_power_901_constants.zip BUILT_SOURCES = test_quantize.c test_filter_vlen.c test_unlim_vars.c test_endians.c \ diff --git a/nczarr_test/ref_noshape.file.zip b/nczarr_test/ref_noshape.file.zip new file mode 100644 index 0000000000..6f4d9c1503 Binary files /dev/null and b/nczarr_test/ref_noshape.file.zip differ diff --git a/nczarr_test/run_corrupt.sh b/nczarr_test/run_corrupt.sh new file mode 100755 index 0000000000..e9ddc67d57 --- /dev/null +++ b/nczarr_test/run_corrupt.sh @@ -0,0 +1,35 @@ +#!/bin/sh + +# Test various kinds of corrupted files + + +if test "x$srcdir" = x ; then srcdir=`pwd`; fi +. ../test_common.sh + +. 
"$srcdir/test_nczarr.sh" + +set -e + +s3isolate "testdir_corrupt" +THISDIR=`pwd` +cd $ISOPATH + +export NCLOGGING=WARN + +testnoshape1() { + zext=file + unzip ${srcdir}/ref_noshape.file.zip + fileargs ${ISOPATH}/ref_noshape "mode=zarr,$zext" + rm -f tmp_noshape1_${zext}.cdl + ${NCDUMP} $flags $fileurl > tmp_noshape1_${zext}.cdl +} + +testnoshape2() { + # Test against the original issue URL + rm -f tmp_noshape2_gs.cdl + fileurl="https://storage.googleapis.com/cmip6/CMIP6/CMIP/NASA-GISS/GISS-E2-1-G/historical/r1i1p1f1/day/tasmin/gn/v20181015/#mode=zarr,s3&aws.profile=no" + ${NCDUMP} -h $flags $fileurl > tmp_noshape2_gs.cdl +} + +testnoshape1 +if test "x$FEATURE_S3TESTS" = xyes ; then testnoshape2; fi diff --git a/nczarr_test/run_scalar.sh b/nczarr_test/run_scalar.sh index c6de0ebc81..b7c268ee5b 100755 --- a/nczarr_test/run_scalar.sh +++ b/nczarr_test/run_scalar.sh @@ -7,7 +7,7 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi set -e -s3isolate "testdir_nczarr" +s3isolate "testdir_scalar" THISDIR=`pwd` cd $ISOPATH @@ -43,7 +43,7 @@ echo "*** create nczarr file" ${NCGEN} -4 -b -o "$nczarrurl" $top_srcdir/nczarr_test/ref_scalar.cdl echo "*** read purezarr" -${NCDUMP} -n ref_scalar $zarrurl > tmp_scalar_zarr0_${zext}.cdl +${NCDUMP} -n ref_scalar $zarrurl > tmp_scalar_zarr_${zext}.cdl ${ZMD} -h $zarrurl > tmp_scalar_zarr_${zext}.txt echo "*** read nczarr" ${NCDUMP} -n ref_scalar $nczarrurl > tmp_scalar_nczarr_${zext}.cdl @@ -53,8 +53,8 @@ echo "*** verify" diff -bw $top_srcdir/nczarr_test/ref_scalar.cdl tmp_scalar_nczarr_${zext}.cdl # Fixup -zarrscalar tmp_scalar_zarr0_${zext}.cdl tmp_scalar_zarr_${zext}.cdl -diff -bw $top_srcdir/nczarr_test/ref_scalar.cdl tmp_scalar_zarr_${zext}.cdl +zarrscalar tmp_scalar_zarr_${zext}.cdl tmp_rescale_zarr_${zext}.cdl +diff -bw $top_srcdir/nczarr_test/ref_scalar.cdl tmp_rescale_zarr_${zext}.cdl } testcase file diff --git a/netCDFConfig.cmake.in b/netCDFConfig.cmake.in index 3146bb7d7c..db7bb823e6 100644 --- a/netCDFConfig.cmake.in +++ 
b/netCDFConfig.cmake.in @@ -40,6 +40,10 @@ set(netCDF_HAS_DAP4 @HAS_DAP4@) set(netCDF_HAS_DISKLESS @HAS_DISKLESS@) set(netCDF_HAS_MMAP @HAS_MMAP@) set(netCDF_HAS_JNA @HAS_JNA@) +if (netCDF_HAS_HDF4 OR netCDF_HAS_HDF5) + include(CMakeFindDependencyMacro) + find_dependency(HDF5) +endif () if (@HAS_PARALLEL@) include(CMakeFindDependencyMacro) diff --git a/plugins/H5Zmisc.c b/plugins/H5Zmisc.c index 56e64734ff..8813af59d8 100644 --- a/plugins/H5Zmisc.c +++ b/plugins/H5Zmisc.c @@ -113,7 +113,7 @@ H5Z_filter_test(unsigned int flags, size_t cd_nelmts, break; case TC_ODDSIZE: /* Print out the chunk size */ - fprintf(stderr,"nbytes = %lld chunk size = %lld\n",(long long)nbytes,(long long)*buf_size); + fprintf(stderr,">>> nbytes = %lld chunk size = %lld\n",(long long)nbytes,(long long)*buf_size); fflush(stderr); break; default: break; @@ -122,13 +122,15 @@ H5Z_filter_test(unsigned int flags, size_t cd_nelmts, if (flags & H5Z_FLAG_REVERSE) { /* Decompress */ if(testcase == TC_EXPANDED) { +#ifdef DEBUG int i; float* b = (float*)*buf; -fprintf(stderr,"TC_EXPANDED: decompress: nbytes=%u buf_size=%u xdata[0..8]=|",(unsigned)nbytes,(unsigned)*buf_size); +fprintf(stderr,">>> TC_EXPANDED: decompress: nbytes=%u buf_size=%u xdata[0..8]=|",(unsigned)nbytes,(unsigned)*buf_size); for(i=0;i<8;i++) { fprintf(stderr," %u",(int)(b[1024+i])); } fprintf(stderr,"|\n"); +#endif /* Replace buffer */ newbuf = H5allocate_memory(*buf_size,0); if(newbuf == NULL) abort(); @@ -149,8 +151,8 @@ fprintf(stderr,"TC_EXPANDED: decompress: nbytes=%u buf_size=%u xdata[0..8]=|",(u if(testcase == TC_EXPANDED) { int i; float* b; -#if 0 -fprintf(stderr,"TC_EXPANDED: compress: nbytes=%u buf_size=%u size=%u\n",(unsigned)nbytes,(unsigned)*buf_size,(unsigned)size); +#ifdef DEBUG +fprintf(stderr,">>> TC_EXPANDED: compress: nbytes=%u buf_size=%u size=%u\n",(unsigned)nbytes,(unsigned)*buf_size,(unsigned)size); #endif /* Replace buffer with one that is bigger than the input size */ newbuf = H5allocate_memory(size,0); @@ 
-218,7 +220,7 @@ extract1(void* field, size_t size, const unsigned int* params) llp = (unsigned long long*)field; *llp = u.ll; break; - default: fprintf(stderr,"insert: unexpected size: %u\n",(unsigned)size); abort(); + default: fprintf(stderr,">>> insert: unexpected size: %u\n",(unsigned)size); abort(); } } @@ -247,7 +249,7 @@ paramcheck(size_t nparams, const unsigned int* params, struct All* extracted) memset(&all,0,sizeof(all)); if(nparams != NPARAMS) { - fprintf(stderr,"Incorrect number of parameters: expected=%ld sent=%ld\n",(unsigned long)NPARAMS,(unsigned long)nparams); + fprintf(stderr,">>> Incorrect number of parameters: expected=%ld sent=%ld\n",(unsigned long)NPARAMS,(unsigned long)nparams); goto fail; } @@ -270,7 +272,7 @@ paramcheck(size_t nparams, const unsigned int* params, struct All* extracted) #ifdef DEBUG { size_t i; - fprintf(stderr,"bigendian=%d nparams=%d params=\n",bigendian,nparams); + fprintf(stderr,">>> nparams=%lu params=\n",nparams); for(i=0;i>> mismatch: %s\n",which); fflush(stderr); } diff --git a/test_common.in b/test_common.in index 8be771e19e..5f0f066db8 100644 --- a/test_common.in +++ b/test_common.in @@ -129,6 +129,9 @@ top_builddir="$TOPBUILDDIR" # Currently not used, but left as a Visual Studio placeholder. 
# VS=Debug +# Set when using gdb +#DL=".libs/" + # srcdir may or may not be defined, but if not, then create it if test "x$srcdir" = x ; then # we need to figure out our directory @@ -169,11 +172,11 @@ fi # We need to locate certain executables (and other things), # capture absolute paths, and make visible -export NCDUMP="${top_builddir}/ncdump${VS}/ncdump${ext}" -export NCCOPY="${top_builddir}/ncdump${VS}/nccopy${ext}" -export NCGEN="${top_builddir}/ncgen${VS}/ncgen${ext}" -export NCGEN3="${top_builddir}/ncgen3${VS}/ncgen3${ext}" -export NCPATHCVT="${top_builddir}/ncdump${VS}/ncpathcvt${ext}" +export NCDUMP="${top_builddir}/ncdump${VS}/${DL}ncdump${ext}" +export NCCOPY="${top_builddir}/ncdump${VS}/${DL}nccopy${ext}" +export NCGEN="${top_builddir}/ncgen${VS}/${DL}ncgen${ext}" +export NCGEN3="${top_builddir}/ncgen3${VS}/${DL}ncgen3${ext}" +export NCPATHCVT="${top_builddir}/ncdump${VS}/${DL}ncpathcvt${ext}" # Temporary hacks (until we have a test_utils directory) # to locate certain specific test files