diff --git a/.github/workflows/run_tests_win_mingw.yml b/.github/workflows/run_tests_win_mingw.yml index 7b8bfef208..f48e81e7fa 100644 --- a/.github/workflows/run_tests_win_mingw.yml +++ b/.github/workflows/run_tests_win_mingw.yml @@ -27,7 +27,7 @@ jobs: with: msystem: MINGW64 update: true - install: git mingw-w64-x86_64-toolchain automake libtool autoconf make cmake mingw-w64-x86_64-hdf5 unzip mingw-w64-x86_64-libxml2 + install: git mingw-w64-x86_64-toolchain automake libtool autoconf make cmake mingw-w64-x86_64-hdf5 unzip mingw-w64-x86_64-libxml2 mingw-w64-x86_64-zlib ### # Configure and build diff --git a/CMakeLists.txt b/CMakeLists.txt index e8d4b30c1f..875c21df56 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -61,17 +61,17 @@ ENDIF() # Define some Platforms if(osname MATCHES "CYGWIN.*") -SET(ISCYGWIN yes) + SET(ISCYGWIN yes) endif() -if(osname MATCHES "Darwin.*") -SET(ISOSX yes) + if(osname MATCHES "Darwin.*") + SET(ISOSX yes) endif() if(MSVC) -SET(ISMSVC yes) + SET(ISMSVC yes) endif() if(osname MATCHES "MINGW.*" OR osname MATCHES "MSYS.*") -SET(ISMINGW yes) -SET(MINGW yes) + SET(ISMINGW yes) + SET(MINGW yes) endif() ### @@ -210,9 +210,6 @@ SET(EXTRA_DEPS "") ENABLE_TESTING() INCLUDE(CTest) -# Copy the CTest customization file into binary directory, as required. -FILE(COPY ${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) - # Set Memory test program for non-MSVC based builds. # Assume valgrind for now. 
IF((NOT MSVC) AND (NOT MINGW) AND (NOT ISCYGWIN)) @@ -1327,24 +1324,31 @@ ENDIF() IF(ENABLE_S3) IF(NOT ENABLE_S3_INTERNAL) # See if aws-s3-sdk is available - find_package(AWSSDK REQUIRED COMPONENTS s3;core) + find_package(AWSSDK REQUIRED COMPONENTS s3;transfer) IF(AWSSDK_FOUND) - SET(service s3;core) - AWSSDK_DETERMINE_LIBS_TO_LINK(service AWS_LINK_LIBRARIES) SET(ENABLE_S3_AWS ON CACHE BOOL "S3 AWS" FORCE) - ELSE() + INCLUDE_DIRECTORIES(${AWSSDK_INCLUDE_DIR}) + ELSE(AWSSDK_FOUND) SET(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE) - ENDIF() - ENDIF() + ENDIF(AWSSDK_FOUND) + ELSE(NOT ENABLE_S3_INTERNAL) + # Find crypto libraries required with testing with the internal s3 api. + #FIND_LIBRARY(SSL_LIB NAMES ssl openssl) + find_package(OpenSSL REQUIRED) + IF(NOT OpenSSL_FOUND) + MESSAGE(FATAL_ERROR "Can't find an ssl library, required by S3_INTERNAL") + ENDIF(NOT OpenSSL_FOUND) + + #find_package(Crypto REQUIRED) + #IF(NOT CRYPTO_LIB) + # MESSAGE(FATAL_ERROR "Can't find a crypto library, required by S3_INTERNAL") + #ENDIF(NOT CRYPTO_LIB) + + ENDIF(NOT ENABLE_S3_INTERNAL) ELSE() SET(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE) ENDIF() -# Unless/until we get aws-sdk-cpp working for Windows, force use of internal -IF(ENABLE_S3 AND MSVC) - SET(ENABLE_S3_INTERNAL ON CACHE BOOL "S3 Intern" FORCE) -ENDIF() - IF(ENABLE_S3) IF(NOT ENABLE_S3_AWS AND NOT ENABLE_S3_INTERNAL) message(FATAL_ERROR "S3 support library not found; please specify option -DENABLE_S3=NO") @@ -1357,10 +1361,10 @@ IF(ENABLE_S3) ENDIF() IF(NOT ENABLE_S3) -IF(WITH_S3_TESTING STREQUAL "PUBLIC" OR WITH_S3_TESTING) - message(WARNING "S3 support is disabled => WITH_S3_TESTING=OFF") - SET(WITH_S3_TESTING OFF CACHE STRING "" FORCE) -ENDIF() + IF(WITH_S3_TESTING STREQUAL "PUBLIC" OR WITH_S3_TESTING) + message(WARNING "S3 support is disabled => WITH_S3_TESTING=OFF") + SET(WITH_S3_TESTING OFF CACHE STRING "" FORCE) + ENDIF() ENDIF() OPTION(ENABLE_LIBXML2 "Link against libxml2 if it is available, use the packaged tinyxml2 
parser otherwise." ON) @@ -1791,6 +1795,7 @@ IF(MSVC) CHECK_INCLUDE_FILE("io.h" HAVE_IO_H) ENDIF(MSVC) CHECK_INCLUDE_FILE("stdlib.h" HAVE_STDLIB_H) +CHECK_INCLUDE_FILE("ctype.h" HAVE_CTYPE_H) CHECK_INCLUDE_FILE("stdarg.h" HAVE_STDARG_H) CHECK_INCLUDE_FILE("strings.h" HAVE_STRINGS_H) CHECK_INCLUDE_FILE("signal.h" HAVE_SIGNAL_H) @@ -1891,6 +1896,10 @@ CHECK_TYPE_SIZE("uintptr_t" SIZEOF_UINTPTR_T) IF(SIZEOF_UINTPTR_T) SET(HAVE_UINTPTR_T TRUE) ENDIF(SIZEOF_UINTPTR_T) +CHECK_TYPE_SIZE("mode_t" SIZEOF_MODE_T) +IF(SIZEOF_MODE_T) + SET(HAVE_MODE_T TRUE) +ENDIF(SIZEOF_MODE_T) # __int64 is used on Windows for large file support. CHECK_TYPE_SIZE("__int64" SIZEOF___INT_64) @@ -1953,16 +1962,16 @@ ENDIF() # Check to see if MAP_ANONYMOUS is defined. IF(MSVC) -MESSAGE(WARNING "mmap not supported under visual studio: disabling MMAP support.") -SET(ENABLE_MMAP OFF) -ELSE() -CHECK_C_SOURCE_COMPILES(" -#include -int main() {int x = MAP_ANONYMOUS;}" HAVE_MAPANON) -IF(NOT HAVE_MMAP OR NOT HAVE_MAPANON) - MESSAGE(WARNING "mmap or MAP_ANONYMOUS not found: disabling MMAP support.") + MESSAGE(WARNING "mmap not supported under visual studio: disabling MMAP support.") SET(ENABLE_MMAP OFF) -ENDIF() +ELSE() + CHECK_C_SOURCE_COMPILES(" + #include + int main() {int x = MAP_ANONYMOUS;}" HAVE_MAPANON) + IF(NOT HAVE_MMAP OR NOT HAVE_MAPANON) + MESSAGE(WARNING "mmap or MAP_ANONYMOUS not found: disabling MMAP support.") + SET(ENABLE_MMAP OFF) + ENDIF() ENDIF() IF(ENABLE_MMAP) @@ -2136,6 +2145,7 @@ MACRO(build_bin_test_no_prefix F) ENDIF() ENDMACRO() +# Build a test and add it to the test list. 
MACRO(add_bin_test prefix F) ADD_EXECUTABLE(${prefix}_${F} ${F}.c ${ARGN}) TARGET_LINK_LIBRARIES(${prefix}_${F} @@ -2541,7 +2551,9 @@ SET(host_vendor "${osname}") SET(host_os "${osrel}") SET(abs_top_builddir "${CMAKE_CURRENT_BINARY_DIR}") SET(abs_top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}") - +STRING(RANDOM LENGTH 3 ALPHABET "0123456789" PLATFORMUID) +MATH(EXPR PLATFORMUID "${PLATFORMUID} + 1" OUTPUT_FORMAT DECIMAL) + SET(CC_VERSION "${CMAKE_C_COMPILER}") # Build *FLAGS for libnetcdf.settings. @@ -2589,26 +2601,43 @@ is_enabled(HAVE_BZ2 HAS_BZ2) is_enabled(ENABLE_REMOTE_FUNCTIONALITY DO_REMOTE_FUNCTIONALITY) if(ENABLE_S3_INTERNAL) -SET(WHICH_S3_SDK "internal") -SET(NC_WHICH_S3_SDK "internal") + SET(WHICH_S3_SDK "internal") + SET(NC_WHICH_S3_SDK "internal") elseif(ENABLE_S3_AWS) -SET(WHICH_S3_SDK "aws-sdk-cpp") -SET(NC_WHICH_S3_SDK "aws-sdk-cpp") + SET(WHICH_S3_SDK "aws-sdk-cpp") + SET(NC_WHICH_S3_SDK "aws-sdk-cpp") else() -SET(WHICH_S3_SDK "none") -SET(NC_WHICH_S3_SDK "none") + SET(WHICH_S3_SDK "none") + SET(NC_WHICH_S3_SDK "none") endif() if(WITH_S3_TESTING STREQUAL PUBLIC) -SET(DO_S3_TESTING "public") +SET(ENABLE_S3_TESTING "public") elseif(WITH_S3_TESTING) -SET(DO_S3_TESTING "yes") +SET(ENABLE_S3_TESTING "yes") +SET(ENABLE_S3_TESTALL "yes") elseif(NOT WITH_S3_TESTING) -SET(DO_S3_TESTING "no") +SET(ENABLE_S3_TESTING "no") else() -SET(DO_S3_TESTING "no") +SET(ENABLE_S3_TESTING "no") +endif() + +# The Unidata testing S3 bucket +# WARNING: this must match the value in configure.ac +SET(S3TESTBUCKET "unidata-zarr-test-data" CACHE STRING "S3 test bucket") + +# The working S3 path tree within the Unidata bucket. 
+# WARNING: this must match the value in configure.ac +SET(S3TESTSUBTREE "netcdf-c" CACHE STRING "Working S3 path.") +# Build a unique id based on the date +string(TIMESTAMP TESTUID "%s") +if(ENABLE_S3_TESTING) +FILE(APPEND "${CMAKE_CURRENT_BINARY_DIR}/s3cleanup_${PLATFORMUID}.uids" "${TESTUID}\n") endif() +# Copy the CTest customization file into binary directory, as required. +CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/CTestCustom.cmake") + # Generate file from template. CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/libnetcdf.settings.in" "${CMAKE_CURRENT_BINARY_DIR}/libnetcdf.settings" @@ -2653,6 +2682,15 @@ SET(TOPSRCDIR "${CMAKE_CURRENT_SOURCE_DIR}") SET(TOPBUILDDIR "${CMAKE_CURRENT_BINARY_DIR}") configure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_common.in ${CMAKE_CURRENT_BINARY_DIR}/test_common.sh @ONLY NEWLINE_STYLE LF) +#### +# Build s3cleanup.sh and s3gc.sh +##### +SET(EXTRA_DIST ${EXTRA_DIST} ${CMAKE_CURRENT_SOURCE_DIR}/s3cleanup.in ${CMAKE_CURRENT_SOURCE_DIR}/s3gc.in) +SET(TOPSRCDIR "${CMAKE_CURRENT_SOURCE_DIR}") +SET(TOPBUILDDIR "${CMAKE_CURRENT_BINARY_DIR}") +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/s3cleanup.in ${CMAKE_CURRENT_BINARY_DIR}/s3cleanup.sh @ONLY NEWLINE_STYLE LF) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/s3gc.in ${CMAKE_CURRENT_BINARY_DIR}/s3gc.sh @ONLY NEWLINE_STYLE LF) + ##### # Build and copy nc_test4/findplugin.sh to various places ##### @@ -2679,6 +2717,13 @@ IF(ENABLE_TESTS) # Build dap4_test/pingurl4.c ##### configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ncdap_test/pingurl.c ${CMAKE_CURRENT_BINARY_DIR}/dap4_test/pingurl4.c @ONLY NEWLINE_STYLE LF) + + ##### + # Build CTestCustom.cmake to cleanup S3 after tests are done. 
+ ##### + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/CTestCustom.cmake NEWLINE_STYLE LF) + + ENDIF() if(DEFINED ENV{LIB_FUZZING_ENGINE}) diff --git a/CTestCustom.cmake b/CTestCustom.cmake.in similarity index 74% rename from CTestCustom.cmake rename to CTestCustom.cmake.in index 7483a775f0..3e3700be0e 100644 --- a/CTestCustom.cmake +++ b/CTestCustom.cmake.in @@ -12,3 +12,10 @@ ## for more information. set(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS "500") + +IF(HAVE_BASH) +IF(ENABLE_S3_TESTING) +# Assume run in top-level CMAKE_BINARY_DIR +set(CTEST_CUSTOM_POST_TEST "bash ${CMAKE_BINARY_DIR}/s3cleanup.sh") +ENDIF() +ENDIF() diff --git a/Makefile.am b/Makefile.am index b72b9eee1b..64648d6180 100644 --- a/Makefile.am +++ b/Makefile.am @@ -31,7 +31,7 @@ AM_DISTCHECK_CONFIGURE_FLAGS = --without-plugin-dir EXTRA_DIST = README.md COPYRIGHT INSTALL.md test_prog.c lib_flags.am \ cmake CMakeLists.txt COMPILE.cmake.txt config.h.cmake.in \ cmake_uninstall.cmake.in FixBundle.cmake.in nc-config.cmake.in \ -RELEASE_NOTES.md CTestCustom.cmake CTestConfig.cmake.in \ +RELEASE_NOTES.md CTestConfig.cmake.in CTestCustom.cmake.in \ libnetcdf.settings.in netCDFConfig.cmake.in CMakeInstallation.cmake \ test-driver-verbose test_common.in fuzz @@ -116,10 +116,8 @@ endif # Optionally build test plugins if ENABLE_PLUGINS -if ENABLE_FILTER_TESTING PLUGIN_DIR = plugins endif -endif # If benchmarks were turned on, build and run a bunch more tests. 
if BUILD_BENCHMARKS @@ -209,7 +207,17 @@ install-data-hook: # Create the VERSION file after the build # in case it is being used by packagers +# Also track the S3 cleanup id all-local: liblib/libnetcdf.la echo ${PACKAGE_VERSION} > VERSION +if ENABLE_S3_TESTALL + echo "@TESTUID@" >> ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids +endif + +if ENABLE_S3_TESTALL +distclean-local: + rm -f ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids +endif + # Remove the VERSION file CLEANFILES = VERSION diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 7c4e85cea9..c92394242d 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -7,6 +7,8 @@ This file contains a high-level description of this package's evolution. Release ## 4.9.3 - TBD +* Mitigate the problem of test interference. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755). +* Extend NCZarr to support unlimited dimensions. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755). * Fix significant bug in the NCZarr cache management. See [Github #2737](https://github.com/Unidata/netcdf-c/pull/2737). * Fix default parameters for caching of NCZarr. See [Github #2734](https://github.com/Unidata/netcdf-c/pull/2734). * Introducing configure-time options to disable various filters, even if the required libraries are available on the system, in support of [GitHub #2712](https://github.com/Unidata/netcdf-c/pull/2712). diff --git a/config.h.cmake.in b/config.h.cmake.in index cfdadfbe71..04d876ff17 100644 --- a/config.h.cmake.in +++ b/config.h.cmake.in @@ -163,6 +163,12 @@ are set when opening a binary file on Windows. */ /* if true, enable S3 testing*/ #cmakedefine WITH_S3_TESTING "PUBLIC" +/* S3 Test Bucket */ +#define S3TESTBUCKET "${S3TESTBUCKET}" + +/* S3 Working subtree path prefix*/ +#define S3TESTSUBTREE "${S3TESTSUBTREE}" + /* if true, run extra tests which may not work yet */ #cmakedefine EXTRA_TESTS 1 @@ -337,6 +343,9 @@ are set when opening a binary file on Windows. 
*/ /* Define to 1 if you have the `snprintf' function. */ #cmakedefine HAVE_SNPRINTF 1 +/* Define to 1 if the system has the type `mode_t'. */ +#cmakedefine HAVE_MODE_T 1 + /* Define to 1 if the system has the type `ssize_t'. */ #cmakedefine HAVE_SSIZE_T 1 @@ -358,6 +367,9 @@ are set when opening a binary file on Windows. */ /* Define to 1 if you have the header file. */ #cmakedefine HAVE_STDLIB_H 1 +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_CTYPE_H 1 + /* Define to 1 if you have the header file. */ #cmakedefine HAVE_STRINGS_H 1 diff --git a/configure.ac b/configure.ac index 7a67b53d6c..19386fe141 100644 --- a/configure.ac +++ b/configure.ac @@ -903,14 +903,18 @@ unset enable_nczarr_s3 # Note we check for the library after checking for enable_s3 # because for some reason this fails if we unconditionally test for sdk # and it is not available. Fix someday +S3LIBS="" if test "x$enable_s3" = xyes ; then # See if we have the s3 aws library # Check for the AWS S3 SDK library -AC_LANG_PUSH([C++]) -AC_SEARCH_LIBS([aws_allocator_is_valid],[aws-c-common aws-cpp-sdk-s3 aws-cpp-sdk-core], [enable_s3_aws=yes],[enable_s3_aws=no]) -AC_LANG_POP + AC_LANG_PUSH([C++]) + AC_CHECK_LIB([aws-c-common], [aws_string_destroy], [enable_s3_aws=yes],[enable_s3_aws=no]) + if test "x$enable_s3_aws" = "xyes" ; then + S3LIBS="-laws-cpp-sdk-core -laws-cpp-sdk-s3" + fi + AC_LANG_POP else -enable_s3_aws=no + enable_s3_aws=no fi AC_MSG_CHECKING([whether AWS S3 SDK library is available]) @@ -926,7 +930,7 @@ AC_MSG_RESULT($enable_s3_internal) if test "x$enable_s3_aws" = xno && test "x$enable_s3_internal" = xno ; then AC_MSG_WARN([No S3 library available => S3 support disabled]) -enable_S3=no +enable_s3=no fi if test "x$enable_s3_aws" = xyes && test "x$enable_s3_internal" = xyes ; then @@ -963,17 +967,19 @@ if test "x$enable_s3" = xyes ; then fi if test "x$enable_s3_aws" = xyes ; then -AC_DEFINE([ENABLE_S3_AWS], [1], [If true, then use aws S3 library]) + LIBS="$LIBS$S3LIBS" + 
AC_DEFINE([ENABLE_S3_AWS], [1], [If true, then use aws S3 library]) fi if test "x$enable_s3_internal" = xyes ; then -AC_DEFINE([ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library]) + AC_DEFINE([ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library]) fi AC_DEFINE_UNQUOTED([WITH_S3_TESTING], [$with_s3_testing], [control S3 testing.]) if test "x$with_s3_testing" = xyes ; then AC_MSG_WARN([*** DO NOT SPECIFY WITH_S3_TESTING=YES UNLESS YOU HAVE ACCESS TO THE UNIDATA S3 BUCKET! ***]) + AC_DEFINE([ENABLE_S3_TESTALL], [yes], [control S3 testing.]) fi # Check whether we want to enable strict null byte header padding. @@ -1453,6 +1459,8 @@ AC_TYPE_UINTPTR_T AC_C_CHAR_UNSIGNED AC_C_BIGENDIAN +AC_CHECK_TYPES([mode_t]) + AM_CONDITIONAL(ISCYGWIN, [test "x$ISCYGWIN" = xyes]) AM_CONDITIONAL(ISMSVC, [test "x$ISMSVC" = xyes]) AM_CONDITIONAL(ISOSX, [test "x$ISOSX" = xyes]) @@ -2033,7 +2041,7 @@ AC_SUBST(HAS_S3_AWS,[$enable_s3_aws]) AC_SUBST(HAS_S3_INTERNAL,[$enable_s3_internal]) AC_SUBST(HAS_HDF5_ROS3,[$has_hdf5_ros3]) AC_SUBST(HAS_NCZARR,[$enable_nczarr]) -AC_SUBST(DO_S3_TESTING,[$with_s3_testing]) +AC_SUBST(ENABLE_S3_TESTING,[$with_s3_testing]) AC_SUBST(HAS_NCZARR_ZIP,[$enable_nczarr_zip]) AC_SUBST(DO_NCZARR_ZIP_TESTS,[$enable_nczarr_zip]) AC_SUBST(HAS_QUANTIZE,[$enable_quantize]) @@ -2056,6 +2064,26 @@ if test "x$enable_s3_aws" = xno && test "x$enable_s3_internal" = xno; then AC_SUBST(WHICH_S3_SDK,[none]) fi +# The Unidata testing S3 bucket +# WARNING: this must match the value in CMakeLists.txt +AC_DEFINE([S3TESTBUCKET], ["unidata-zarr-test-data"], [S3 test bucket]) +AC_SUBST([S3TESTBUCKET],["unidata-zarr-test-data"]) + +# The working S3 path tree within the Unidata bucket. 
+# WARNING: this must match the value in CMakeLists.txt +AC_DEFINE([S3TESTSUBTREE], ["netcdf-c"], [S3 test path prefix]) +AC_SUBST([S3TESTSUBTREE],[netcdf-c]) + +# Build a small unique id to avoid interference on same platform +PLATFORMUID="$RANDOM" +# Make sure uid > 0 +PLATFORMUID=$((PLATFORMUID % 1000 + 1)) +# Build a unique id based on the date +TESTUID=`date +%s` +AC_DEFINE_UNQUOTED([TESTUID], [${TESTUID}], [S3 working path]) +AC_SUBST([TESTUID],${TESTUID}) +AC_SUBST([PLATFORMUID],${PLATFORMUID}) + # Always available std_filters="bz2" if test "x$have_deflate" = xyes ; then @@ -2201,18 +2229,21 @@ AC_DEFINE_UNQUOTED([NC_DISPATCH_VERSION], [${NC_DISPATCH_VERSION}], [Dispatch ta # This would be true for a cmake build. AC_SUBST([ISCMAKE], []) -# Provide conditional to temporarily suppress tests and such -AM_CONDITIONAL([AX_IGNORE], [test xno = xyes]) +# Provide true/false conditionals to temporarily suppress tests and such +AM_CONDITIONAL([AX_DISABLE], [test xno = xyes]) +AM_CONDITIONAL([AX_ENABLE], [test xyes = xyes]) # Provide conditional to identify tests that must be run manually AM_CONDITIONAL([AX_MANUAL], [test xno = xyes]) AC_MSG_NOTICE([generating header files and makefiles]) AC_CONFIG_FILES(test_common.sh:test_common.in) -AC_CONFIG_FILES(nc_test4/findplugin.sh:nc_test4/findplugin.in) -AC_CONFIG_FILES(nczarr_test/findplugin.sh:nc_test4/findplugin.in) -AC_CONFIG_FILES(plugins/findplugin.sh:nc_test4/findplugin.in) -AC_CONFIG_FILES(examples/C/findplugin.sh:nc_test4/findplugin.in) -AC_CONFIG_FILES(ncdap_test/findtestserver.c:ncdap_test/findtestserver.c.in) +AC_CONFIG_FILES(s3cleanup.sh:s3cleanup.in, [chmod ugo+x s3cleanup.sh]) +AC_CONFIG_FILES(s3gc.sh:s3gc.in, [chmod ugo+x s3gc.sh]) +AC_CONFIG_FILES(nc_test4/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x nc_test4/findplugin.sh]) +AC_CONFIG_FILES(nczarr_test/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x nczarr_test/findplugin.sh]) +AC_CONFIG_FILES(plugins/findplugin.sh:nc_test4/findplugin.in, [chmod 
ugo+x plugins/findplugin.sh]) +AC_CONFIG_FILES(examples/C/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x examples/C/findplugin.sh]) +AC_CONFIG_FILES(ncdap_test/findtestserver.c:ncdap_test/findtestserver.c.in, [chmod ugo+x ncdap_test/findtestserver.c]) AC_CONFIG_FILES([nc_test/run_pnetcdf_tests.sh:nc_test/run_pnetcdf_tests.sh.in],[chmod ugo+x nc_test/run_pnetcdf_tests.sh]) AC_CONFIG_FILES(dap4_test/findtestserver4.c:ncdap_test/findtestserver.c.in) AC_CONFIG_FILES(dap4_test/pingurl4.c:ncdap_test/pingurl.c) @@ -2269,6 +2300,7 @@ AC_CONFIG_FILES([Makefile ncdap_test/testdata3/Makefile ncdap_test/expected3/Makefile ncdap_test/expectremote3/Makefile + ncdap_test/expectedhyrax/Makefile dap4_test/Makefile plugins/Makefile nczarr_test/Makefile diff --git a/dap4_test/test_data.c b/dap4_test/test_data.c index 5d89087e17..08c939021b 100644 --- a/dap4_test/test_data.c +++ b/dap4_test/test_data.c @@ -21,7 +21,7 @@ Test the netcdf-4 data building process. #endif #endif -#define DEBUG +#undef DEBUG static struct Options { char* file; diff --git a/docs/cloud.md b/docs/cloud.md index 69e8f50a29..23e6a0ae36 100644 --- a/docs/cloud.md +++ b/docs/cloud.md @@ -82,19 +82,19 @@ Currently the following build cases are known to work. 
Linux CMake aws-s3-sdk yes Linux CMake nch5s3comms yes OSX Automake aws-s3-sdk unknown -OSX Automake nch5s3comms unknown +OSX Automake nch5s3comms yes OSX CMake aws-s3-sdk unknown -OSX CMake nch5s3comms unknown -Visual Studio CMake aws-s3-sdk no (tests-fail) +OSX CMake nch5s3comms yes +Visual Studio CMake aws-s3-sdk no (tests fail) Visual Studio CMake nch5s3comms yes -Cygwin Automake aws-s3-sdk unknown +Cygwin Automake aws-s3-sdk no (tests fail) Cygwin Automake nch5s3comms yes -Cygwin CMake aws-s3-sdk unknown -Cygwin CMake nch5s3comms unknown +Cygwin CMake aws-s3-sdk no +Cygwin CMake nch5s3comms yes Mingw Automake aws-s3-sdk unknown -Mingw Automake nch5s3comms unknown +Mingw Automake nch5s3comms yes Mingw CMake aws-s3-sdk unknown -Mingw CMake nch5s3comms unknown +Mingw CMake nch5s3comms yes ## Automake @@ -163,7 +163,7 @@ This library, [aws-sdk-cpp library](https://github.com/aws/aws-sdk-cpp.git), has a number of properties of interest: * It is written in C++ * It is available on [GitHub](https://github.com/aws/aws-sdk-cpp.git), -* It uses CMake + ninja as its primary build system. +* It uses CMake as its primary build system. ### *\*nix\** Build @@ -179,18 +179,17 @@ pushd aws-sdk-cpp mkdir build cd build PREFIX=/usr/local -FLAGS="-DCMAKE_INSTALL_PREFIX=${PREFIX} \ - -DCMAKE_INSTALL_LIBDIR=lib \ - -DCMAKE_MODULE_PATH=${PREFIX}/lib/cmake \ +FLAGS="-DCMAKE_INSTALL_PREFIX=/usr/local \ -DCMAKE_POLICY_DEFAULT_CMP0075=NEW \ - -DBUILD_ONLY=s3 \ + -DBUILD_ONLY=s3;transfer \ -DENABLE_UNITY_BUILD=ON \ - -DENABLE_TESTING=OFF \ - -DCMAKE_BUILD_TYPE=$CFG \ - -DSIMPLE_INSTALL=ON" -cmake -GNinja $FLAGS .. -ninja all -ninja install + -DCMAKE_BUILD_TYPE=Release \ + -DSIMPLE_INSTALL=ON \ + -DENABLE_TESTING=OFF + +cmake $FLAGS .. +cmake --build . --config Release +sudo cmake --install . --config Release cd .. 
popd ```` diff --git a/docs/internal.md b/docs/internal.md index fc6f06618e..2ddf3fb74b 100644 --- a/docs/internal.md +++ b/docs/internal.md @@ -12,6 +12,7 @@ It covers the following issues. * [Managing instances of variable-length data types](#intern_vlens) * [Inferring File Types](#intern_infer) * [Adding a Standard Filter](#intern_filters) +* [Test Interference](#intern_isolation) # 1. Including C++ Code in the netcdf-c Library {#intern_c++} @@ -647,9 +648,124 @@ done: #endif /*HAVE_ZSTD*/ ```` +# 5. Test Interference {#intern_isolation} + +At some point, Unidata switched from running tests serially to running tests in parallel. +It soon became apparent that there were resources shared between tests and that parallel +execution sometimes caused interference between tests. + +In order to fix the inter-test interference, several approaches were used. +1. Renaming resources (primarily files) so that tests would create different test files. +2. Telling the test system that there were explicit dependencies between tests so that they would not be run in parallel. +3. Isolating test resources by creating independent directories for each test. + +## Test Isolation +The isolation mechanism is currently used mostly in nczarr_tests. +It requires that tests are all executed inside a shell script. +When the script starts, it invokes a shell function called "isolate". +This function looks in the current directory for a directory called "testset_\". +If "testset_\" is not found then it creates it. +This directory is then used to isolate all test output. + +After calling "isolate", the script enters the "testset_\" +directory. Then each actual test creates a directory in which to +store any file resources that it creates during execution. +Suppose, for example, that the shell script is called "run_XXXX.sh". +The isolate function creates a directory with the general name "testset_\".
+Then the run_XXX.sh script creates a directory "testset_\/testdir_XXX", +enters it and runs the test. +During cleanup, specifically "make clean", all the testset_\ directories are deleted. + +The "\" is a unique identifier created using the "date +%s" command. It returns an integer +representing the number of seconds since the start of the so-called "epoch", basically +"00:00:00 UTC, 1 January 1970". Using a date makes it easier to detect and reclaim obsolete +testset directories. + +## Cloud Test Isolation + +When testing against the cloud (currently Amazon S3), the interference +problem is intensified. +This is because the current cloud testing uses a single S3 bucket, +which means that not only is there inter-test interference, but there +is also potential interference across builds. +This means, for example, that testing by github actions could +interfere with local testing by individual users. +This problem is difficult to solve, but a mostly complete solution has been implemented; +it is possible with cmake, but not (as yet) with automake. + +In any case, there is a shell function called s3isolate in nczarr_test/test_nczarr.sh that operates on cloud resources in a way that is similar to the isolate function. +The s3isolate does several things: +1. It invokes isolate to ensure local isolation. +2. It creates a path prefix relative to the Unidata S3 bucket that has the name "testset_\", where this name + is the same as the one created by the isolate function. +3. It appends the uid to a file called s3cleanup_\.uids. This file may accumulate several uids indicating + the keys that need to be cleaned up. The pid is a separate small unique id to avoid s3cleanup interference. + +The test script then ensures that any cloud resources are created as extensions of the path prefix. + +Cleanup of S3 resources is complex.
+In configure.ac or the top-level CMakeList.txt files, the path "netcdf-c/testset_\" +is created and via configuration commands, is propagated to various Makefile.am +and specific script files. + +The actual cleanup requires different approaches for cmake and for automake. +In cmake, the CTestCustom.cmake mechanism is used and contains the following command: +```` + IF(ENABLE_S3_TESTING) + # Assume run in top-level CMAKE_BINARY_DIR + set(CTEST_CUSTOM_POST_TEST "bash -x ${CMAKE_BINARY_DIR}/s3cleanup.sh") + ENDIF() +```` + +In automake, the "check-local" extension mechanism is used +because it is invoked after all tests are run in the nczarr_test +directory. So nczarr_test/Makefile.am contains the following +equivalent code: +```` + if ENABLE_S3_TESTALL + check-local: + bash -x ${top_srcdir}/s3cleanup.sh + endif +```` + +### s3cleanup.sh +This script is created by configuring the base file s3cleanup.in. +It is unfortunately complex, but roughly it does the following. +1. It computes a list of all the keys for all objects in the Unidata bucket and stores them in a file + named "s3cleanup_\.keys". +2. Get a list of date based uids created above. +3. Iterate over the keys and the uids to collect the set of keys matching any one of the uids. +4. Divide the keys into sets of 500. This is because the delete-objects command will not accept more than 1000 keys at a time. +5. Convert each set of 500 keys from step 4 into a properly formatted JSON file suitable for use by the "aws delete-objects" command. +This file is called "s3cleanup_\.json". +6. Use the "aws delete-objects" command to delete the keys. +7. Repeat steps 5 and 6 for each set of 500 keys. + +The pid is a small random number to avoid local interference. +It is important to note that this script assumes that the +AWS command line package is installed. +This can be installed, for example, using this command: +````apt install awscli````. + +### s3gc.sh +This script is created by configuring the base file s3gc.in. 
+It is intended as a way for users to manually clean up the S3 Unidata bucket. +It takes a single argument, delta, that is the number of days before the present day +and computes a stop date corresponding to "present_day - delta". +All keys for all uids on or before the stop date are deleted. + +It operates as follows: +1. Get a list of date based uids created above. +2. Iterate over the keys and collect all that are on or before the stop date. +3. Divide the keys into sets of 500. This is in recognition of the 1000 key limit mentioned previously. +4. Convert each set of 500 keys from step 3 into a properly formatted JSON file suitable for use by the "aws delete-objects" command. +This file is called "s3cleanup_\.json". +5. Use the "aws delete-objects" command to delete the keys. +6. Repeat steps 4 and 5 for each set of 500 keys. + # Point of Contact {#intern_poc} *Author*: Dennis Heimbigner
*Email*: dmh at ucar dot edu
*Initial Version*: 12/22/2021
-*Last Revised*: 5/16/2023 +*Last Revised*: 9/16/2023 diff --git a/docs/nczarr.md b/docs/nczarr.md index 5c92dcf89e..4c2a5595f1 100644 --- a/docs/nczarr.md +++ b/docs/nczarr.md @@ -53,6 +53,7 @@ are also [supported](./md_filters.html "filters"). Specifically, the model supports the following. - "Atomic" types: char, byte, ubyte, short, ushort, int, uint, int64, uint64, string. - Shared (named) dimensions +- Unlimited dimensions - Attributes with specified types -- both global and per-variable - Chunking - Fill values @@ -65,7 +66,6 @@ Specifically, the model supports the following. With respect to full netCDF-4, the following concepts are currently unsupported. - User-defined types (enum, opaque, VLEN, and Compound) -- Unlimited dimensions - Contiguous or compact storage Note that contiguous and compact are not actually supported @@ -375,7 +375,7 @@ Currently it contains the following key(s): _\_nczarr_group\__ -- this key appears in every _.zgroup_ object. It contains any netcdf specific group information. Specifically it contains the following keys: -* "dims" -- the name and size of shared dimensions defined in this group. +* "dims" -- the name and size of shared dimensions defined in this group, as well as an optional flag indicating if the dimension is UNLIMITED. * "vars" -- the name of variables defined in this group. * "groups" -- the name of sub-groups defined in this group. These lists allow walking the NCZarr dataset without having to use the potentially costly search operation. @@ -487,9 +487,9 @@ A separate tabulation of S3 support is in the document cloud.md. Linux Automake yes Linux CMake yes Cygwin Automake yes -Cygwin CMake unknown -OSX Automake unknown -OSX CMake unknown +Cygwin CMake yes +OSX Automake yes +OSX CMake yes Visual Studio CMake yes diff --git a/include/ncconfigure.h b/include/ncconfigure.h index 3dccfe9eab..cad59c9e21 100644 --- a/include/ncconfigure.h +++ b/include/ncconfigure.h @@ -33,11 +33,22 @@ defined and missing types defined.
*/ #ifdef _WIN32 + #ifndef HAVE_SSIZE_T #include typedef SSIZE_T ssize_t; #define HAVE_SSIZE_T 1 #endif + +#ifndef HAVE_MODE_T +typedef int mode_t; +#define HAVE_MODE_T 1 +#endif + +#ifndef F_OK +#define F_OK 00 +#endif + #endif /*Warning: Cygwin with -ansi does not define these functions diff --git a/include/ncjson.h b/include/ncjson.h index 32b050f06c..bbd7707c06 100644 --- a/include/ncjson.h +++ b/include/ncjson.h @@ -44,8 +44,6 @@ and do the command: #define NCJ_NSORTS 8 -/* No flags are currently defined, but the argument is a placeholder */ - /* Define a struct to store primitive values as unquoted strings. The sort will provide more info. Do not bother with a union since the amount of saved space is minimal. diff --git a/include/nclog.h b/include/nclog.h index e7146c162a..51e3f12055 100644 --- a/include/nclog.h +++ b/include/nclog.h @@ -15,11 +15,13 @@ #define NCENVLOGGING "NCLOGGING" #define NCENVTRACING "NCTRACING" +/* Log level: linear order */ /* Suggested tag values */ -#define NCLOGNOTE 0 -#define NCLOGWARN 1 -#define NCLOGERR 2 -#define NCLOGDBG 3 +#define NCLOGOFF (0) /* Stop Logging */ +#define NCLOGERR (1) /* Errors */ +#define NCLOGWARN (2) /* Warnings */ +#define NCLOGNOTE (3) /* General info */ +#define NCLOGDEBUG (4) /* Everything */ /* Support ptr valued arguments that are used to store results */ #define PTRVAL(t,p,d) ((t)((p) == NULL ? 
(d) : *(p))) @@ -29,12 +31,12 @@ extern "C" { #endif EXTERNL void ncloginit(void); -EXTERNL int ncsetlogging(int tf); +EXTERNL int ncsetloglevel(int level); EXTERNL int nclogopen(FILE* stream); /* The tag value is an arbitrary integer */ EXTERNL void nclog(int tag, const char* fmt, ...); -EXTERNL int ncvlog(int tag, const char* fmt, va_list ap); +EXTERNL void ncvlog(int tag, const char* fmt, va_list ap); EXTERNL void nclogtext(int tag, const char* text); EXTERNL void nclogtextn(int tag, const char* text, size_t count); @@ -49,7 +51,8 @@ EXTERNL int ncbreakpoint(int err); /* Debug support */ #if defined(NCCATCH) -#define NCTHROW(e) ((e) == NC_NOERR ? (e) : ncthrow(e,__FILE__,__LINE__)) +/* Warning: do not evaluate e more than once */ +#define NCTHROW(e) ncthrow(e,__FILE__,__LINE__) #else #define NCTHROW(e) (e) #endif diff --git a/include/ncs3sdk.h b/include/ncs3sdk.h index 2ca05754e9..c06f0e39f1 100644 --- a/include/ncs3sdk.h +++ b/include/ncs3sdk.h @@ -6,6 +6,7 @@ #ifndef NCS3SDK_H #define NCS3SDK_H 1 + typedef struct NCS3INFO { char* host; /* non-null if other*/ char* region; /* region */ @@ -35,7 +36,7 @@ EXTERNL const char* NC_s3dumps3info(NCS3INFO* info); /* From ds3util.c */ EXTERNL int NC_getdefaults3region(NCURI* uri, const char** regionp); -EXTERNL int NC_s3urlprocess(NCURI* url, NCS3INFO* s3); +EXTERNL int NC_s3urlprocess(NCURI* url, NCS3INFO* s3, NCURI** newurlp); EXTERNL int NC_s3clear(NCS3INFO* s3); EXTERNL int NC_s3clone(NCS3INFO* s3, NCS3INFO** news3p); diff --git a/include/netcdf_json.h b/include/netcdf_json.h index e063319d5a..46657548a6 100644 --- a/include/netcdf_json.h +++ b/include/netcdf_json.h @@ -173,8 +173,9 @@ and do the command: #undef NCJDEBUG #ifdef NCJDEBUG +/* Warning: do not evaluate err more than once */ +#define NCJTHROW(err) ncjbreakpoint(err) static int ncjbreakpoint(int err) {return err;} -#define NCJTHROW(err) ((err)==NCJ_ERR?ncjbreakpoint(err):(err)) #else #define NCJTHROW(err) (err) #endif diff --git a/lib_flags.am 
b/lib_flags.am index c7ce5bfbd6..a54e32c5e0 100644 --- a/lib_flags.am +++ b/lib_flags.am @@ -6,14 +6,13 @@ # libraries for netCDF-4. # -AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include +AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include -I$(top_builddir) -I$(top_srcdir) AM_LDFLAGS = if USE_DAP AM_CPPFLAGS += -I${top_srcdir}/oc2 endif - if ENABLE_NCZARR AM_CPPFLAGS += -I${top_srcdir}/libnczarr endif diff --git a/libdap2/cache.c b/libdap2/cache.c index 1ae4f5a4a2..fb6afdfa7f 100644 --- a/libdap2/cache.c +++ b/libdap2/cache.c @@ -130,7 +130,7 @@ prefetchdata(NCDAPCOMMON* nccomm) /* Should be prefetchable */ nclistpush(vars,(void*)var); if(SHOWFETCH) { -nclog(NCLOGDBG,"prefetch: %s",var->ncfullname); +nclog(NCLOGDEBUG,"prefetch: %s",var->ncfullname); } } } @@ -428,7 +428,7 @@ markprefetch(NCDAPCOMMON* nccomm) { extern char* ocfqn(OCddsnode); char *tmp = ocfqn(var->ocnode); - nclog(NCLOGDBG,"prefetchable: %s=%lu", + nclog(NCLOGDEBUG,"prefetchable: %s=%lu", tmp,(unsigned long)nelems); free(tmp); } diff --git a/libdap2/dapdebug.h b/libdap2/dapdebug.h index ac8a3ea805..830b9f05d3 100644 --- a/libdap2/dapdebug.h +++ b/libdap2/dapdebug.h @@ -55,6 +55,7 @@ extern int dappanic(const char* fmt, ...); #ifdef CATCHERROR /* Place breakpoint on dapbreakpoint to catch errors close to where they occur*/ +/* Warning: do not evaluate more than once */ #define THROW(e) dapthrow(e,__LINE__,__FILE__) #define THROWCHK(e) (void)dapthrow(e,__LINE__,__FILE__) diff --git a/libdap2/ncd2dispatch.c b/libdap2/ncd2dispatch.c index 16e384ca73..e8d433fd77 100644 --- a/libdap2/ncd2dispatch.c +++ b/libdap2/ncd2dispatch.c @@ -419,8 +419,7 @@ fprintf(stderr,"ce=%s\n",dumpconstraint(dapcomm->oc.dapconstraint)); /* Turn on logging; only do this after oc_open*/ if((value = dapparamvalue(dapcomm,"log")) != NULL) { - ncsetlogging(1); - nclogopen(NULL); + ncsetloglevel(NCLOGNOTE); } /* fetch and build the unconstrained DDS for use as diff --git a/libdap4/d4debug.h 
b/libdap4/d4debug.h index 5ff9db4f0c..adf655a2c2 100644 --- a/libdap4/d4debug.h +++ b/libdap4/d4debug.h @@ -42,6 +42,7 @@ extern int d4panic(const char* fmt, ...); #ifdef D4CATCH /* Place breakpoint on dapbreakpoint to catch errors close to where they occur*/ +/* WARNING: do not evaluate e more than once */ #define THROW(e) d4throw(e) #define THROWCHK(e) (void)d4throw(e) extern int d4breakpoint(int err); diff --git a/libdap4/d4file.c b/libdap4/d4file.c index e8982db1d0..aeccc7423f 100644 --- a/libdap4/d4file.c +++ b/libdap4/d4file.c @@ -117,7 +117,7 @@ NCD4_open(const char * path, int mode, /* Turn on logging; only do this after oc_open*/ if((value = ncurifragmentlookup(d4info->uri,"log")) != NULL) { ncloginit(); - ncsetlogging(1); + ncsetloglevel(NCLOGNOTE); } /* Check env values */ diff --git a/libdap4/d4read.c b/libdap4/d4read.c index cc5fcb277a..3a0e95a1d1 100644 --- a/libdap4/d4read.c +++ b/libdap4/d4read.c @@ -180,7 +180,7 @@ readpacket(NCD4INFO* state, NCURI* url, NCbytes* packet, NCD4mode dxx, NCD4forma fetchurl = ncuribuild(url,NULL,suffix,flags); MEMCHECK(fetchurl); if(FLAGSET(state->controls.flags,NCF_SHOWFETCH)) { - nclog(NCLOGDBG,"fetch url=%s",fetchurl); + nclog(NCLOGDEBUG,"fetch url=%s",fetchurl); #ifdef HAVE_GETTIMEOFDAY gettimeofday(&time0,NULL); #endif @@ -194,7 +194,7 @@ readpacket(NCD4INFO* state, NCURI* url, NCbytes* packet, NCD4mode dxx, NCD4forma gettimeofday(&time1,NULL); secs = deltatime(time0,time1); #endif - nclog(NCLOGDBG,"fetch complete: %0.3f",secs); + nclog(NCLOGDEBUG,"fetch complete: %0.3f",secs); } } #ifdef D4DEBUG @@ -271,7 +271,7 @@ readfile(NCD4INFO* state, const NCURI* uri, NCD4mode dxx, NCD4format fxx, NCbyte gettimeofday(&time0,NULL); #endif surl = ncuribuild((NCURI*)uri,NULL,NULL,NCURIALL); - nclog(NCLOGDBG,"fetch uri=%s file=%s",surl,filename); + nclog(NCLOGDEBUG,"fetch uri=%s file=%s",surl,filename); } switch (dxx) { case NCD4_DMR: @@ -293,7 +293,7 @@ readfile(NCD4INFO* state, const NCURI* uri, NCD4mode dxx, NCD4format fxx, 
NCbyte gettimeofday(&time1,NULL); secs = deltatime(time0,time1); #endif - nclog(NCLOGDBG,"%s fetch complete: %0.3f",suffix,secs); + nclog(NCLOGDEBUG,"%s fetch complete: %0.3f",suffix,secs); } return THROW(stat); } @@ -326,7 +326,7 @@ readfileDAPDMR(NCD4INFO* state, const NCURI* uri, NCbytes* packet) gettimeofday(&time0,NULL); #endif surl = ncuribuild((NCURI*)uri,NULL,".dap",NCURIALL); - nclog(NCLOGDBG,"fetch uri=%s file=%s",surl,filename); + nclog(NCLOGDEBUG,"fetch uri=%s file=%s",surl,filename); } stat = NC_readfile(filename,packet); @@ -336,7 +336,7 @@ readfileDAPDMR(NCD4INFO* state, const NCURI* uri, NCbytes* packet) gettimeofday(&time1,NULL); secs = deltatime(time0,time1); #endif - nclog(NCLOGDBG,"fetch complete: %0.3f",secs); + nclog(NCLOGDEBUG,"fetch complete: %0.3f",secs); } if(stat != NC_NOERR) goto done; diff --git a/libdispatch/awsincludes.h b/libdispatch/awsincludes.h index b175724054..461cd9a9f7 100644 --- a/libdispatch/awsincludes.h +++ b/libdispatch/awsincludes.h @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/libdispatch/dfilter.c b/libdispatch/dfilter.c index b0d0a1582a..d3b590dcda 100644 --- a/libdispatch/dfilter.c +++ b/libdispatch/dfilter.c @@ -101,6 +101,7 @@ nc_inq_var_filter_info(int ncid, int varid, unsigned int id, size_t* nparamsp, u if((stat = ncp->dispatch->inq_var_filter_info(ncid,varid,id,nparamsp,params))) goto done; done: + if(stat == NC_ENOFILTER) nclog(NCLOGWARN,"Undefined filter: %u",(unsigned)id); return stat; } @@ -131,6 +132,7 @@ nc_def_var_filter(int ncid, int varid, unsigned int id, size_t nparams, const un if((stat = NC_check_id(ncid,&ncp))) return stat; if((stat = ncp->dispatch->def_var_filter(ncid,varid,id,nparams,params))) goto done; done: + if(stat == NC_ENOFILTER) nclog(NCLOGWARN,"Undefined filter: %u",(unsigned)id); return stat; } diff --git a/libdispatch/dhttp.c b/libdispatch/dhttp.c index aa4d03031e..6f12e561fa 100644 --- a/libdispatch/dhttp.c +++ b/libdispatch/dhttp.c @@ 
-121,7 +121,7 @@ nc_http_open_verbose(const char* path, int verbose, NC_HTTP_STATE** statep) case HTTPS3: { if((state->s3.info = (NCS3INFO*)calloc(1,sizeof(NCS3INFO)))==NULL) {stat = NCTHROW(NC_ENOMEM); goto done;} - if((stat = NC_s3urlprocess(state->url,state->s3.info))) goto done; + if((stat = NC_s3urlprocess(state->url,state->s3.info,NULL))) goto done; if((state->s3.s3client = NC_s3sdkcreateclient(state->s3.info))==NULL) {stat = NCTHROW(NC_EURL); goto done;} } break; diff --git a/libdispatch/drc.c b/libdispatch/drc.c index 1b70b148e6..b896d90bfb 100644 --- a/libdispatch/drc.c +++ b/libdispatch/drc.c @@ -225,7 +225,7 @@ NC_rcload(void) globalstate = NC_getglobalstate(); if(globalstate->rcinfo->ignore) { - nclog(NCLOGDBG,".rc file loading suppressed"); + nclog(NCLOGNOTE,".rc file loading suppressed"); goto done; } if(globalstate->rcinfo->loaded) goto done; @@ -653,7 +653,7 @@ rcsearch(const char* prefix, const char* rcname, char** pathp) /* see if file is readable */ f = NCfopen(path,"r"); if(f != NULL) - nclog(NCLOGDBG, "Found rc file=%s",path); + nclog(NCLOGNOTE, "Found rc file=%s",path); done: if(f == NULL || ret != NC_NOERR) { nullfree(path); diff --git a/libdispatch/ds3util.c b/libdispatch/ds3util.c index a018fc2972..df001f8023 100644 --- a/libdispatch/ds3util.c +++ b/libdispatch/ds3util.c @@ -203,7 +203,7 @@ endswith(const char* s, const char* suffix) /* S3 utilities */ EXTERNL int -NC_s3urlprocess(NCURI* url, NCS3INFO* s3) +NC_s3urlprocess(NCURI* url, NCS3INFO* s3, NCURI** newurlp) { int stat = NC_NOERR; NCURI* url2 = NULL; @@ -228,6 +228,7 @@ NC_s3urlprocess(NCURI* url, NCS3INFO* s3) nullfree(seg); } if((stat = NC_join(pathsegments,&s3->rootkey))) goto done; + if(newurlp) {*newurlp = url2; url2 = NULL;} done: ncurifree(url2); diff --git a/libdispatch/ncexhash.c b/libdispatch/ncexhash.c index d1677805e7..ea3957a054 100644 --- a/libdispatch/ncexhash.c +++ b/libdispatch/ncexhash.c @@ -20,6 +20,7 @@ See LICENSE.txt for license information. 
#define INLINED #ifdef CATCH +/* Warning: do not evaluate x more than once */ #define THROW(x) throw(x) static void breakpoint(void) {} static int ignore[] = {NC_ENOTFOUND, 0}; diff --git a/libdispatch/ncjson.c b/libdispatch/ncjson.c index 7730f42f72..8c193eb4af 100644 --- a/libdispatch/ncjson.c +++ b/libdispatch/ncjson.c @@ -26,9 +26,12 @@ and do the command: #include "ncjson.h" #undef NCJDEBUG +#define NCJTRACE + #ifdef NCJDEBUG +/* Warning: do not evaluate err more than once */ +#define NCJTHROW(err) ncjbreakpoint(err) static int ncjbreakpoint(int err) {return err;} -#define NCJTHROW(err) ((err)==NCJ_ERR?ncjbreakpoint(err):(err)) #else #define NCJTHROW(err) (err) #endif @@ -63,6 +66,8 @@ typedef struct NCJparser { long long num; int tf; int status; /* NCJ_ERR|NCJ_OK */ + unsigned flags; +# define NCJ_TRACE 1 } NCJparser; typedef struct NCJbuf { @@ -86,7 +91,7 @@ typedef struct NCJbuf { #define nulldup(x) ((x)?strdup(x):(x)) #endif -#ifdef NCJDEBUG +#if defined NCJDEBUG || defined NCJTRACE static char* tokenname(int token); #endif @@ -148,6 +153,7 @@ NCJparsen(size_t len, const char* text, unsigned flags, NCjson** jsonp) parser = calloc(1,sizeof(NCJparser)); if(parser == NULL) {stat = NCJTHROW(NCJ_ERR); goto done;} + parser->flags = flags; parser->text = (char*)malloc(len+1+1); if(parser->text == NULL) {stat = NCJTHROW(NCJ_ERR); goto done;} @@ -429,6 +435,16 @@ fprintf(stderr,"%s(%d): |%s|\n",tokenname(token),token,parser->yytext); done: if(parser->status == NCJ_ERR) token = NCJ_UNDEF; +#ifdef NCJTRACE + if(parser->flags & NCJ_TRACE) { + const char* txt = NULL; + switch(token) { + case NCJ_STRING: case NCJ_INT: case NCJ_DOUBLE: case NCJ_BOOLEAN: txt = parser->yytext; break; + default: break; + } + fprintf(stderr,">>>> token=%s:'%s'\n",tokenname(token),(txt?txt:"")); + } +#endif return token; } @@ -661,7 +677,7 @@ unescape1(int c) return c; } -#ifdef NCJDEBUG +#if defined NCJDEBUG || defined NCJTRACE static char* tokenname(int token) { diff --git 
a/libdispatch/nclog.c b/libdispatch/nclog.c index 288d93e308..26472fdfe0 100644 --- a/libdispatch/nclog.c +++ b/libdispatch/nclog.c @@ -37,7 +37,7 @@ static int nclogginginitialized = 0; static struct NCLOGGLOBAL { - int nclogging; + int loglevel; int tracelevel; FILE* nclogstream; int depth; @@ -48,11 +48,11 @@ static struct NCLOGGLOBAL { } frames[NC_MAX_FRAMES]; } nclog_global = {0,-1,NULL}; -static const char* nctagset[] = {"Note","Warning","Error","Debug"}; -static const int nctagsize = sizeof(nctagset)/sizeof(char*); +static const char* nctagset[] = {"OFF","ERR","WARN","NOTE","DEBUG",NULL}; /* Forward */ static const char* nctagname(int tag); +static int nctagforname(const char* tag); /*!\defgroup NClog NClog Management @{*/ @@ -68,19 +68,17 @@ ncloginit(void) return; nclogginginitialized = 1; memset(&nclog_global,0,sizeof(nclog_global)); + ncsetloglevel(NCLOGOFF); nclog_global.tracelevel = -1; - ncsetlogging(0); nclog_global.nclogstream = stderr; /* Use environment variables to preset nclogging state*/ - /* I hope this is portable*/ envv = getenv(NCENVLOGGING); if(envv != NULL) { - ncsetlogging(1); + int level = nctagforname(envv); + if(level < 0) ncsetloglevel(level); } envv = getenv(NCENVTRACING); - if(envv != NULL) { - nctracelevel(atoi(envv)); - } + if(envv != NULL) nctracelevel(atoi(envv)); } /*! @@ -92,12 +90,13 @@ Enable/Disable logging. */ int -ncsetlogging(int tf) +ncsetloglevel(int level) { int was; if(!nclogginginitialized) ncloginit(); - was = nclog_global.nclogging; - nclog_global.nclogging = tf; + was = nclog_global.loglevel; + if(level >= 0 && level <= NCLOGDEBUG) + nclog_global.loglevel = level; if(nclog_global.nclogstream == NULL) nclogopen(NULL); return was; } @@ -131,23 +130,21 @@ nclog(int tag, const char* fmt, ...) 
} } -int -ncvlog(int tag, const char* fmt, va_list ap) +void +ncvlog(int level, const char* fmt, va_list ap) { const char* prefix; - int was = -1; if(!nclogginginitialized) ncloginit(); - if(tag == NCLOGERR) was = ncsetlogging(1); - if(!nclog_global.nclogging || nclog_global.nclogstream == NULL) return was; - prefix = nctagname(tag); + if(nclog_global.loglevel > level || nclog_global.nclogstream == NULL) + return; + prefix = nctagname(level); fprintf(nclog_global.nclogstream,"%s: ",prefix); if(fmt != NULL) { vfprintf(nclog_global.nclogstream, fmt, ap); } fprintf(nclog_global.nclogstream, "\n" ); fflush(nclog_global.nclogstream); - return was; } void @@ -164,10 +161,10 @@ Each line will be sent using nclog with the specified tag. */ void -nclogtextn(int tag, const char* text, size_t count) +nclogtextn(int level, const char* text, size_t count) { - NC_UNUSED(tag); - if(!nclog_global.nclogging || nclog_global.nclogstream == NULL) return; + if(nclog_global.loglevel > level || nclog_global.nclogstream == NULL) + return; fwrite(text,1,count,nclog_global.nclogstream); fflush(nclog_global.nclogstream); } @@ -175,11 +172,22 @@ nclogtextn(int tag, const char* text, size_t count) static const char* nctagname(int tag) { - if(tag < 0 || tag >= nctagsize) + if(tag < NCLOGOFF || tag >= NCLOGDEBUG) return "unknown"; return nctagset[tag]; } +static int +nctagforname(const char* tag) +{ + int level; + const char** p = NULL; + for(level=0,p=nctagset;*p;p++,level++) { + if(strcasecmp(*p,tag)==0) return level; + } + return -1; +} + /*! Send trace messages. 
\param[in] level Indicate the level of trace @@ -194,10 +202,8 @@ nctracelevel(int level) oldlevel = nclog_global.tracelevel; if(level < 0) { nclog_global.tracelevel = level; - ncsetlogging(0); } else { /*(level >= 0)*/ nclog_global.tracelevel = level; - ncsetlogging(1); nclogopen(NULL); /* use stderr */ } return oldlevel; @@ -226,7 +232,6 @@ ncvtrace(int level, const char* fcn, const char* fmt, va_list ap) { struct Frame* frame; if(!nclogginginitialized) ncloginit(); - if(nclog_global.tracelevel < 0) ncsetlogging(0); if(fcn != NULL) { frame = &nclog_global.frames[nclog_global.depth]; frame->fcn = fcn; diff --git a/libdispatch/ncs3sdk_aws.cpp b/libdispatch/ncs3sdk_aws.cpp index 5e3c3a3e9a..806edae8a8 100644 --- a/libdispatch/ncs3sdk_aws.cpp +++ b/libdispatch/ncs3sdk_aws.cpp @@ -5,8 +5,13 @@ /* WARNING: changes to this file may need to be propagated to libsrc/s3sdk.cpp */ -#define NOOP -#define DEBUG +#undef DEBUG + +/* Use Aws::Transfer instead of direct REST API */ +#undef TRANSFER + +/* Disable bucket creation/deletion */ +#define NOOPBUCKET #include "awsincludes.h" #include @@ -16,6 +21,14 @@ #include "netcdf.h" #include "ncrc.h" +#ifdef TRANSFER +#include +#include +#include +#include +#include +#endif + #include "ncs3sdk.h" #undef NCTRACING @@ -39,6 +52,14 @@ extern char* strdup(const char*); #define size64_t unsigned long long +#ifdef TRANSFER +#define AWSS3CLIENT std::shared_ptr* +#define AWSS3GET(x) (((AWSS3CLIENT)(x))->get()) +#else +#define AWSS3CLIENT Aws::S3::S3Client* +#define AWSS3GET(x) ((AWSS3CLIENT)(x)) +#endif + struct KeySet { size_t nkeys; size_t alloc; @@ -110,13 +131,19 @@ EXTERNL int NC_s3sdkinitialize(void) { if(!ncs3_initialized) { - ncs3_initialized = 1; - ncs3_finalized = 0; - NCTRACE(11,NULL); - Aws::InitAPI(ncs3options); + ncs3_initialized = 1; + ncs3_finalized = 0; + + #ifdef DEBUG - ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug; + //ncs3options.loggingOptions.logLevel = 
Aws::Utils::Logging::LogLevel::Debug; + ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Trace; + ncs3options.httpOptions.installSigPipeHandler = true; + ncs3options.loggingOptions.logger_create_fn = [] { return std::make_shared(Aws::Utils::Logging::LogLevel::Trace); }; + #endif + Aws::InitAPI(ncs3options); + } return NCUNTRACE(NC_NOERR); } @@ -159,6 +186,8 @@ s3sdkcreateconfig(NCS3INFO* info) if(info->profile) config.profileName = info->profile; config.scheme = Aws::Http::Scheme::HTTPS; + //config.connectTimeoutMs = 1000; + //config.requestTimeoutMs = 0; config.connectTimeoutMs = 300000; config.requestTimeoutMs = 600000; if(info->region) config.region = info->region; @@ -169,26 +198,54 @@ s3sdkcreateconfig(NCS3INFO* info) return config; } +static AWSS3CLIENT +buildclient(Aws::Client::ClientConfiguration* config, Aws::Auth::AWSCredentials* creds) +{ + AWSS3CLIENT s3client = NULL; +#ifdef TRANSFER + std::shared_ptr client; + if(creds != NULL) { + client = Aws::MakeShared("S3Client",*creds,*config, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent, + false); + } else { + client = Aws::MakeShared("S3Client",*config, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent, + false); + } + // Create allocated client space + s3client = new std::shared_ptr; + *s3client = client; +#else + if(creds != NULL) { + s3client = new Aws::S3::S3Client(*creds,*config, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent, + false); + } else { + s3client = new Aws::S3::S3Client(*config, + Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent, + false); + } +#endif + return s3client; +} + EXTERNL void* NC_s3sdkcreateclient(NCS3INFO* info) { - int stat = NC_NOERR; NCTRACE(11,NULL); Aws::Client::ClientConfiguration config = s3sdkcreateconfig(info); - Aws::S3::S3Client *s3client; + AWSS3CLIENT s3client; if(info->profile == NULL || strcmp(info->profile,"none")==0) { Aws::Auth::AWSCredentials creds; 
creds.SetAWSAccessKeyId(Aws::String("")); creds.SetAWSSecretKey(Aws::String("")); - s3client = new Aws::S3::S3Client(creds,config, - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent, - false); + + s3client = buildclient(&config,&creds); } else { - s3client = new Aws::S3::S3Client(config, - Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent, - false); + s3client = buildclient(&config,NULL); } // delete config; NCUNTRACENOOP(NC_NOERR); @@ -200,7 +257,7 @@ NC_s3sdkbucketexists(void* s3client0, const char* bucket, int* existsp, char** e { int stat = NC_NOERR; int exists = 0; - Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; NCTRACE(11,"bucket=%s",bucket); if(errmsgp) *errmsgp = NULL; @@ -223,7 +280,7 @@ NC_s3sdkbucketexists(void* s3client0, const char* bucket, int* existsp, char** e #else Aws::S3::Model::HeadBucketRequest request; request.SetBucket(bucket); - auto result = s3client->HeadBucket(request); + auto result = AWSS3GET(s3client)->HeadBucket(request); exists = result.IsSuccess() ? 
1 : 0; #endif if(existsp) *existsp = exists; @@ -236,7 +293,7 @@ NC_s3sdkbucketcreate(void* s3client0, const char* region, const char* bucket, ch int stat = NC_NOERR; NCTRACE(11,"region=%s bucket=%s",region,bucket); - Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; if(errmsgp) *errmsgp = NULL; const Aws::S3::Model::BucketLocationConstraint &awsregion = s3findregion(region); if(awsregion == Aws::S3::Model::BucketLocationConstraint::NOT_SET) @@ -250,9 +307,9 @@ NC_s3sdkbucketcreate(void* s3client0, const char* region, const char* bucket, ch bucket_config.SetLocationConstraint(awsregion); create_request.SetCreateBucketConfiguration(bucket_config); } -#ifdef NOOP +#ifdef NOOPBUCKET /* Create the bucket */ - auto create_result = s3client->CreateBucket(create_request); + auto create_result = AWSS3GET(s3client)->CreateBucket(create_request); if(!create_result.IsSuccess()) { if(errmsgp) *errmsgp = makeerrmsg(create_result.GetError()); stat = NC_ES3;; @@ -271,7 +328,7 @@ NC_s3sdkbucketdelete(void* s3client0, NCS3INFO* info, char** errmsgp) NCTRACE(11,"info=%s%s",dumps3info(info)); - Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; if(errmsgp) *errmsgp = NULL; const Aws::S3::Model::BucketLocationConstraint &awsregion = s3findregion(info->region); @@ -281,9 +338,9 @@ NC_s3sdkbucketdelete(void* s3client0, NCS3INFO* info, char** errmsgp) Aws::S3::Model::DeleteBucketRequest request; request.SetBucket(info->bucket); -#ifdef NOOP +#ifdef NOOPBUCKET /* Delete the bucket */ - auto result = s3client->DeleteBucket(request); + auto result = AWSS3GET(s3client)->DeleteBucket(request); if(!result.IsSuccess()) { if(errmsgp) *errmsgp = makeerrmsg(result.GetError()); stat = NC_ES3;; @@ -311,7 +368,7 @@ NC_s3sdkinfo(void* s3client0, const char* bucket, const char* pathkey, size64_t* NCTRACE(11,"bucket=%s pathkey=%s",bucket,pathkey); - Aws::S3::S3Client* s3client = 
(Aws::S3::S3Client*)s3client0; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; Aws::S3::Model::HeadObjectRequest head_request; if(*pathkey != '/') return NC_EINTERNAL; /* extract the true s3 key*/ @@ -320,7 +377,7 @@ NC_s3sdkinfo(void* s3client0, const char* bucket, const char* pathkey, size64_t* if(errmsgp) *errmsgp = NULL; head_request.SetBucket(bucket); head_request.SetKey(key); - auto head_outcome = s3client->HeadObject(head_request); + auto head_outcome = AWSS3GET(s3client)->HeadObject(head_request); if(head_outcome.IsSuccess()) { long long l = head_outcome.GetResult().GetContentLength(); if(lenp) *lenp = (size64_t)l; @@ -343,6 +400,27 @@ NC_s3sdkinfo(void* s3client0, const char* bucket, const char* pathkey, size64_t* return NCUNTRACEX(stat,"len=%d",(int)(lenp?*lenp:-1)); } +/** + * In-memory stream implementations + */ +class DownLoadStream : public Aws::IOStream +{ + public: + using Base = Aws::IOStream; + // Provide a customer-controlled streambuf to hold data from the bucket. + explicit DownLoadStream(std::streambuf* buf) : Base(buf) {} + ~DownLoadStream() override = default; +}; + +class UpLoadStream : public Aws::IOStream +{ + public: + using Base = Aws::IOStream; + // Provide a customer-controlled streambuf to hold data from the bucket. 
+ explicit UpLoadStream(std::streambuf* buf) : Base(buf) {} + ~UpLoadStream() override = default; +}; + /* @return NC_NOERR if success @return NC_EXXX if fail @@ -351,26 +429,49 @@ EXTERNL int NC_s3sdkread(void* s3client0, const char* bucket, const char* pathkey, size64_t start, size64_t count, void* content, char** errmsgp) { int stat = NC_NOERR; - char range[1024]; const char* key = NULL; - size64_t rangeend; NCTRACE(11,"bucket=%s pathkey=%s start=%llu count=%llu content=%p",bucket,pathkey,start,count,content); - Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0; - Aws::S3::Model::GetObjectRequest object_request; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; if(count == 0) return NCUNTRACE(stat); - + if(errmsgp) *errmsgp = NULL; if(*pathkey != '/') return NC_EINTERNAL; if((stat = makes3key(pathkey,&key))) return NCUNTRACE(stat); +#ifdef TRANSFER + auto executor = Aws::MakeShared("executor", 25); + Aws::Transfer::TransferManagerConfiguration transfer_config(executor.get()); + transfer_config.s3Client = *s3client; + // The local variable downloadBuffer' is captured by reference in a lambda. + // It must persist until all downloading by the 'transfer_manager' is complete. + Aws::Utils::Stream::PreallocatedStreamBuf downloadBuffer((unsigned char*)content, count - start); + auto transfer_manager = Aws::Transfer::TransferManager::Create(transfer_config); + auto creationFunction = [&downloadBuffer]() { //Define a lambda expression for the callback method parameter to stream back the data. 
+ return Aws::New("downloadHandle", &downloadBuffer);}; + auto downloadHandle = transfer_manager->DownloadFile(bucket, key, start, count, creationFunction); + downloadHandle->WaitUntilFinished(); + bool success = downloadHandle->GetStatus() == Aws::Transfer::TransferStatus::COMPLETED; + if (!success) { + auto err = downloadHandle->GetLastError(); + if(errmsgp) *errmsgp = makeerrmsg(err,key); + stat = NC_ES3; + } else { + size64_t transferred = downloadHandle->GetBytesTransferred(); + size64_t totalsize = downloadHandle->GetBytesTotalSize(); + assert(count == totalsize && transferred == totalsize); + } +#else + char range[1024]; + size64_t rangeend; + Aws::S3::Model::GetObjectRequest object_request; object_request.SetBucket(bucket); object_request.SetKey(key); rangeend = (start+count)-1; snprintf(range,sizeof(range),"bytes=%llu-%llu",start,rangeend); object_request.SetRange(range); - auto get_object_result = s3client->GetObject(object_request); + auto get_object_result = AWSS3GET(s3client)->GetObject(object_request); if(!get_object_result.IsSuccess()) { if(errmsgp) *errmsgp = makeerrmsg(get_object_result.GetError(),key); stat = NC_ES3; @@ -385,6 +486,7 @@ NC_s3sdkread(void* s3client0, const char* bucket, const char* pathkey, size64_t if(content) memcpy(content,s,slen); } +#endif return NCUNTRACE(stat); } @@ -397,28 +499,50 @@ NC_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey, s { int stat = NC_NOERR; const char* key = NULL; - + + const char* mcontent = (char*)content; NCTRACE(11,"bucket=%s pathkey=%s count=%lld content=%p",bucket,pathkey,count,content); - Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0; - Aws::S3::Model::PutObjectRequest put_request; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; + if(errmsgp) *errmsgp = NULL; if(*pathkey != '/') return NCUNTRACE(NC_EINTERNAL); if((stat = makes3key(pathkey,&key))) return NCUNTRACE(stat); - - if(errmsgp) *errmsgp = NULL; + +#ifdef TRANSFER + auto executor = 
Aws::MakeShared("executor", 25); + Aws::Transfer::TransferManagerConfiguration transfer_config(executor.get()); + transfer_config.s3Client = *s3client; + auto transfer_manager = Aws::Transfer::TransferManager::Create(transfer_config); + std::shared_ptr uploadStream = std::shared_ptr(new Aws::StringStream()); + uploadStream->rdbuf()->pubsetbuf((char*)content,count); + auto uploadHandle = transfer_manager->UploadFile(uploadStream, bucket, key, "application/octet-stream", Aws::Map()); + uploadHandle->WaitUntilFinished(); + bool success = uploadHandle->GetStatus() == Aws::Transfer::TransferStatus::COMPLETED; + if (!success) { + auto err = uploadHandle->GetLastError(); + if(errmsgp) *errmsgp = makeerrmsg(err,key); + stat = NC_ES3; + } else { + size64_t transferred = uploadHandle->GetBytesTransferred(); + size64_t totalsize = uploadHandle->GetBytesTotalSize(); + assert(count == totalsize && transferred == totalsize); + } +#else + Aws::S3::Model::PutObjectRequest put_request; put_request.SetBucket(bucket); put_request.SetKey(key); put_request.SetContentLength((long long)count); - + std::shared_ptr data = std::shared_ptr(new Aws::StringStream()); data->rdbuf()->pubsetbuf((char*)content,count); put_request.SetBody(data); - auto put_result = s3client->PutObject(put_request); + auto put_result = AWSS3GET(s3client)->PutObject(put_request); if(!put_result.IsSuccess()) { if(errmsgp) *errmsgp = makeerrmsg(put_result.GetError(),key); stat = NC_ES3; } +#endif return NCUNTRACE(stat); } @@ -429,7 +553,7 @@ NC_s3sdkclose(void* s3client0, NCS3INFO* info, int deleteit, char** errmsgp) NCTRACE(11,"info=%s rootkey=%s deleteit=%d",dumps3info(info),deleteit); - Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; if(deleteit) { /* Delete the root key; ok it if does not exist */ switch (stat = NC_s3sdkdeletekey(s3client0,info->bucket,info->rootkey,errmsgp)) { @@ -438,7 +562,11 @@ NC_s3sdkclose(void* s3client0, NCS3INFO* info, int 
deleteit, char** errmsgp) default: break; } } +#ifdef TRANSFER + delete s3client; +#else delete s3client; +#endif return NCUNTRACE(stat); } @@ -455,7 +583,7 @@ getkeys(void* s3client0, const char* bucket, const char* prefixkey0, const char* char* prefixdir = NULL; bool istruncated = false; char* continuetoken = NULL; - Aws::S3::S3Client* s3client = NULL; + AWSS3CLIENT s3client = NULL; KeySet commonkeys; KeySet realkeys; KeySet allkeys; @@ -465,7 +593,7 @@ getkeys(void* s3client0, const char* bucket, const char* prefixkey0, const char* if(*prefixkey0 != '/') {stat = NC_EINTERNAL; goto done;} if(errmsgp) *errmsgp = NULL; - s3client = (Aws::S3::S3Client*)s3client0; + s3client = (AWSS3CLIENT)s3client0; do { Aws::S3::Model::ListObjectsV2Request objects_request; @@ -485,7 +613,7 @@ getkeys(void* s3client0, const char* bucket, const char* prefixkey0, const char* objects_request.SetContinuationToken(continuetoken); free(continuetoken); continuetoken = NULL; } - auto objects_outcome = s3client->ListObjectsV2(objects_request); + auto objects_outcome = AWSS3GET(s3client)->ListObjectsV2(objects_request); if(objects_outcome.IsSuccess()) { const Aws::S3::Model::ListObjectsV2Result& result = objects_outcome.GetResult(); istruncated = result.GetIsTruncated(); @@ -548,7 +676,7 @@ NC_s3sdkdeletekey(void* s3client0, const char* bucket, const char* pathkey, char NCTRACE(11,"bucket=%s pathkey=%s",bucket,pathkey); - Aws::S3::S3Client* s3client = (Aws::S3::S3Client*)s3client0; + AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0; Aws::S3::Model::DeleteObjectRequest delete_request; assert(pathkey != NULL && *pathkey == '/'); @@ -556,7 +684,7 @@ NC_s3sdkdeletekey(void* s3client0, const char* bucket, const char* pathkey, char /* Delete this key object */ delete_request.SetBucket(bucket); delete_request.SetKey(key); - auto delete_result = s3client->DeleteObject(delete_request); + auto delete_result = AWSS3GET(s3client)->DeleteObject(delete_request); if(!delete_result.IsSuccess()) { if(errmsgp) 
*errmsgp = makeerrmsg(delete_result.GetError(),key); stat = NC_ES3; diff --git a/libdispatch/ncs3sdk_h5.c b/libdispatch/ncs3sdk_h5.c index 15e96cefdc..e9da587f11 100644 --- a/libdispatch/ncs3sdk_h5.c +++ b/libdispatch/ncs3sdk_h5.c @@ -427,7 +427,7 @@ EXTERNL int NC_s3sdkgetkeys(void* s3client0, const char* bucket, const char* prefixkey0, size_t* nkeysp, char*** keysp, char** errmsgp) { NCTRACE(11,"bucket=%s prefixkey0=%s",bucket,prefixkey0); - return getkeys(s3client0, bucket, prefixkey0, "/", nkeysp, keysp, errmsgp); + return NCUNTRACE(getkeys(s3client0, bucket, prefixkey0, "/", nkeysp, keysp, errmsgp)); } /* @@ -439,7 +439,7 @@ EXTERNL int NC_s3sdksearch(void* s3client0, const char* bucket, const char* prefixkey0, size_t* nkeysp, char*** keysp, char** errmsgp) { NCTRACE(11,"bucket=%s prefixkey0=%s",bucket,prefixkey0); - return getkeys(s3client0, bucket, prefixkey0, NULL, nkeysp, keysp, errmsgp); + return NCUNTRACE(getkeys(s3client0, bucket, prefixkey0, NULL, nkeysp, keysp, errmsgp)); } EXTERNL int @@ -561,7 +561,8 @@ makes3prefix(const char* prefix, char** prefixdirp) return NC_NOERR; } -/* Copy keys1 concat keys2 into merge; note that merge list may not be empty. */ +/* Move keys1 concat keys2 into merge; note that merge list may not be empty. 
*/ +/* Will leave keys1 and keys2 empty */ static int mergekeysets(NClist* keys1, NClist* keys2, NClist* merge) { diff --git a/libdispatch/ncxcache.c b/libdispatch/ncxcache.c index ee1602d6f6..40b82f81c4 100644 --- a/libdispatch/ncxcache.c +++ b/libdispatch/ncxcache.c @@ -21,6 +21,7 @@ #define SMALLTABLE #ifdef CATCH +/* Warning: do not evaluate x more than once */ #define THROW(x) throw(x) static void breakpoint(void) {} static int ignore[] = {0}; diff --git a/libhdf5/hdf5debug.h b/libhdf5/hdf5debug.h index 996e787107..34f6c218cd 100644 --- a/libhdf5/hdf5debug.h +++ b/libhdf5/hdf5debug.h @@ -12,6 +12,7 @@ #ifdef H5CATCH /* Place breakpoint to catch errors close to where they occur*/ +/* Warning: do not evaluate e more than once */ #define THROW(e) nch5throw(e,__LINE__) #define THROWCHK(e) (void)nch5throw(e) extern int nch5breakpoint(int err); diff --git a/liblib/CMakeLists.txt b/liblib/CMakeLists.txt index ce2bd85af4..03547f0d5d 100644 --- a/liblib/CMakeLists.txt +++ b/liblib/CMakeLists.txt @@ -138,10 +138,17 @@ IF(ENABLE_PNETCDF AND PNETCDF) SET(TLL_LIBS ${TLL_LIBS} ${PNETCDF}) ENDIF() -IF(ENABLE_S3_AWS) - TARGET_LINK_DIRECTORIES(netcdf PUBLIC ${AWSSDK_LIB_DIR}) - TARGET_LINK_LIBRARIES(netcdf ${AWS_LINK_LIBRARIES}) -ENDIF() + +IF(ENABLE_S3) + IF(ENABLE_S3_AWS) + TARGET_LINK_DIRECTORIES(netcdf PUBLIC ${AWSSDK_LIB_DIR}) + SET(TLL_LIBS ${AWSSDK_LINK_LIBRARIES} ${TLL_LIBS}) + ELSEIF(ENABLE_S3_INTERNAL) + IF(OPENSSL_FOUND) + SET(TLL_LIBS ${OPENSSL_SSL_LIBRARIES} ${OPENSSL_CRYPTO_LIBRARIES} ${TLL_LIBS}) + ENDIF(OPENSSL_FOUND) + ENDIF(ENABLE_S3_AWS) +ENDIF(ENABLE_S3) IF(HAVE_LIBXML2) SET(TLL_LIBS ${TLL_LIBS} ${LIBXML2_LIBRARIES}) diff --git a/libnczarr/zarr.c b/libnczarr/zarr.c index 67232e74b8..966adfbfbe 100644 --- a/libnczarr/zarr.c +++ b/libnczarr/zarr.c @@ -49,7 +49,7 @@ ncz_create_dataset(NC_FILE_INFO_T* file, NC_GRP_INFO_T* root, const char** contr zgrp->common.file = file; /* Fill in NCZ_FILE_INFO_T */ - zinfo->created = 1; + zinfo->creating = 1;
zinfo->common.file = file; zinfo->native_endianness = (NCZ_isLittleEndian() ? NC_ENDIAN_LITTLE : NC_ENDIAN_BIG); if((zinfo->envv_controls=NCZ_clonestringvec(0,controls)) == NULL) @@ -123,7 +123,7 @@ ncz_open_dataset(NC_FILE_INFO_T* file, const char** controls) zinfo = file->format_file_info; /* Fill in NCZ_FILE_INFO_T */ - zinfo->created = 0; + zinfo->creating = 0; zinfo->common.file = file; zinfo->native_endianness = (NCZ_isLittleEndian() ? NC_ENDIAN_LITTLE : NC_ENDIAN_BIG); if((zinfo->envv_controls = NCZ_clonestringvec(0,controls))==NULL) /*0=>envv style*/ @@ -339,7 +339,7 @@ applycontrols(NCZ_FILE_INFO_T* zinfo) /* Process other controls */ if((value = controllookup((const char**)zinfo->envv_controls,"log")) != NULL) { zinfo->controls.flags |= FLAG_LOGGING; - ncsetlogging(1); + ncsetloglevel(NCLOGNOTE); } if((value = controllookup((const char**)zinfo->envv_controls,"show")) != NULL) { if(strcasecmp(value,"fetch")==0) diff --git a/libnczarr/zarr.h b/libnczarr/zarr.h index 6957bdd144..8b3b37ca0e 100644 --- a/libnczarr/zarr.h +++ b/libnczarr/zarr.h @@ -81,7 +81,6 @@ EXTERNL int NCZ_comma_parse(const char* s, NClist* list); EXTERNL int NCZ_swapatomicdata(size_t datalen, void* data, int typesize); EXTERNL char** NCZ_clonestringvec(size_t len, const char** vec); EXTERNL void NCZ_freestringvec(size_t len, char** vec); -EXTERNL int NCZ_s3clear(NCS3INFO* s3map); EXTERNL int NCZ_ischunkname(const char* name,char dimsep); EXTERNL char* NCZ_chunkpath(struct ChunkKey key); EXTERNL int NCZ_reclaim_fill_value(NC_VAR_INFO_T* var); @@ -89,7 +88,7 @@ EXTERNL int NCZ_copy_fill_value(NC_VAR_INFO_T* var, void** dstp); EXTERNL int NCZ_get_maxstrlen(NC_OBJ* obj); EXTERNL int NCZ_fixed2char(const void* fixed, char** charp, size_t count, int maxstrlen); EXTERNL int NCZ_char2fixed(const char** charp, void* fixed, size_t count, int maxstrlen); -EXTERNL int NCZ_copy_data(NC_FILE_INFO_T* file, NC_TYPE_INFO_T* xtype, const void* memory, size_t count, int nofill, void* copy); +EXTERNL int 
NCZ_copy_data(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, const void* memory, size_t count, int reading, void* copy); EXTERNL int NCZ_iscomplexjson(NCjson* value, nc_type typehint); /* zwalk.c */ diff --git a/libnczarr/zcache.h b/libnczarr/zcache.h index c578052f3b..f30e2bd01f 100644 --- a/libnczarr/zcache.h +++ b/libnczarr/zcache.h @@ -67,5 +67,6 @@ extern size64_t NCZ_cache_size(NCZChunkCache* cache); extern int NCZ_buildchunkpath(NCZChunkCache* cache, const size64_t* chunkindices, struct ChunkKey* key); extern int NCZ_ensure_fill_chunk(NCZChunkCache* cache); extern int NCZ_reclaim_fill_chunk(NCZChunkCache* cache); +extern int NCZ_chunk_cache_modify(NCZChunkCache* cache, const size64_t* indices); #endif /*ZCACHE_H*/ diff --git a/libnczarr/zchunking.c b/libnczarr/zchunking.c index c5fdd14152..d2defd230a 100644 --- a/libnczarr/zchunking.c +++ b/libnczarr/zchunking.c @@ -10,7 +10,7 @@ static int pcounter = 0; /* Forward */ -static int compute_intersection(const NCZSlice* slice, const size64_t chunklen, NCZChunkRange* range); +static int compute_intersection(const NCZSlice* slice, const size64_t chunklen, unsigned char isunlimited, NCZChunkRange* range); static void skipchunk(const NCZSlice* slice, NCZProjection* projection); static int verifyslice(const NCZSlice* slice); @@ -20,19 +20,25 @@ static int verifyslice(const NCZSlice* slice); absolute position) of the first chunk that intersects the slice and the index of the last chunk that intersects the slice. In practice, the count = last - first + 1 is stored instead of the last index. + Note that this n-dim array of indices may have holes in it if the slice stride + is greater than the chunk length. + @param rank variable rank + @param slices the complete set of slices |slices| == R + @param ncr (out) the vector of computed chunk ranges. 
+ @return NC_EXXX error code */ int NCZ_compute_chunk_ranges( - int rank, /* variable rank */ + struct Common* common, const NCZSlice* slices, /* the complete set of slices |slices| == R*/ - const size64_t* chunklen, /* the chunk length corresponding to the dimensions */ NCZChunkRange* ncr) { int stat = NC_NOERR; int i; + int rank = common->rank; for(i=0;ichunklens[i],common->isunlimited[i],&ncr[i]))) goto done; } @@ -40,10 +46,18 @@ NCZ_compute_chunk_ranges( return stat; } +/** +@param Compute chunk range for a single slice. +@param chunklen size of the chunk +@param isunlimited if corresponding dim is unlimited +@param range (out) the range of chunks covered by this slice +@return NC_EXX error code +*/ static int compute_intersection( const NCZSlice* slice, - const size64_t chunklen, + size64_t chunklen, + unsigned char isunlimited, NCZChunkRange* range) { range->start = floordiv(slice->start, chunklen); @@ -53,6 +67,9 @@ compute_intersection( /** Compute the projection of a slice as applied to n'th chunk. +A projection defines the set of grid points touched within a +chunk by a slice. This set of points is the "projection" +of the slice onto the chunk. This is somewhat complex because: 1. for the first projection, the start is the slice start, but after that, we have to take into account that for @@ -295,4 +312,3 @@ clearallprojections(NCZAllProjections* nap) } } #endif - diff --git a/libnczarr/zchunking.h b/libnczarr/zchunking.h index 78afde0654..f3a14703eb 100644 --- a/libnczarr/zchunking.h +++ b/libnczarr/zchunking.h @@ -8,8 +8,6 @@ #include "ncexternl.h" -/* Callback functions so we can use with unit tests */ - typedef int (*NCZ_reader)(void* source, size64_t* chunkindices, void** chunkdata); struct Reader {void* source; NCZ_reader read;}; @@ -29,6 +27,10 @@ typedef struct NCZSlice { size64_t len; /* full dimension length */ } NCZSlice; +/* A projection defines the set of grid points + for a given set of slices as projected onto + a single chunk. 
+*/ typedef struct NCProjection { int id; int skip; /* Should this projection be skipped? */ @@ -54,7 +56,7 @@ typedef struct NCZSliceProjections { the chunk */ } NCZSliceProjections; -/* Combine some values to simplify internal argument lists */ +/* Combine some values to avoid having to pass long argument lists*/ struct Common { NC_FILE_INFO_T* file; NC_VAR_INFO_T* var; @@ -62,22 +64,23 @@ struct Common { int reading; /* 1=> read, 0 => write */ int rank; int scalar; /* 1 => scalar variable */ - size64_t* dimlens; - size64_t* chunklens; - size64_t* memshape; + size64_t dimlens[NC_MAX_VAR_DIMS]; + unsigned char isunlimited[NC_MAX_VAR_DIMS]; + size64_t chunklens[NC_MAX_VAR_DIMS]; + size64_t memshape[NC_MAX_VAR_DIMS]; void* memory; size_t typesize; size64_t chunkcount; /* computed product of chunklens; warning indices, not bytes */ int swap; /* var->format_info_file->native_endianness == var->endianness */ size64_t shape[NC_MAX_VAR_DIMS]; /* shape of the output hyperslab */ NCZSliceProjections* allprojections; - /* Parametric chunk reader so we can do unittests */ + /* Parametric chunk reader */ struct Reader reader; }; /**************************************************/ /* From zchunking.c */ -EXTERNL int NCZ_compute_chunk_ranges(int rank, const NCZSlice*, const size64_t*, NCZChunkRange* ncr); +EXTERNL int NCZ_compute_chunk_ranges(struct Common*, const NCZSlice*, NCZChunkRange* ncr); EXTERNL int NCZ_compute_projections(struct Common*, int r, size64_t chunkindex, const NCZSlice* slice, size_t n, NCZProjection* projections); EXTERNL int NCZ_compute_per_slice_projections(struct Common*, int rank, const NCZSlice*, const NCZChunkRange*, NCZSliceProjections* slp); EXTERNL int NCZ_compute_all_slice_projections(struct Common*, const NCZSlice* slices, const NCZChunkRange*, NCZSliceProjections*); @@ -94,10 +97,7 @@ EXTERNL size64_t NCZ_computelinearoffset(size_t, const size64_t*, const size64_t /* Special entry points for unit testing */ struct Common; struct NCZOdometer; 
-EXTERNL int NCZ_projectslices(size64_t* dimlens, - size64_t* chunklens, - NCZSlice* slices, - struct Common*, struct NCZOdometer**); +EXTERNL int NCZ_projectslices(struct Common*, NCZSlice* slices, struct NCZOdometer**); EXTERNL int NCZ_chunkindexodom(int rank, const NCZChunkRange* ranges, size64_t*, struct NCZOdometer** odom); EXTERNL void NCZ_clearsliceprojections(int count, NCZSliceProjections* slpv); EXTERNL void NCZ_clearcommon(struct Common* common); diff --git a/libnczarr/zclose.c b/libnczarr/zclose.c index eda6fcc2b0..b70e86f794 100644 --- a/libnczarr/zclose.c +++ b/libnczarr/zclose.c @@ -47,7 +47,7 @@ ncz_close_file(NC_FILE_INFO_T* file, int abort) zinfo = file->format_file_info; - if((stat = nczmap_close(zinfo->map,(abort && zinfo->created)?1:0))) + if((stat = nczmap_close(zinfo->map,(abort && zinfo->creating)?1:0))) goto done; NCZ_freestringvec(0,zinfo->envv_controls); NC_authfree(zinfo->auth); diff --git a/libnczarr/zcreate.c b/libnczarr/zcreate.c index 02da9a3309..3243b6f8c7 100644 --- a/libnczarr/zcreate.c +++ b/libnczarr/zcreate.c @@ -50,9 +50,6 @@ ncz_create_file(const char *path, int cmode, size_t initialsz, const char** cont /* Do format specific setup */ - /* Should check if file already exists, and if NC_NOCLOBBER is specified, - return an error; but defer to the map */ - if((retval = ncz_create_dataset(h5,h5->root_grp,controls))) BAIL(retval); diff --git a/libnczarr/zdebug.h b/libnczarr/zdebug.h index 71a9e43537..8c19d9f47a 100644 --- a/libnczarr/zdebug.h +++ b/libnczarr/zdebug.h @@ -5,7 +5,7 @@ #ifndef ZDEBUG_H #define ZDEBUG_H -#undef ZCATCH /* Warning: significant performance impact */ +#define ZCATCH /* Warning: significant performance impact */ #undef ZTRACING /* Warning: significant performance impact */ #undef ZDEBUG /* general debug */ @@ -14,14 +14,11 @@ #include "ncexternl.h" #include "nclog.h" -#ifdef LOGGING #define ZLOG(tag,...) nclog(tag,__VA_ARGS__) -#else -#define ZLOG(tag,...) 
-#endif #ifdef ZCATCH /* Place breakpoint on zbreakpoint to catch errors close to where they occur*/ +/* WARNING: Do not evaluate e more than once */ #define THROW(e) zthrow((e),__FILE__, __func__, __LINE__) #define REPORT(e,msg) zreport((e),(msg),__FILE__, __func__, __LINE__) #define ZCHECK(e) if((e)) {THROW(stat); goto done;} else {} diff --git a/libnczarr/zdim.c b/libnczarr/zdim.c index 4ff48bfe85..352d4a2e25 100644 --- a/libnczarr/zdim.c +++ b/libnczarr/zdim.c @@ -83,8 +83,8 @@ NCZ_def_dim(int ncid, const char *name, size_t len, int *idp) if ((stat = nc4_check_name(name, norm_name))) return stat; - /* Since unlimited is not supported, len > 0 */ - if(len <= 0) + /* Since unlimited is supported, len >= 0 */ + if(len < 0) return NC_EDIMSIZE; /* For classic model: dim length has to fit in a 32-bit unsigned @@ -110,10 +110,14 @@ NCZ_def_dim(int ncid, const char *name, size_t len, int *idp) if ((stat = nc4_dim_list_add(grp, norm_name, len, -1, &dim))) return stat; - /* Create struct for NCZ-specific dim info. */ - if (!(dim->format_dim_info = calloc(1, sizeof(NCZ_DIM_INFO_T)))) - return NC_ENOMEM; - ((NCZ_DIM_INFO_T*)dim->format_dim_info)->common.file = h5; + { + NCZ_DIM_INFO_T* diminfo = NULL; + /* Create struct for NCZ-specific dim info. */ + if (!(diminfo = calloc(1, sizeof(NCZ_DIM_INFO_T)))) + return NC_ENOMEM; + dim->format_dim_info = diminfo; + diminfo->common.file = h5; + } /* Pass back the dimid. 
*/ if (idp) @@ -269,10 +273,3 @@ NCZ_rename_dim(int ncid, int dimid, const char *name) return NC_NOERR; } - -int -NCZ_inq_unlimdims(int ncid, int *ndimsp, int *unlimdimidsp) -{ - if(ndimsp) *ndimsp = 0; - return NC_NOERR; -} diff --git a/libnczarr/zdispatch.c b/libnczarr/zdispatch.c index 8f06e613ca..cdf80af05d 100644 --- a/libnczarr/zdispatch.c +++ b/libnczarr/zdispatch.c @@ -38,7 +38,7 @@ static const NC_Dispatch NCZ_dispatcher = { NCZ_def_dim, NCZ_inq_dimid, NCZ_inq_dim, - NCZ_inq_unlimdim, + NC4_inq_unlimdim, NCZ_rename_dim, NCZ_inq_att, @@ -65,7 +65,7 @@ static const NC_Dispatch NCZ_dispatcher = { NCZ_def_var_fill, NCZ_show_metadata, - NCZ_inq_unlimdims, + NC4_inq_unlimdims, NCZ_inq_ncid, NCZ_inq_grps, diff --git a/libnczarr/zfilter.c b/libnczarr/zfilter.c index 9d2bc68f23..e3896bf9a4 100644 --- a/libnczarr/zfilter.c +++ b/libnczarr/zfilter.c @@ -10,7 +10,6 @@ * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * * distribution tree, or in https://support.hdfgroup.org/ftp/hdf5/releases. * * If you do not have access to either file, you may request a copy from * * help@hdfgroup.org. 
* @@ -368,7 +367,6 @@ NCZ_addfilter(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, unsigned int id, size_t /* Before anything else, find the matching plugin */ if((stat = NCZ_plugin_loaded(id,&plugin))) goto done; if(plugin == NULL) { - ZLOG(NCLOGWARN,"no such plugin: %u",(unsigned)id); stat = THROW(NC_ENOFILTER); goto done; } @@ -438,7 +436,6 @@ NCZ_filter_remove(NC_VAR_INFO_T* var, unsigned int id) goto done; } } - ZLOG(NCLOGERR,"no such filter: %u",(unsigned)id); stat = THROW(NC_ENOFILTER); done: return ZUNTRACE(stat); @@ -567,10 +564,12 @@ NCZ_def_var_filter(int ncid, int varid, unsigned int id, size_t nparams, /* See if deflate &/or szip is defined */ if((stat = NCZ_filter_lookup(var,H5Z_FILTER_DEFLATE,&tmp))) goto done; havedeflate = (tmp == NULL ? 0 : 1); + stat = NC_NOERR; /* reset */ if((stat = NCZ_filter_lookup(var,H5Z_FILTER_SZIP,&tmp))) goto done; haveszip = (tmp == NULL ? 0 : 1); - + stat = NC_NOERR; /* reset */ + /* If incoming filter not already defined, then check for conflicts */ if(oldspec == NULL) { if(id == H5Z_FILTER_DEFLATE) { @@ -711,7 +710,6 @@ NCZ_inq_var_filter_info(int ncid, int varid, unsigned int id, size_t* nparamsp, if(params && spec->hdf5.visible.nparams > 0) memcpy(params,spec->hdf5.visible.params,sizeof(unsigned int)*spec->hdf5.visible.nparams); } else { - ZLOG(NCLOGWARN,"no such filter: %u",(unsigned)id); stat = THROW(NC_ENOFILTER); } done: @@ -846,7 +844,7 @@ NCZ_applyfilterchain(const NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, NClist* cha { int i, stat = NC_NOERR; void* lastbuffer = NULL; /* if not null, then last allocated buffer */ - + ZTRACE(6,"|chain|=%u inlen=%u indata=%p encode=%d", (unsigned)nclistlength(chain), (unsigned)inlen, indata, encode); /* Make sure all the filters are loaded && setup */ @@ -992,7 +990,6 @@ NCZ_filter_build(const NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, const NCjson* j /* Get the id of this codec filter */ if(NCJdictget(jfilter,"id",&jvalue)<0) {stat = NC_EFILTER; goto done;} if(NCJsort(jvalue) != 
NCJ_STRING) { - ZLOG(NCLOGERR,"no such filter: %s",NCJstring(jvalue)); stat = THROW(NC_ENOFILTER); goto done; } @@ -1155,7 +1152,7 @@ static int NCZ_load_all_plugins(void) { int i,j,ret = NC_NOERR; - const char* pluginroots = NULL; + char* pluginroots = NULL; struct stat buf; NClist* dirs = nclistnew(); char* defaultpluginpath = NULL; @@ -1185,7 +1182,7 @@ NCZ_load_all_plugins(void) pluginroots = getenv(PLUGIN_ENV); /* Usually HDF5_PLUGIN_PATH */ if(pluginroots != NULL && strlen(pluginroots) == 0) pluginroots = NULL; if(pluginroots == NULL) { - pluginroots = strdup(defaultpluginpath); + pluginroots = defaultpluginpath; } assert(pluginroots != NULL); ZTRACEMORE(6,"pluginroots=%s",(pluginroots?pluginroots:"null")); diff --git a/libnczarr/zinternal.h b/libnczarr/zinternal.h index d5fcd5ceeb..50ff91e446 100644 --- a/libnczarr/zinternal.h +++ b/libnczarr/zinternal.h @@ -105,8 +105,8 @@ Inserted into any .zattrs ? or should it go into the container? #define NCZ_MAXSTR_DEFAULT 128 /* Mnemonics */ -#define ZCLEAR 0 /* For NCZ_copy_data */ -#define ZCLOSE 1 /* this is closeorabort as opposed to enddef */ +#define ZCLOSE 1 /* this is closeorabort as opposed to enddef */ +#define ZREADING 1 /* this is reading data rather than writing */ /* Useful macro */ #define ncidforx(file,grpid) ((file)->controller->ext_ncid | (grpid)) @@ -142,7 +142,7 @@ typedef struct NCZ_FILE_INFO { unsigned long release; } nczarr_version; } zarr; - int created; /* 1=> created 0=>open */ + int creating; /* 1=> created 0=>open */ int native_endianness; /* NC_ENDIAN_LITTLE | NC_ENDIAN_BIG */ char** envv_controls; /* Envv format */ struct Controls { diff --git a/libnczarr/zmap.c b/libnczarr/zmap.c index 63844ae4ab..eb808d431d 100644 --- a/libnczarr/zmap.c +++ b/libnczarr/zmap.c @@ -41,6 +41,11 @@ nczmap_create(NCZM_IMPL impl, const char *path, int mode, size64_t flags, void* if(mapp) *mapp = NULL; + if((mode & NC_NOCLOBBER) == 0) { + /* Truncate the file */ + if((stat = nczmap_truncate(impl,path))) goto 
done; + } + switch (impl) { case NCZM_FILE: stat = zmap_file.create(path, mode, flags, parameters, &map); @@ -108,6 +113,31 @@ nczmap_open(NCZM_IMPL impl, const char *path, int mode, size64_t flags, void* pa return THROW(stat); } +int +nczmap_truncate(NCZM_IMPL impl, const char *path) +{ + int stat = NC_NOERR; + switch (impl) { + case NCZM_FILE: + if((stat = zmap_file.truncate(path))) goto done; + break; +#ifdef ENABLE_NCZARR_ZIP + case NCZM_ZIP: + if((stat = zmap_zip.truncate(path))) goto done; + break; +#endif +#ifdef ENABLE_S3 + case NCZM_S3: + if((stat = zmap_s3sdk.truncate(path))) goto done; + break; +#endif + default: + {stat = REPORT(NC_ENOTBUILT,"nczmap_truncate"); goto done;} + } +done: + return stat; +} + /**************************************************/ /* API Wrapper */ @@ -139,9 +169,9 @@ nczmap_read(NCZMAP* map, const char* key, size64_t start, size64_t count, void* } int -nczmap_write(NCZMAP* map, const char* key, size64_t start, size64_t count, const void* content) +nczmap_write(NCZMAP* map, const char* key, size64_t count, const void* content) { - return map->api->write(map, key, start, count, content); + return map->api->write(map, key, count, content); } /* Define a static qsort comparator for strings for use with qsort */ diff --git a/libnczarr/zmap.h b/libnczarr/zmap.h index 18962b1d06..451959bcbf 100644 --- a/libnczarr/zmap.h +++ b/libnczarr/zmap.h @@ -141,7 +141,7 @@ of the implementation. #define NCZM_DOT '.' 
-/*Mnemonic*/ +/*Mnemonics*/ #define LOCALIZE 1 /* Forward */ @@ -163,7 +163,6 @@ typedef size64_t NCZM_FEATURES; /* powers of 2 */ #define NCZM_UNIMPLEMENTED 1 /* Unknown/ unimplemented */ #define NCZM_WRITEONCE 2 /* Objects can only be written once */ -#define NCZM_ZEROSTART 4 /* Objects can only be written using a start count of zero */ /* For each dataset, we create what amounts to a class @@ -200,7 +199,7 @@ struct NCZMAP_API { int (*exists)(NCZMAP* map, const char* key); int (*len)(NCZMAP* map, const char* key, size64_t* sizep); int (*read)(NCZMAP* map, const char* key, size64_t start, size64_t count, void* content); - int (*write)(NCZMAP* map, const char* key, size64_t start, size64_t count, const void* content); + int (*write)(NCZMAP* map, const char* key, size64_t count, const void* content); int (*search)(NCZMAP* map, const char* prefix, struct NClist* matches); }; @@ -210,6 +209,7 @@ typedef struct NCZMAP_DS_API { NCZM_FEATURES features; int (*create)(const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp); int (*open)(const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp); + int (*truncate)(const char* url); } NCZMAP_DS_API; extern NCZMAP_DS_API zmap_file; @@ -276,16 +276,18 @@ EXTERNL int nczmap_read(NCZMAP* map, const char* key, size64_t start, size64_t c /** Write the content of a specified content-bearing object. +This assumes that it is not possible to write a subset of an object. +Any such partial writes must be handled at a higher level by +reading the object, modifying it, and then writing the whole object. 
@param map -- the containing map @param key -- the key specifying the content-bearing object -@param start -- offset into the content to start writing @param count -- number of bytes to write @param content -- write the data from this memory @return NC_NOERR if the operation succeeded @return NC_EXXX if the operation failed for one of several possible reasons Note that this makes the key a content-bearing object. */ -EXTERNL int nczmap_write(NCZMAP* map, const char* key, size64_t start, size64_t count, const void* content); +EXTERNL int nczmap_write(NCZMAP* map, const char* key, size64_t count, const void* content); /** Return a vector of names (not keys) representing the @@ -298,6 +300,15 @@ next segment of legal objects that are immediately contained by the prefix key. */ EXTERNL int nczmap_search(NCZMAP* map, const char* prefix, struct NClist* matches); +/** +"Truncate" the storage associated with a map. Delete all contents except +the root, which is sized to zero. +@param url -- the url specifying the root object. 
+@return NC_NOERR if the truncation succeeded +@return NC_EXXX if the operation failed for one of several possible reasons +*/ +EXTERNL int nczmap_truncate(NCZM_IMPL impl, const char* url); + /** Close a map @param map -- the map to close diff --git a/libnczarr/zmap_file.c b/libnczarr/zmap_file.c index 1cd51c5392..4884e3eec8 100755 --- a/libnczarr/zmap_file.c +++ b/libnczarr/zmap_file.c @@ -128,17 +128,17 @@ static void zfrelease(ZFMAP* zfmap, FD* fd); static void zfunlink(const char* canonpath); static int platformerr(int err); -static int platformcreatefile(ZFMAP* map, const char* truepath,FD*); -static int platformcreatedir(ZFMAP* map, const char* truepath); -static int platformopenfile(ZFMAP* zfmap, const char* truepath, FD* fd); -static int platformopendir(ZFMAP* map, const char* truepath); -static int platformdircontent(ZFMAP* map, const char* path, NClist* contents); -static int platformdelete(ZFMAP* map, const char* path, int delroot); -static int platformseek(ZFMAP* map, FD* fd, int pos, size64_t* offset); -static int platformread(ZFMAP* map, FD* fd, size64_t count, void* content); -static int platformwrite(ZFMAP* map, FD* fd, size64_t count, const void* content); -static void platformrelease(ZFMAP* zfmap, FD* fd); -static int platformtestcontentbearing(ZFMAP* zfmap, const char* truepath); +static int platformcreatefile(mode_t mode, const char* truepath,FD*); +static int platformcreatedir(mode_t mode, const char* truepath); +static int platformopenfile(mode_t mode, const char* truepath, FD* fd); +static int platformopendir(mode_t mode, const char* truepath); +static int platformdircontent(const char* path, NClist* contents); +static int platformdelete(const char* path, int delroot); +static int platformseek(FD* fd, int pos, size64_t* offset); +static int platformread(FD* fd, size64_t count, void* content); +static int platformwrite(FD* fd, size64_t count, const void* content); +static void platformrelease(FD* fd); +static int 
platformtestcontentbearing(const char* truepath); #ifdef VERIFY static int verify(const char* path, int isdir); @@ -226,10 +226,10 @@ zfilecreate(const char *path, int mode, size64_t flags, void* parameters, NCZMAP /* If NC_CLOBBER, then delete below file tree */ if(!fIsSet(mode,NC_NOCLOBBER)) - platformdelete(zfmap,zfmap->root,0); + platformdelete(zfmap->root,0); /* make sure we can access the root directory; create if necessary */ - if((stat = platformcreatedir(zfmap, zfmap->root))) + if((stat = platformcreatedir(zfmap->map.mode, zfmap->root))) goto done; /* Dataset superblock will be written by higher layer */ @@ -298,7 +298,7 @@ zfileopen(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** abspath = NULL; /* Verify root dir exists */ - if((stat = platformopendir(zfmap,zfmap->root))) + if((stat = platformopendir(zfmap->map.mode,zfmap->root))) goto done; /* Dataset superblock will be read by higher layer */ @@ -313,6 +313,21 @@ zfileopen(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** return ZUNTRACE(stat); } +static int +zfiletruncate(const char* surl) +{ + int stat = NC_NOERR; + NCURI* url = NULL; + + ZTRACE(6,"url=%s",surl); + ncuriparse(surl,&url); + if(url == NULL) {stat = NC_EURL; goto done;} + platformdelete(url->path,0); /* leave root; ignore errors */ +done: + ncurifree(url); + return stat; +} + /**************************************************/ /* Object API */ @@ -347,7 +362,7 @@ zfilelen(NCZMAP* map, const char* key, size64_t* lenp) switch (stat=zflookupobj(zfmap,key,&fd)) { case NC_NOERR: /* Get file size */ - if((stat=platformseek(zfmap, &fd, SEEK_END, &len))) goto done; + if((stat=platformseek(&fd, SEEK_END, &len))) goto done; break; case NC_ENOOBJECT: stat = NC_EEMPTY; case NC_EEMPTY: break; @@ -376,8 +391,8 @@ zfileread(NCZMAP* map, const char* key, size64_t start, size64_t count, void* co switch (stat = zflookupobj(zfmap,key,&fd)) { case NC_NOERR: - if((stat = platformseek(zfmap, &fd, SEEK_SET, 
&start))) goto done; - if((stat = platformread(zfmap, &fd, count, content))) goto done; + if((stat = platformseek(&fd, SEEK_SET, &start))) goto done; + if((stat = platformread(&fd, count, content))) goto done; break; case NC_ENOOBJECT: stat = NC_EEMPTY; case NC_EEMPTY: break; @@ -390,12 +405,13 @@ zfileread(NCZMAP* map, const char* key, size64_t start, size64_t count, void* co } static int -zfilewrite(NCZMAP* map, const char* key, size64_t start, size64_t count, const void* content) +zfilewrite(NCZMAP* map, const char* key, size64_t count, const void* content) { int stat = NC_NOERR; FD fd = FDNUL; ZFMAP* zfmap = (ZFMAP*)map; /* cast to true type */ char* truepath = NULL; + size64_t start = 0; ZTRACE(5,"map=%s key=%s start=%llu count=%llu",map->url,key,start,count); @@ -413,11 +429,11 @@ zfilewrite(NCZMAP* map, const char* key, size64_t start, size64_t count, const v /* Create truepath */ if((stat = zffullpath(zfmap,key,&truepath))) goto done; /* Create file */ - if((stat = platformcreatefile(zfmap,truepath,&fd))) goto done; + if((stat = platformcreatefile(zfmap->map.mode,truepath,&fd))) goto done; /* Fall thru to write the object */ case NC_NOERR: - if((stat = platformseek(zfmap, &fd, SEEK_SET, &start))) goto done; - if((stat = platformwrite(zfmap, &fd, count, content))) goto done; + if((stat = platformseek(&fd, SEEK_SET, &start))) goto done; + if((stat = platformwrite(&fd, count, content))) goto done; break; default: break; } @@ -439,7 +455,7 @@ zfileclose(NCZMAP* map, int delete) /* Delete the subtree below the root and the root */ if(delete) { - stat = platformdelete(zfmap,zfmap->root,1); + stat = platformdelete(zfmap->root,1); zfunlink(zfmap->root); } nczm_clear(map); @@ -472,7 +488,7 @@ zfilesearch(NCZMAP* map, const char* prefixkey, NClist* matches) else if((stat = nczm_concat(zfmap->root,prefixkey,&fullpath))) goto done; /* get names of the next level path entries */ - switch (stat = platformdircontent(zfmap, fullpath, nextlevel)) { + switch (stat = 
platformdircontent(fullpath, nextlevel)) { case NC_NOERR: /* ok */ break; case NC_EEMPTY: /* not a dir */ @@ -529,7 +545,7 @@ zfcreategroup(ZFMAP* zfmap, const char* key, int nskip) ncbytescat(path,"/"); ncbytescat(path,seg); /* open and optionally create the directory */ - stat = platformcreatedir(zfmap,ncbytescontents(path)); + stat = platformcreatedir(zfmap->map.mode,ncbytescontents(path)); if(stat) goto done; } done: @@ -556,11 +572,11 @@ zflookupobj(ZFMAP* zfmap, const char* key, FD* fd) {goto done;} /* See if this is content-bearing */ - if((stat = platformtestcontentbearing(zfmap,path))) + if((stat = platformtestcontentbearing(path))) goto done; /* Open the file */ - if((stat = platformopenfile(zfmap,path,fd))) + if((stat = platformopenfile(zfmap->map.mode,path,fd))) goto done; done: @@ -574,7 +590,7 @@ static void zfrelease(ZFMAP* zfmap, FD* fd) { ZTRACE(5,"map=%s fd=%d",zfmap->map.url,(fd?fd->fd:-1)); - platformrelease(zfmap,fd); + platformrelease(fd); (void)ZUNTRACE(NC_NOERR); } @@ -586,6 +602,7 @@ NCZMAP_DS_API zmap_file = { 0, zfilecreate, zfileopen, + zfiletruncate, }; static NCZMAP_API zapi = { @@ -663,7 +680,7 @@ platformerr(int err) @return NC_ENOOBJECT if not found */ static int -platformtestcontentbearing(ZFMAP* zfmap, const char* canonpath) +platformtestcontentbearing(const char* canonpath) { int ret = 0; struct stat buf; @@ -685,12 +702,11 @@ platformtestcontentbearing(ZFMAP* zfmap, const char* canonpath) /* Create a file */ static int -platformcreatefile(ZFMAP* zfmap, const char* canonpath, FD* fd) +platformcreatefile(mode_t mode, const char* canonpath, FD* fd) { int stat = NC_NOERR; int ioflags = 0; int createflags = 0; - int mode = zfmap->map.mode; int permissions = NC_DEFAULT_ROPEN_PERMS; ZTRACE(6,"map=%s canonpath=%s",zfmap->map.url,canonpath); @@ -727,11 +743,10 @@ platformcreatefile(ZFMAP* zfmap, const char* canonpath, FD* fd) /* Open a file; fail if it does not exist */ static int -platformopenfile(ZFMAP* zfmap, const char* canonpath, 
FD* fd) +platformopenfile(mode_t mode, const char* canonpath, FD* fd) { int stat = NC_NOERR; int ioflags = 0; - int mode = zfmap->map.mode; int permissions = 0; ZTRACE(6,"map=%s canonpath=%s",zfmap->map.url,canonpath); @@ -764,10 +779,9 @@ platformopenfile(ZFMAP* zfmap, const char* canonpath, FD* fd) /* Create a dir */ static int -platformcreatedir(ZFMAP* zfmap, const char* canonpath) +platformcreatedir(mode_t mode, const char* canonpath) { int ret = NC_NOERR; - int mode = zfmap->map.mode; ZTRACE(6,"map=%s canonpath=%s",zfmap->map.url,canonpath); @@ -795,7 +809,7 @@ platformcreatedir(ZFMAP* zfmap, const char* canonpath) /* Open a dir; fail if it does not exist */ static int -platformopendir(ZFMAP* zfmap, const char* canonpath) +platformopendir(mode_t mode, const char* canonpath) { int ret = NC_NOERR; @@ -826,7 +840,7 @@ There are several possibilities: #ifdef _WIN32 static int -platformdircontent(ZFMAP* zfmap, const char* canonpath, NClist* contents) +platformdircontent(const char* canonpath, NClist* contents) { int ret = NC_NOERR; errno = 0; @@ -839,7 +853,7 @@ platformdircontent(ZFMAP* zfmap, const char* canonpath, NClist* contents) ZTRACE(6,"map=%s canonpath=%s",zfmap->map.url,canonpath); - switch (ret = platformtestcontentbearing(zfmap, canonpath)) { + switch (ret = platformtestcontentbearing(canonpath)) { case NC_EEMPTY: ret = NC_NOERR; break; /* directory */ case NC_NOERR: ret = NC_EEMPTY; goto done; default: goto done; @@ -893,7 +907,7 @@ platformdircontent(ZFMAP* zfmap, const char* canonpath, NClist* contents) #else /*!_WIN32*/ static int -platformdircontent(ZFMAP* zfmap, const char* canonpath, NClist* contents) +platformdircontent(const char* canonpath, NClist* contents) { int ret = NC_NOERR; errno = 0; @@ -901,9 +915,10 @@ platformdircontent(ZFMAP* zfmap, const char* canonpath, NClist* contents) ZTRACE(6,"map=%s canonpath=%s",zfmap->map.url,canonpath); - switch (ret = platformtestcontentbearing(zfmap, canonpath)) { + switch (ret = 
platformtestcontentbearing(canonpath)) { case NC_EEMPTY: ret = NC_NOERR; break; /* directory */ case NC_NOERR: ret = NC_EEMPTY; goto done; + case NC_ENOOBJECT: ret = NC_EEMPTY; goto done; default: goto done; } @@ -929,70 +944,8 @@ platformdircontent(ZFMAP* zfmap, const char* canonpath, NClist* contents) } #endif /*_WIN32*/ -#if 0 -static int -platformdeleter(ZFMAP* zfmap, NClist* segments, int depth) -{ - int ret = NC_NOERR; - struct stat statbuf; - struct dirent* entry = NULL; - DIR* dir = NULL; - char* path = NULL; - char* tmp = NULL; - - if((ret = nczm_join(segments,&path))) goto done; - - /* When running on any platform that can accept drive letters */ - if((ret = nczm_fixpath(path,&tmp))) goto done; - nullfree(path); path = NULL; - if((path = NCpathcvt(tmp))==NULL) {ret = NC_ENOMEM; goto done;} - - errno = 0; - ret = NCstat(path, &statbuf); - if(ret < 0) { - if(errno == ENOENT) {ret = NC_NOERR; goto done;} - else {ret = platformerr(errno); goto done;} - } - /* process this file */ - if(S_ISDIR(statbuf.st_mode)) { - if((dir = NCopendir(path)) == NULL) - {ret = platformerr(errno); goto done;} - for(;;) { - char* seg = NULL; - errno = 0; - entry = readdir(dir); - if(entry == NULL) {ret = platformerr(errno); break;} - /* Ignore "." and ".." 
*/ - if(strcmp(entry->d_name,".")==0) continue; - if(strcmp(entry->d_name,"..")==0) continue; - /* append name to segments */ - if((seg = strdup(entry->d_name)) == NULL) - {ret = NC_ENOMEM; goto done;} - nclistpush(segments,seg); - /* recurse */ - if((ret = platformdeleter(zfmap, segments, depth+1))) goto done; - /* remove+reclaim last segment */ - nclistpop(segments); - nullfree(seg); - } - /* Delete this file */ - rmdir(path); - } else { - assert(S_ISREG(statbuf.st_mode)); - unlink(path); - } -done: - if(dir) NCclosedir(dir); - /* delete this file|dir */ - ret = NCremove(path); - nullfree(path); - nullfree(tmp); - return ZUNTRACE(ret); -} -#endif /*0*/ - static int -platformdeleter(ZFMAP* zfmap, NCbytes* canonpath, int delroot, int depth) +platformdeleter(NCbytes* canonpath, int depth) { int ret = NC_NOERR; int i; @@ -1003,7 +956,7 @@ platformdeleter(ZFMAP* zfmap, NCbytes* canonpath, int delroot, int depth) local = ncbytescontents(canonpath); ZTRACE(6,"map=%s canonpath=%s delroot=%d depth=%d",zfmap->map.url,local,delroot,depth); - ret = platformdircontent(zfmap, local, subfiles); + ret = platformdircontent(local, subfiles); #ifdef DEBUG {int i; fprintf(stderr,"xxx: contents:\n"); @@ -1020,12 +973,12 @@ platformdeleter(ZFMAP* zfmap, NCbytes* canonpath, int delroot, int depth) ncbytescat(canonpath, "/"); ncbytescat(canonpath, name); /* recurse */ - if ((ret = platformdeleter(zfmap, canonpath,delroot,depth+1))) goto done; + if ((ret = platformdeleter(canonpath,depth+1))) goto done; ncbytessetlength(canonpath,tpathlen); /* reset */ ncbytesnull(canonpath); local = ncbytescontents(canonpath); } - if(depth > 0 || delroot) { + if(depth > 0) { #ifdef DEBUG fprintf(stderr,"xxx: remove: %s\n",canonpath); #endif @@ -1043,13 +996,13 @@ fprintf(stderr,"xxx: remove: errno=%d|%s\n",errno,nc_strerror(errno)); #ifdef DEBUG fprintf(stderr,"xxx: remove: %s\n",canonpath); #endif - if(NCremove(local) < 0) {/* kill this file */ + if(NCremove(local) < 0) {/* kill this file */ #ifdef 
DEBUG fprintf(stderr,"xxx: remove: errno=%d|%s\n",errno,nc_strerror(errno)); #endif - ret = errno; - goto done; - } + ret = errno; + goto done; + } break; case NC_ENOTFOUND: default: @@ -1066,7 +1019,7 @@ fprintf(stderr,"xxx: remove: errno=%d|%s\n",errno,nc_strerror(errno)); /* Deep file/dir deletion; depth first */ static int -platformdelete(ZFMAP* zfmap, const char* rootpath, int delroot) +platformdelete(const char* rootpath, int delroot) { int stat = NC_NOERR; NCbytes* canonpath = ncbytesnew(); @@ -1077,7 +1030,19 @@ platformdelete(ZFMAP* zfmap, const char* rootpath, int delroot) ncbytescat(canonpath,rootpath); if(rootpath[strlen(rootpath)-1] == '/') /* elide trailing '/' */ ncbytessetlength(canonpath,ncbyteslength(canonpath)-1); - if((stat = platformdeleter(zfmap,canonpath,delroot,0))) goto done; + /* See if file even exists */ + stat = NCaccess(ncbytescontents(canonpath),F_OK); + if(stat < 0) { + stat = errno; + goto done; + } + if((stat = platformdeleter(canonpath,0))) goto done; + if(delroot) { + if(NCrmdir(rootpath) < 0) { /* kill this dir */ + stat = errno; + goto done; + } + } done: ncbytesfree(canonpath); errno = 0; @@ -1085,7 +1050,7 @@ platformdelete(ZFMAP* zfmap, const char* rootpath, int delroot) } static int -platformseek(ZFMAP* zfmap, FD* fd, int pos, size64_t* sizep) +platformseek(FD* fd, int pos, size64_t* sizep) { int ret = NC_NOERR; off_t size, newsize; @@ -1108,7 +1073,7 @@ platformseek(ZFMAP* zfmap, FD* fd, int pos, size64_t* sizep) } static int -platformread(ZFMAP* zfmap, FD* fd, size64_t count, void* content) +platformread(FD* fd, size64_t count, void* content) { int stat = NC_NOERR; size_t need = count; @@ -1131,7 +1096,7 @@ platformread(ZFMAP* zfmap, FD* fd, size64_t count, void* content) } static int -platformwrite(ZFMAP* zfmap, FD* fd, size64_t count, const void* content) +platformwrite(FD* fd, size64_t count, const void* content) { int ret = NC_NOERR; size_t need = count; @@ -1169,7 +1134,7 @@ platformcwd(char** cwdp) equivalent to 
closing the file descriptor. */ static void -platformrelease(ZFMAP* zfmap, FD* fd) +platformrelease(FD* fd) { ZTRACE(6,"map=%s fd=%d",zfmap->map.url,(fd?fd->fd:-1)); if(fd->fd >=0) NCclose(fd->fd); @@ -1181,7 +1146,7 @@ platformrelease(ZFMAP* zfmap, FD* fd) /* Close FD => return typ to FDNONE */ */ static void -platformclose(ZFMAP* zfmap, FD* fd) +platformclose(FD* fd) { if(fd->typ == FDFILE) { if(fd->fd >=0) close(fd->u,fd); diff --git a/libnczarr/zmap_s3sdk.c b/libnczarr/zmap_s3sdk.c index 76faf56d51..19526b04c5 100644 --- a/libnczarr/zmap_s3sdk.c +++ b/libnczarr/zmap_s3sdk.c @@ -55,7 +55,7 @@ static int zs3len(NCZMAP* map, const char* key, size64_t* lenp); static void freevector(size_t nkeys, char** list); static void zs3initialize(void); -static int s3clear(ZS3MAP* z3map, const char* key); +static int s3clear(void* s3client, const char* bucket, const char* key); static int maketruekey(const char* rootpath, const char* key, char** truekeyp); @@ -136,7 +136,7 @@ zs3create(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** {stat = NC_EURL; goto done;} /* Convert to canonical path-style */ - if((stat = NC_s3urlprocess(url,&z3map->s3))) goto done; + if((stat = NC_s3urlprocess(url,&z3map->s3,NULL))) goto done; /* Verify the root path */ if(z3map->s3.rootkey == NULL) {stat = NC_EURL; goto done;} @@ -152,18 +152,19 @@ zs3create(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** if((stat = NC_s3sdkbucketcreate(z3map->s3client,z3map->s3.region,z3map->s3.bucket,&z3map->errmsg))) goto done; } - /* The root object should not exist */ + /* The root object may or may not already exist */ switch (stat = NC_s3sdkinfo(z3map->s3client,z3map->s3.bucket,z3map->s3.rootkey,NULL,&z3map->errmsg)) { case NC_EEMPTY: /* no such object */ stat = NC_NOERR; /* which is what we want */ errclear(z3map); break; - case NC_NOERR: stat = NC_EOBJECT; goto done; /* already exists */ - default: reporterr(z3map); goto done; - } - if(!stat) { + case NC_NOERR: + 
stat = NC_EOBJECT; + errclear(z3map); /* Delete objects inside root object tree */ - s3clear(z3map,z3map->s3.rootkey); + s3clear(z3map->s3client,z3map->s3.bucket,z3map->s3.rootkey); + goto done; /* already exists */ + default: reporterr(z3map); goto done; } } @@ -214,7 +215,7 @@ zs3open(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** m {stat = NC_EURL; goto done;} /* Convert to canonical path-style */ - if((stat = NC_s3urlprocess(url,&z3map->s3))) goto done; + if((stat = NC_s3urlprocess(url,&z3map->s3,NULL))) goto done; /* Verify root path */ if(z3map->s3.rootkey == NULL) {stat = NC_EURL; goto done;} @@ -240,6 +241,30 @@ zs3open(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** m return ZUNTRACE(stat); } +/* This uses url so we can get bucket */ +static int +zs3truncate(const char *s3url) +{ + int stat = NC_NOERR; + void* s3client = NULL; + NCURI* url = NULL; + NCURI* purl = NULL; + NCS3INFO info; + + ZTRACE(6,"url=%s",s3url); + ncuriparse(s3url,&url); + if(url == NULL) {stat = NC_EURL; goto done;} + if((stat=NC_s3urlprocess(url,&info,&purl))) goto done; + if((s3client = NC_s3sdkcreateclient(&info))) {stat = NC_ES3; goto done;} + if((stat = s3clear(s3client,info.bucket,purl->path))) goto done; +done: + if(s3client) {stat=NC_s3sdkclose(s3client,&info,1,NULL);} + ncurifree(url); + ncurifree(purl); + (void)NC_s3clear(&info); + return stat; +} + /**************************************************/ /* Object API */ @@ -328,56 +353,37 @@ zs3read(NCZMAP* map, const char* key, size64_t start, size64_t count, void* cont @return NC_EXXX return true error */ static int -zs3write(NCZMAP* map, const char* key, size64_t start, size64_t count, const void* content) +zs3write(NCZMAP* map, const char* key, size64_t count, const void* content) { int stat = NC_NOERR; ZS3MAP* z3map = (ZS3MAP*)map; /* cast to true type */ char* chunk = NULL; /* use char* so we can do arithmetic with it */ - size64_t objsize = 0; - size64_t memsize = 0; - 
size64_t endwrite = start+count; /* first pos just above overwritten data */ char* truekey = NULL; - int isempty = 0; + size64_t objsize; - ZTRACE(6,"map=%s key=%s start=%llu count=%llu",map->url,key,start,count); + ZTRACE(6,"map=%s key=%s count=%llu",map->url,key,count); if((stat = maketruekey(z3map->s3.rootkey,key,&truekey))) goto done; /* Apparently S3 has no write byterange operation, so we need to read the whole object, copy data, and then rewrite */ switch (stat=NC_s3sdkinfo(z3map->s3client, z3map->s3.bucket, truekey, &objsize, &z3map->errmsg)) { - case NC_NOERR: /* Figure out the memory size of the object */ - memsize = (endwrite > objsize ? endwrite : objsize); + case NC_NOERR: /* Figure out the new size of the object */ break; case NC_EEMPTY: - memsize = endwrite; - isempty = 1; + stat = NC_NOERR; /* reset */ break; default: reporterr(z3map); goto done; } - if(isempty) - chunk = (char*)calloc(1,memsize); /* initialize it */ - else - chunk = (char*)malloc(memsize); + chunk = (char*)calloc(1,count); /* initialize it */ if(chunk == NULL) {stat = NC_ENOMEM; goto done;} - if(start > 0 && objsize > 0) { /* must read to preserve data before start */ - if((stat = NC_s3sdkread(z3map->s3client, z3map->s3.bucket, truekey, 0, objsize, (void*)chunk, &z3map->errmsg))) - goto done; - } -#if 0 - if(newsize > objsize) { - /* Zeroize the part of the object added */ - memset(((char*)chunk)+objsize,0,(newsize-objsize)); - objsize = newsize; - } -#endif /* overwrite the relevant part of the memory with the contents */ if(count > 0) - memcpy(((char*)chunk)+start,content,count); /* there may be data above start+count */ + memcpy((char*)chunk,content,count); /* (re-)write */ - if((stat = NC_s3sdkwriteobject(z3map->s3client, z3map->s3.bucket, truekey, memsize, (void*)chunk, &z3map->errmsg))) + if((stat = NC_s3sdkwriteobject(z3map->s3client, z3map->s3.bucket, truekey, count, (void*)chunk, &z3map->errmsg))) goto done; done: @@ -396,8 +402,8 @@ zs3close(NCZMAP* map, int deleteit) 
ZTRACE(6,"map=%s deleteit=%d",map->url, deleteit); if(deleteit) - s3clear(z3map,z3map->s3.rootkey); - if(z3map->s3client && z3map->s3.bucket && z3map->s3.rootkey) { + s3clear(z3map->s3client,z3map->s3.bucket,z3map->s3.rootkey); + if(z3map->s3client && z3map->s3.bucket && z3map->s3.rootkey) { NC_s3sdkclose(z3map->s3client, &z3map->s3, deleteit, &z3map->errmsg); } reporterr(z3map); @@ -485,18 +491,19 @@ zs3search(NCZMAP* map, const char* prefix, NClist* matches) /* S3 Utilities */ /* -Remove all objects with keys which have +Remove all objects in bucket with keys which have rootkey as prefix; rootkey is a truekey */ static int -s3clear(ZS3MAP* z3map, const char* rootkey) +s3clear(void* s3client, const char* bucket, const char* rootkey) { int stat = NC_NOERR; char** list = NULL; char** p; size_t nkeys = 0; - if(z3map->s3client && z3map->s3.bucket && rootkey) { - if((stat = NC_s3sdksearch(z3map->s3client, z3map->s3.bucket, rootkey, &nkeys, &list, &z3map->errmsg))) + + if(s3client && bucket && rootkey) { + if((stat = NC_s3sdksearch(s3client, bucket, rootkey, &nkeys, &list, NULL))) goto done; if(list != NULL) { for(p=list;*p;p++) { @@ -505,14 +512,13 @@ s3clear(ZS3MAP* z3map, const char* rootkey) #ifdef S3DEBUG fprintf(stderr,"s3clear: %s\n",*p); #endif - if((stat = NC_s3sdkdeletekey(z3map->s3client, z3map->s3.bucket, *p, &z3map->errmsg))) + if((stat = NC_s3sdkdeletekey(s3client, bucket, *p, NULL))) goto done; } } } done: - reporterr(z3map); NCZ_freestringvec(nkeys,list); return THROW(stat); } @@ -568,6 +574,7 @@ NCZMAP_DS_API zmap_s3sdk = { ZS3_PROPERTIES, zs3create, zs3open, + zs3truncate, }; static NCZMAP_API diff --git a/libnczarr/zmap_zip.c b/libnczarr/zmap_zip.c index a0ad1a58ed..465fc3b982 100755 --- a/libnczarr/zmap_zip.c +++ b/libnczarr/zmap_zip.c @@ -29,7 +29,7 @@ #define NCZM_ZIP_V1 1 -#define ZIP_PROPERTIES (NCZM_WRITEONCE|NCZM_ZEROSTART) +#define ZIP_PROPERTIES (NCZM_WRITEONCE) /* Do a simple mapping of our simplified map model @@ -269,6 +269,58 @@ 
zipopen(const char *path, int mode, size64_t flags, void* parameters, NCZMAP** m return ZUNTRACE(stat); } +static int +ziptruncate(const char* surl) +{ + int stat = NC_NOERR; + NCURI* url = NULL; + int errorp = 0; + zip_t *zip = NULL; + + ZTRACE(6,"url=%s",surl); + ncuriparse(surl,&url); + if(url == NULL) {stat = NC_EURL; goto done;} + zip = zip_open(url->path, ZIP_CREATE | ZIP_TRUNCATE, &errorp); + zip_close(zip); +done: + ncurifree(url); + return stat; +} + +/**************************************************/ +/* Map API */ + +static int +zipclose(NCZMAP* map, int delete) +{ + int stat = NC_NOERR; + int zerrno = 0; + ZZMAP* zzmap = (ZZMAP*)map; + + if(zzmap == NULL) return NC_NOERR; + + ZTRACE(6,"map=%s delete=%d",map->url,delete); + + /* Close the zip */ + if(delete) + zip_discard(zzmap->archive); + else { + if((zerrno=zip_close(zzmap->archive))) + stat = ziperrno(zerrno); + } + if(delete) + NCremove(zzmap->root); + + zzmap->archive = NULL; + nczm_clear(map); + nullfree(zzmap->root); + nullfree(zzmap->dataset); + zzmap->root = NULL; + freesearchcache(zzmap->searchcache); + free(zzmap); + return ZUNTRACE(stat); +} + /**************************************************/ /* Object API */ @@ -375,7 +427,7 @@ zipread(NCZMAP* map, const char* key, size64_t start, size64_t count, void* cont } static int -zipwrite(NCZMAP* map, const char* key, size64_t start, size64_t count, const void* content) +zipwrite(NCZMAP* map, const char* key, size64_t count, const void* content) { int stat = NC_NOERR; ZZMAP* zzmap = (ZZMAP*)map; /* cast to true type */ @@ -387,13 +439,10 @@ zipwrite(NCZMAP* map, const char* key, size64_t start, size64_t count, const voi zip_error_t zerror; void* localbuffer = NULL; - ZTRACE(6,"map=%s key=%s start=%llu count=%llu",map->url,key,start,count); + ZTRACE(6,"map=%s key=%s count=%llu",map->url,key,count); zip_error_init(&zerror); - if(start != 0 && (ZIP_PROPERTIES & NCZM_ZEROSTART)) - {stat = NC_EEDGE; goto done;} - /* Create directories */ if((stat = 
zzcreategroup(zzmap,key,SKIPLAST))) goto done; @@ -445,37 +494,6 @@ zipwrite(NCZMAP* map, const char* key, size64_t start, size64_t count, const voi return ZUNTRACE(stat); } -static int -zipclose(NCZMAP* map, int delete) -{ - int stat = NC_NOERR; - int zerrno = 0; - ZZMAP* zzmap = (ZZMAP*)map; - - if(zzmap == NULL) return NC_NOERR; - - ZTRACE(6,"map=%s delete=%d",map->url,delete); - - /* Close the zip */ - if(delete) - zip_discard(zzmap->archive); - else { - if((zerrno=zip_close(zzmap->archive))) - stat = ziperrno(zerrno); - } - if(delete) - NCremove(zzmap->root); - - zzmap->archive = NULL; - nczm_clear(map); - nullfree(zzmap->root); - nullfree(zzmap->dataset); - zzmap->root = NULL; - freesearchcache(zzmap->searchcache); - free(zzmap); - return ZUNTRACE(stat); -} - /* Return a list of full keys immediately under a specified prefix key. In theory, the returned list should be sorted in lexical order, @@ -719,6 +737,7 @@ NCZMAP_DS_API zmap_zip = { ZIP_PROPERTIES, zipcreate, zipopen, + ziptruncate, }; static NCZMAP_API zapi = { diff --git a/libnczarr/zodom.c b/libnczarr/zodom.c index 79e7040205..7cde2834e3 100644 --- a/libnczarr/zodom.c +++ b/libnczarr/zodom.c @@ -25,8 +25,8 @@ nczodom_new(int rank, const size64_t* start, const size64_t* stop, const size64_ odom->properties.start0 = 1; /* assume */ for(i=0;istart[i] = (size64_t)start[i]; - odom->stop[i] = (size64_t)stop[i]; odom->stride[i] = (size64_t)stride[i]; + odom->stop[i] = (size64_t)stop[i]; odom->len[i] = (size64_t)len[i]; if(odom->start[i] != 0) odom->properties.start0 = 0; if(odom->stride[i] != 1) odom->properties.stride1 = 0; @@ -131,11 +131,11 @@ buildodom(int rank, NCZOdometer** odomp) if((odom = calloc(1,sizeof(NCZOdometer))) == NULL) goto done; odom->rank = rank; - if((odom->start=malloc(sizeof(size64_t)*rank))==NULL) goto nomem; - if((odom->stop=malloc(sizeof(size64_t)*rank))==NULL) goto nomem; - if((odom->stride=malloc(sizeof(size64_t)*rank))==NULL) goto nomem; - 
if((odom->len=malloc(sizeof(size64_t)*rank))==NULL) goto nomem; - if((odom->index=malloc(sizeof(size64_t)*rank))==NULL) goto nomem; + if((odom->start=calloc(1,(sizeof(size64_t)*rank)))==NULL) goto nomem; + if((odom->stop=calloc(1,(sizeof(size64_t)*rank)))==NULL) goto nomem; + if((odom->stride=calloc(1,(sizeof(size64_t)*rank)))==NULL) goto nomem; + if((odom->len=calloc(1,(sizeof(size64_t)*rank)))==NULL) goto nomem; + if((odom->index=calloc(1,(sizeof(size64_t)*rank)))==NULL) goto nomem; *odomp = odom; odom = NULL; } done: @@ -168,7 +168,6 @@ nczodom_skipavail(NCZOdometer* odom) odom->index[odom->rank-1] = odom->stop[odom->rank-1]; } -#if 0 size64_t nczodom_laststride(const NCZOdometer* odom) { @@ -182,4 +181,26 @@ nczodom_lastlen(const NCZOdometer* odom) assert(odom != NULL && odom->rank > 0); return odom->len[odom->rank-1]; } -#endif + +void +nczodom_print(const NCZOdometer* odom) +{ + size_t i; + fprintf(stderr,"odom{rank=%d offset=%llu avail=%llu",odom->rank,nczodom_offset(odom),nczodom_avail(odom)); + fprintf(stderr," start=("); + for(i=0;irank;i++) {fprintf(stderr,"%s%llu",(i==0?"":" "),(unsigned long long)odom->start[i]);} + fprintf(stderr,")"); + fprintf(stderr," stride=("); + for(i=0;irank;i++) {fprintf(stderr,"%s%llu",(i==0?"":" "),(unsigned long long)odom->stride[i]);} + fprintf(stderr,")"); + fprintf(stderr," stop=("); + for(i=0;irank;i++) {fprintf(stderr,"%s%llu",(i==0?"":" "),(unsigned long long)odom->stop[i]);} + fprintf(stderr,")"); + fprintf(stderr," len=("); + for(i=0;irank;i++) {fprintf(stderr,"%s%llu",(i==0?"":" "),(unsigned long long)odom->len[i]);} + fprintf(stderr,")"); + fprintf(stderr," index=("); + for(i=0;irank;i++) {fprintf(stderr,"%s%llu",(i==0?"":" "),(unsigned long long)odom->index[i]);} + fprintf(stderr,")"); + fprintf(stderr,"}\n"); +} diff --git a/libnczarr/zodom.h b/libnczarr/zodom.h index 196d307602..36c52c0158 100644 --- a/libnczarr/zodom.h +++ b/libnczarr/zodom.h @@ -11,8 +11,8 @@ struct NCZSlice; typedef struct NCZOdometer { int 
rank; /*rank */ size64_t* start; - size64_t* stop; /* start + (count*stride) */ size64_t* stride; + size64_t* stop; /* start + (count*stride) */ size64_t* len; /* for computing offset */ size64_t* index; /* current value of the odometer*/ struct NCZOprop { @@ -34,5 +34,8 @@ extern void nczodom_reset(NCZOdometer* odom); extern void nczodom_free(NCZOdometer*); extern size64_t nczodom_avail(const NCZOdometer*); extern void nczodom_skipavail(NCZOdometer* odom); +extern size64_t nczodom_laststride(const NCZOdometer* odom); +extern size64_t nczodom_lastlen(const NCZOdometer* odom); +extern void nczodom_print(const NCZOdometer* odom); #endif /*ZODOM_H*/ diff --git a/libnczarr/zsync.c b/libnczarr/zsync.c index cf3b61ae60..0d2596890c 100644 --- a/libnczarr/zsync.c +++ b/libnczarr/zsync.c @@ -94,6 +94,8 @@ ncz_collect_dims(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NCjson** jdimsp) { int i, stat=NC_NOERR; NCjson* jdims = NULL; + NCjson* jdimsize = NULL; + NCjson* jdimargs = NULL; LOG((3, "%s: ", __func__)); ZTRACE(3,"file=%s grp=%s",file->controller->path,grp->hdr.name); @@ -102,9 +104,26 @@ ncz_collect_dims(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NCjson** jdimsp) for(i=0; idim); i++) { NC_DIM_INFO_T* dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,i); char slen[128]; - snprintf(slen,sizeof(slen),"%llu",(unsigned long long)dim->len); + + snprintf(slen,sizeof(slen),"%llu",(unsigned long long)dim->len); + if((stat = NCJnewstring(NCJ_INT,slen,&jdimsize))) goto done; + + /* If dim is not unlimited, then write in the old format to provide + maximum back compatibility. 
+ */ + if(dim->unlimited) { + NCJnew(NCJ_DICT,&jdimargs); + if((stat = NCJaddstring(jdimargs,NCJ_STRING,"size"))) goto done; + if((stat = NCJappend(jdimargs,jdimsize))) goto done; + jdimsize = NULL; + if((stat = NCJaddstring(jdimargs,NCJ_STRING,"unlimited"))) goto done; + if((stat = NCJaddstring(jdimargs,NCJ_INT,"1"))) goto done; + } else { /* !dim->unlimited */ + jdimargs = jdimsize; + jdimsize = NULL; + } if((stat = NCJaddstring(jdims,NCJ_STRING,dim->hdr.name))) goto done; - if((stat = NCJaddstring(jdims,NCJ_INT,slen))) goto done; + if((stat = NCJappend(jdims,jdimargs))) goto done; } if(jdimsp) {*jdimsp = jdims; jdims = NULL;} done: @@ -348,10 +367,8 @@ ncz_sync_var_meta(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, int isclose) } /* chunks key */ - /* It is not clear if the zarr format supports the concept - of contiguous, so we will simulate by: - 1. setting a flag in _nczvar (below) - 2. making the chunk sizes all be same as the max dim size (here) + /* The zarr format does not support the concept + of contiguous (or compact), so it will never appear in the read case. */ /* list of chunk sizes */ if((stat = NCJaddstring(jvar,NCJ_STRING,"chunks"))) goto done; @@ -360,7 +377,7 @@ ncz_sync_var_meta(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, int isclose) if(zvar->scalar) { NCJaddstring(jtmp,NCJ_INT,"1"); /* one chunk of size 1 */ } else for(i=0;indims;i++) { - size64_t len = (var->storage == NC_CONTIGUOUS ? 
shape[i] : var->chunksizes[i]); + size64_t len = var->chunksizes[i]; snprintf(number,sizeof(number),"%lld",len); NCJaddstring(jtmp,NCJ_INT,number); } @@ -475,16 +492,14 @@ ncz_sync_var_meta(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, int isclose) jdimrefs = NULL; /* Avoid memory problems */ /* Add the _Storage flag */ - /* Record if this is a scalar; use the storage field */ + /* Record if this is a scalar */ if(var->ndims == 0) { - if((stat = NCJnewstring(NCJ_STRING,"scalar",&jtmp)))goto done; - } else if(var->storage == NC_CONTIGUOUS) { - if((stat = NCJnewstring(NCJ_STRING,"contiguous",&jtmp)))goto done; - } else if(var->storage == NC_COMPACT) { - if((stat = NCJnewstring(NCJ_STRING,"compact",&jtmp)))goto done; - } else {/* chunked */ - if((stat = NCJnewstring(NCJ_STRING,"chunked",&jtmp)))goto done; + if((stat = NCJnewstring(NCJ_INT,"1",&jtmp)))goto done; + if((stat = NCJinsert(jncvar,"scalar",jtmp))) goto done; + jtmp = NULL; } + /* everything looks like it is chunked */ + if((stat = NCJnewstring(NCJ_STRING,"chunked",&jtmp)))goto done; if((stat = NCJinsert(jncvar,"storage",jtmp))) goto done; jtmp = NULL; @@ -1378,7 +1393,7 @@ ncz_read_atts(NC_FILE_INFO_T* file, NC_OBJ* container) * * @param file Pointer to file info struct. * @param grp Pointer to grp info struct. - * @param diminfo List of (name,length) pairs + * @param diminfo List of (name,length,isunlimited) triples * * @return ::NC_NOERR No error. 
* @author Dennis Heimbigner @@ -1391,18 +1406,23 @@ define_dims(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* diminfo) ZTRACE(3,"file=%s grp=%s |diminfo|=%u",file->controller->path,grp->hdr.name,nclistlength(diminfo)); /* Reify each dim in turn */ - for(i = 0; i < nclistlength(diminfo); i+=2) { + for(i = 0; i < nclistlength(diminfo); i+=3) { NC_DIM_INFO_T* dim = NULL; size64_t len = 0; + long long isunlim = 0; const char* name = nclistget(diminfo,i); - const char* value = nclistget(diminfo,i+1); + const char* slen = nclistget(diminfo,i+1); + const char* sisunlimited = nclistget(diminfo,i+2); /* Create the NC_DIM_INFO_T object */ - sscanf(value,"%lld",&len); /* Get length */ - if(len <= 0) - {stat = NC_EDIMSIZE; goto done;} + sscanf(slen,"%lld",&len); /* Get length */ + if(sisunlimited != NULL) + sscanf(sisunlimited,"%lld",&isunlim); /* Get unlimited flag */ + else + isunlim = 0; if((stat = nc4_dim_list_add(grp, name, (size_t)len, -1, &dim))) goto done; + dim->unlimited = (isunlim ? 1 : 0); if((dim->format_dim_info = calloc(1,sizeof(NCZ_DIM_INFO_T))) == NULL) {stat = NC_ENOMEM; goto done;} ((NCZ_DIM_INFO_T*)dim->format_dim_info)->common.file = file; @@ -1451,8 +1471,6 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) NCjson* jfilter = NULL; int chainindex; #endif - int varsized; - int suppressvar = 0; /* 1 => make this variable invisible */ ZTRACE(3,"file=%s grp=%s |varnames|=%u",file->controller->path,grp->hdr.name,nclistlength(varnames)); @@ -1535,9 +1553,6 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) } } - /* See if this variable is variable sized */ - varsized = NC4_var_varsized(var); - if(!purezarr) { /* Extract the _NCZARR_ARRAY values */ /* Do this first so we know about storage esp. 
scalar */ @@ -1557,20 +1572,18 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) } if(jncvar == NULL) {stat = NC_ENCZARR; goto done;} assert((NCJsort(jncvar) == NCJ_DICT)); + /* Extract scalar flag */ + if((stat = NCJdictget(jncvar,"scalar",&jvalue))) + goto done; + if(jvalue != NULL) { + var->storage = NC_CHUNKED; + zvar->scalar = 1; + } /* Extract storage flag */ if((stat = NCJdictget(jncvar,"storage",&jvalue))) goto done; if(jvalue != NULL) { - if(strcmp(NCJstring(jvalue),"chunked") == 0) { - var->storage = NC_CHUNKED; - } else if(strcmp(NCJstring(jvalue),"compact") == 0) { - var->storage = NC_COMPACT; - } else if(strcmp(NCJstring(jvalue),"scalar") == 0) { - var->storage = NC_CONTIGUOUS; - zvar->scalar = 1; - } else { /*storage = NC_CONTIGUOUS;*/ - var->storage = NC_CONTIGUOUS; - } + var->storage = NC_CHUNKED; } /* Extract dimrefs list */ switch ((stat = NCJdictget(jncvar,"dimrefs",&jdimrefs))) { @@ -1675,7 +1688,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) /* validate the chunk sizes */ zvar->chunkproduct = 1; for(j=0;j shapes[j]) + if(chunks[j] == 0) {stat = (THROW(NC_ENCZARR)); goto done;} var->chunksizes[j] = (size_t)chunks[j]; zvar->chunkproduct *= chunks[j]; @@ -1703,11 +1716,11 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) #ifdef ENABLE_NCZARR_FILTERS if(var->filters == NULL) var->filters = (void*)nclistnew(); if(zvar->incompletefilters == NULL) zvar->incompletefilters = (void*)nclistnew(); - { int k; chainindex = 0; /* track location of filter in the chain */ if((stat = NCZ_filter_initialize())) goto done; if((stat = NCJdictget(jvar,"filters",&jvalue))) goto done; if(jvalue != NULL && NCJsort(jvalue) != NCJ_NULL) { + int k; if(NCJsort(jvalue) != NCJ_ARRAY) {stat = NC_EFILTER; goto done;} for(k=0;;k++) { jfilter = NULL; @@ -1717,15 +1730,14 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) if((stat = 
NCZ_filter_build(file,var,jfilter,chainindex++))) goto done; } } - } #endif } /* compressor key */ /* From V2 Spec: A JSON object identifying the primary compression codec and providing configuration parameters, or ``null`` if no compressor is to be used. */ - if(!varsized) { /* Only process if variable is fixed-size */ #ifdef ENABLE_NCZARR_FILTERS + { if(var->filters == NULL) var->filters = (void*)nclistnew(); if((stat = NCZ_filter_initialize())) goto done; if((stat = NCJdictget(jvar,"compressor",&jfilter))) goto done; @@ -1733,12 +1745,8 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) if(NCJsort(jfilter) != NCJ_DICT) {stat = NC_EFILTER; goto done;} if((stat = NCZ_filter_build(file,var,jfilter,chainindex++))) goto done; } -#endif } - /* Suppress variable if there are filters and var is not fixed-size */ - if(varsized && nclistlength((NClist*)var->filters) > 0) - suppressvar = 1; - +#endif if((stat = computedimrefs(file, var, purezarr, xarray, rank, dimnames, shapes, var->dim))) goto done; @@ -1749,16 +1757,10 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames) } #ifdef ENABLE_NCZARR_FILTERS - if(!suppressvar) { - /* At this point, we can finalize the filters */ - if((stat = NCZ_filter_setup(var))) goto done; - } + /* At this point, we can finalize the filters */ + if((stat = NCZ_filter_setup(var))) goto done; #endif - if(suppressvar) { - if((stat = NCZ_zclose_var1(var))) goto done; - } - /* Clean up from last cycle */ nclistfreeall(dimnames); dimnames = nclistnew(); nullfree(varpath); varpath = NULL; @@ -1917,18 +1919,30 @@ parse_group_content(NCjson* jcontent, NClist* dimdefs, NClist* varnames, NClist* /* Extract the dimensions defined in this group */ for(i=0;i>>> uploadjson: %s: %s\n",key,content); + /* Write the metadata */ - if((stat = nczmap_write(zmap, key, 0, strlen(content), content))) + if((stat = nczmap_write(zmap, key, strlen(content), content))) goto done; done: @@ -1044,9 +1047,11 @@ 
NCZ_char2fixed(const char** charp, void* fixed, size_t count, int maxstrlen) Wrap NC_copy_data, but take string value into account when overwriting */ int -NCZ_copy_data(NC_FILE_INFO_T* file, NC_TYPE_INFO_T* xtype, const void* memory, size_t count, int noclear, void* copy) +NCZ_copy_data(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, const void* memory, size_t count, int reading, void* copy) { - if(xtype->hdr.id == NC_STRING && !noclear) { + int stat = NC_NOERR; + NC_TYPE_INFO_T* xtype = var->type_info; + if(xtype->hdr.id == NC_STRING && !reading) { size_t i; char** scopy = (char**)copy; /* Reclaim any string fill values in copy */ @@ -1055,7 +1060,8 @@ NCZ_copy_data(NC_FILE_INFO_T* file, NC_TYPE_INFO_T* xtype, const void* memory, s scopy[i] = NULL; } } - return NC_copy_data(file->controller,xtype->hdr.id,memory,count,copy); + stat = NC_copy_data(file->controller,xtype->hdr.id,memory,count,copy); + return stat; } #if 0 diff --git a/libnczarr/zvar.c b/libnczarr/zvar.c index aad9aba97b..27e30a9f19 100644 --- a/libnczarr/zvar.c +++ b/libnczarr/zvar.c @@ -132,14 +132,11 @@ ncz_find_default_chunksizes2(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var) assert(var->dim[d]); if (! var->dim[d]->unlimited) num_values *= (float)var->dim[d]->len; -#ifdef UNLIMITED else { num_unlim++; var->chunksizes[d] = 1; /* overwritten below, if all dims are unlimited */ } -#endif } -#ifdef UNLIMITED /* Special case to avoid 1D vars with unlim dim taking huge amount of space (DEFAULT_CHUNK_SIZE bytes). Instead we limit to about 4KB */ @@ -163,7 +160,6 @@ ncz_find_default_chunksizes2(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var) "chunksize %ld", __func__, var->hdr.name, d, DEFAULT_CHUNK_SIZE, num_values, type_size, var->chunksizes[d])); } } -#endif /* Pick a chunk length for each dimension, if one has not already * been picked above. 
*/ @@ -427,11 +423,9 @@ var->type_info->rc++; if ((retval = nc4_find_dim(grp, dimidsp[d], &dim, &dim_grp))) BAIL(retval); assert(dim && dim->format_dim_info); -#ifdef UNLIMITED /* Check for unlimited dimension and turn off contiguous storage. */ if (dim->unlimited) - var->contiguous = NC_FALSE; -#endif + var->storage = NC_CHUNKED; /* Track dimensions for variable */ var->dimids[d] = dimidsp[d]; var->dim[d] = dim; @@ -528,6 +522,7 @@ ncz_def_var_extra(int ncid, int varid, int *shuffle, int *unused1, int d; int retval = NC_NOERR; int storage = NC_CHUNKED; + size_t contigchunksizes[NC_MAX_VAR_DIMS]; /* Fake chunksizes if storage is contiguous or compact */ LOG((2, "%s: ncid 0x%x varid %d", __func__, ncid, varid)); @@ -599,7 +594,7 @@ ncz_def_var_extra(int ncid, int varid, int *shuffle, int *unused1, {retval = NC_EINVAL; goto done;} /* Set the deflate settings. */ - var->contiguous = NC_FALSE; + var->storage = NC_CONTIGUOUS; var->deflate = *deflate; if (*deflate) var->deflate_level = *deflate_level; @@ -638,13 +633,15 @@ ncz_def_var_extra(int ncid, int varid, int *shuffle, int *unused1, if (nclistlength(((NClist*)var->filters)) > 0) {retval = NC_EINVAL; goto done;} #endif - for (d = 0; d < var->ndims; d++) + for (d = 0; d < var->ndims; d++) { if (var->dim[d]->unlimited) {retval = NC_EINVAL; goto done;} + contigchunksizes[d] = var->dim[d]->len; /* Fake a single big chunk */ + } + chunksizes = (const size_t*)contigchunksizes; storage = NC_CHUNKED; /*only chunked supported */ } - /* Handle chunked storage settings. */ if (storage == NC_CHUNKED && var->ndims == 0) { {retval = NC_EINVAL; goto done;} } else if (storage == NC_CHUNKED && var->ndims > 0) { @@ -665,10 +662,6 @@ ncz_def_var_extra(int ncid, int varid, int *shuffle, int *unused1, {retval = NC_EBADCHUNK; goto done;} } } - else if (storage == NC_CONTIGUOUS || storage == NC_COMPACT) - { - var->storage = NC_CHUNKED; - } /* Is this a variable with a chunksize greater than the current * cache size? 
*/ @@ -1576,6 +1569,9 @@ NCZ_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp, LOG((3, "%s: var->hdr.name %s mem_nc_type %d", __func__, var->hdr.name, mem_nc_type)); + if(h5->no_write) + return NC_EPERM; + zvar = (NCZ_VAR_INFO_T*)var->format_var_info; /* Cannot convert to user-defined types. */ @@ -1601,29 +1597,20 @@ NCZ_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp, if (stridep && stridep[i] <= 0) return NC_ESTRIDE; + fdims[i] = var->dim[i]->len; start[i] = startp[i]; - count[i] = countp ? countp[i] : var->dim[i]->len; + count[i] = countp ? countp[i] : fdims[i]; stride[i] = stridep ? stridep[i] : 1; /* Check to see if any counts are zero. */ if (!count[i]) zero_count++; - fdims[i] = var->dim[i]->len; } } -#ifdef LOOK - /* Get file space of data. */ - if ((file_spaceid = H5Dget_space(ncz_var->hdf_datasetid)) < 0) - BAIL(NC_EHDFERR); - - /* Get the sizes of all the dims and put them in fdims. */ - if (H5Sget_simple_extent_dims(file_spaceid, fdims, fmaxdims) < 0) - BAIL(NC_EHDFERR); -#endif #ifdef LOGGING - log_dim_info(var, fdims, fmaxdims, start, count); + log_dim_info(var, fdims, fdims, start, count); #endif /* Check dimension bounds. Remember that unlimited dimensions can @@ -1638,37 +1625,13 @@ NCZ_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp, if (!dim->unlimited) { /* Allow start to equal dim size if count is zero. */ - if (start[d2] > fdims[d2] || - (start[d2] == fdims[d2] && count[d2] > 0)) + if (start[d2] > fdims[d2] || (start[d2] == fdims[d2] && count[d2] > 0)) BAIL_QUIET(NC_EINVALCOORDS); if (!zero_count && endindex >= fdims[d2]) BAIL_QUIET(NC_EEDGE); } } -#ifdef LOOK - /* Now you would think that no one would be crazy enough to write - a scalar dataspace with one of the array function calls, but you - would be wrong. So let's check to see if the dataset is - scalar. If it is, we won't try to set up a hyperslab. 
*/ - if (H5Sget_simple_extent_type(file_spaceid) == H5S_SCALAR) - { - if ((mem_spaceid = H5Screate(H5S_SCALAR)) < 0) - BAIL(NC_EHDFERR); - } - else - { - if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, start, stride, - count, NULL) < 0) - BAIL(NC_EHDFERR); - - /* Create a space for the memory, just big enough to hold the slab - we want. */ - if ((mem_spaceid = H5Screate_simple(var->ndims, count, NULL)) < 0) - BAIL(NC_EHDFERR); - } -#endif - /* Are we going to convert any data? (No converting of compound or * opaque or vlen types.) We also need to call this code if we are doing * quantization. */ @@ -1707,19 +1670,11 @@ NCZ_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp, else bufr = (void *)data; -#ifdef LOOK - /* Create the data transfer property list. */ - if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0) - BAIL(NC_EHDFERR); - -#endif /*LOOK*/ - - /* Read this hyperslab from memory. Does the dataset have to be + /* Write this hyperslab from memory to file. Does the dataset have to be extended? If it's already extended to the required size, it will do no harm to reextend it to that size. */ if (var->ndims) { -#ifdef UNLIMITED for (d2 = 0; d2 < var->ndims; d2++) { size64_t endindex = start[d2] + stride[d2] * (count[d2] - 1); /* last index written */ @@ -1731,11 +1686,10 @@ NCZ_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp, { if (!zero_count && endindex >= fdims[d2]) { - xtend_size[d2] = (long long unsigned)(endindex+1); - need_to_extend++; + dim->len = (endindex+1); } else - xtend_size[d2] = (long long unsigned)fdims[d2]; + dim->len = fdims[d2]; if (!zero_count && endindex >= dim->len) { @@ -1743,34 +1697,7 @@ NCZ_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp, dim->extended = NC_TRUE; } } - else - { - xtend_size[d2] = (size64_t)dim->len; - } } -#endif - -#ifdef LOOK - /* If we need to extend it, we also need a new file_spaceid - to reflect the new size of the space. 
*/ - if (need_to_extend) - { - LOG((4, "extending dataset")); - /* Convert xtend_size back to hsize_t for use with - * H5Dset_extent. */ - for (d2 = 0; d2 < var->ndims; d2++) - fdims[d2] = (size64_t)xtend_size[d2]; - if (H5Dset_extent(ncz_var->hdf_datasetid, fdims) < 0) - BAIL(NC_EHDFERR); - if (file_spaceid > 0 && H5Sclose(file_spaceid) < 0) - BAIL2(NC_EHDFERR); - if ((file_spaceid = H5Dget_space(ncz_var->hdf_datasetid)) < 0) - BAIL(NC_EHDFERR); - if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, - start, stride, count, NULL) < 0) - BAIL(NC_EHDFERR); - } -#endif } /* Do we need to convert the data? */ @@ -1870,11 +1797,11 @@ NCZ_get_vars(int ncid, int varid, const size_t *startp, const size_t *countp, NC_DIM_INFO_T *dim; size_t file_type_size; size64_t count[NC_MAX_VAR_DIMS]; - size64_t fdims[NC_MAX_VAR_DIMS]; - size64_t fmaxdims[NC_MAX_VAR_DIMS]; + size64_t fdims[NC_MAX_VAR_DIMS]; /* size of the dimensions */ size64_t start[NC_MAX_VAR_DIMS]; size64_t stride[NC_MAX_VAR_DIMS]; - int no_read = 0, provide_fill = 0; + int no_read = 0; + int provide_fill = 0; int fill_value_size[NC_MAX_VAR_DIMS]; int retval, range_error = 0, i, d2; void *bufr = NULL; @@ -1882,8 +1809,6 @@ NCZ_get_vars(int ncid, int varid, const size_t *startp, const size_t *countp, size_t len = 1; NCZ_VAR_INFO_T* zvar = NULL; - NC_UNUSED(fmaxdims); - /* Find info for this file, group, and var. */ if ((retval = nc4_find_grp_h5_var(ncid, varid, &h5, &grp, &var))) return THROW(retval); @@ -1917,29 +1842,16 @@ NCZ_get_vars(int ncid, int varid, const size_t *startp, const size_t *countp, start[i] = startp[i]; count[i] = countp[i]; stride[i] = stridep ? stridep[i] : 1; - /* if any of the count values are zero don't actually read. */ - if (count[i] == 0) - no_read++; /* Get dimension sizes also */ fdims[i] = var->dim[i]->len; - fmaxdims[i] = fdims[i]; + /* if any of the counts are zero don't actually read. */ + if (count[i] == 0) + no_read++; } } -#ifdef LOOK - /* Get file space of data. 
*/ - if ((file_spaceid = H5Dget_space(ncz_var->hdf_datasetid)) < 0) - BAIL(NC_EHDFERR); - - /* Check to ensure the user selection is - * valid. H5Sget_simple_extent_dims gets the sizes of all the dims - * and put them in fdims. */ - if (H5Sget_simple_extent_dims(file_spaceid, fdims, fmaxdims) < 0) - BAIL(NC_EHDFERR); -#endif - #ifdef LOGGING - log_dim_info(var, fdims, fmaxdims, start, count); + log_dim_info(var, fdims, fdims, start, count); #endif /* Check the type_info fields. */ @@ -1978,7 +1890,7 @@ NCZ_get_vars(int ncid, int varid, const size_t *startp, const size_t *countp, bufr = data; /* Check dimension bounds. Remember that unlimited dimensions can - * put data beyond their current length. */ + * read/write data beyond their largest current length. */ for (d2 = 0; d2 < var->ndims; d2++) { size64_t endindex = start[d2] + stride[d2] * (count[d2] - 1); /* last index read */ @@ -1988,17 +1900,10 @@ NCZ_get_vars(int ncid, int varid, const size_t *startp, const size_t *countp, endindex = start[d2]; /* fixup for zero read count */ if (dim->unlimited) { - size_t ulen; - - /* We can't go beyond the largest current extent of - the unlimited dim. */ - if ((retval = NCZ_inq_dim(ncid, dim->hdr.id, NULL, &ulen))) - BAIL(retval); - + size64_t ulen = (size64_t)dim->len; /* Check for out of bound requests. */ /* Allow start to equal dim size if count is zero. 
*/ - if (start[d2] > (size64_t)ulen || - (start[d2] == (size64_t)ulen && count[d2] > 0)) + if (start[d2] > ulen || (start[d2] == ulen && count[d2] > 0)) BAIL_QUIET(NC_EINVALCOORDS); if (count[d2] && endindex >= ulen) BAIL_QUIET(NC_EEDGE); @@ -2236,14 +2141,17 @@ NCZ_inq_var_all(int ncid, int varid, char *name, nc_type *xtypep, /* Short-circuit the filter-related inquiries */ if(shufflep) { *shufflep = 0; - if((retval = NCZ_inq_var_filter_info(ncid,varid,2,NULL,NULL))==NC_NOERR) + if((retval = NCZ_inq_var_filter_info(ncid,varid,H5Z_FILTER_SHUFFLE,NULL,NULL))==NC_NOERR) *shufflep = 1; } + retval = NC_NOERR; /* reset */ + if(fletcher32p) { *fletcher32p = 0; - if((retval = NCZ_inq_var_filter_info(ncid,varid,3,NULL,NULL))==NC_NOERR) + if((retval = NCZ_inq_var_filter_info(ncid,varid,H5Z_FILTER_FLETCHER32,NULL,NULL))==NC_NOERR) *fletcher32p = 1; } + retval = NC_NOERR; /* reset */ /* Now that lazy atts have been read, use the libsrc4 function to * get the answers. */ @@ -2369,3 +2277,40 @@ ncz_gettype(NC_FILE_INFO_T* h5, NC_GRP_INFO_T* container, int xtype, NC_TYPE_INF nullfree(ztype); return THROW(retval); } + +#if 0 +/** +Given start+count+stride+dim vectors, determine the largest +index touched per dimension. If that index is greater-than +the dimension size, then do one of two things: +1. If the dimension is fixed size, then return NC_EDIMSIZE. +2. If the dimension is unlimited, then extend the size of that + dimension to cover that maximum point. 
+ +@param var +@param start vector +@param count vector +@param stride vector +@param reading vs writing +@return NC_EXXX error code +*/ +int +NCZ_update_dim_extents(NC_VAR_INFO_T* var, size64_t* start, size64_t* count, size64_t* stride, int reading) +{ + int r; + int rank = var->ndims; + + NC_UNUSED(reading); + + for(r=0;rdim[r]; + size64_t endpoint; /* compute last point touched */ + endpoint = start[r] + stride[r]*count[r] - stride[r]; + if(dim->len < endpoint) { + if(!dim->unlimited) return NC_EDIMSIZE; + /*else*/ dim->len = endpoint+1; + } + } + return NC_NOERR; +} +#endif diff --git a/libnczarr/zwalk.c b/libnczarr/zwalk.c index bc423706d0..1a7faba2dd 100644 --- a/libnczarr/zwalk.c +++ b/libnczarr/zwalk.c @@ -1,8 +1,3 @@ -/* -Additonal optimizations: -1. slice covers all of exactly one chunk: we can just tranfer whole chunk to/from memory - -*/ /********************************************************************* * Copyright 2018, UCAR/Unidata * See netcdf/COPYRIGHT file for copying and redistribution conditions. 
@@ -12,6 +7,8 @@ Additonal optimizations: #define WDEBUG #undef DFALTOPTIMIZE +#define TRANSFERN + static int initialized = 0; static unsigned int optimize = 0; @@ -27,13 +24,17 @@ static int rangecount(NCZChunkRange range); static int readfromcache(void* source, size64_t* chunkindices, void** chunkdata); static int iswholechunk(struct Common* common,NCZSlice*); static int wholechunk_indices(struct Common* common, NCZSlice* slices, size64_t* chunkindices); +#ifdef TRANSFERN +static int transfern(const struct Common* common, unsigned char* slpptr, unsigned char* memptr, size_t avail, size_t slpstride, void* chunkdata); +#endif -const char* +#if 0 +static const char* astype(int typesize, void* ptr) { switch(typesize) { case 4: { - static char is[8]; + static char is[8]; snprintf(is,sizeof(is),"%u",*((unsigned int*)ptr)); return is; } break; @@ -41,6 +42,7 @@ astype(int typesize, void* ptr) } return "?"; } +#endif /**************************************************/ int @@ -88,6 +90,7 @@ NCZ_transferslice(NC_VAR_INFO_T* var, int reading, { int r,stat = NC_NOERR; size64_t dimlens[NC_MAX_VAR_DIMS]; + unsigned char isunlimited[NC_MAX_VAR_DIMS]; size64_t chunklens[NC_MAX_VAR_DIMS]; size64_t memshape[NC_MAX_VAR_DIMS]; NCZSlice slices[NC_MAX_VAR_DIMS]; @@ -122,7 +125,7 @@ NCZ_transferslice(NC_VAR_INFO_T* var, int reading, common.typesize = typesize; common.cache = zvar->cache; - /* We need to talk scalar into account */ + /* We need to take scalar into account */ common.rank = var->ndims; common.scalar = zvar->scalar; common.swap = (zfile->native_endianness == var->endianness ? 
0 : 1); @@ -130,6 +133,7 @@ NCZ_transferslice(NC_VAR_INFO_T* var, int reading, common.chunkcount = 1; if(common.scalar) { dimlens[0] = 1; + isunlimited[0] = 0; chunklens[0] = 1; slices[0].start = 0; slices[0].stride = 1; @@ -138,12 +142,15 @@ NCZ_transferslice(NC_VAR_INFO_T* var, int reading, common.chunkcount = 1; memshape[0] = 1; } else for(r=0;rdim[r]->len; + dimlens[r] = var->dim[r]->len; + isunlimited[r] = var->dim[r]->unlimited; chunklens[r] = var->chunksizes[r]; slices[r].start = start[r]; slices[r].stride = stride[r]; - slices[r].stop = minimum(start[r]+(count[r]*stride[r]),dimlens[r]); - slices[r].len = dimlens[r]; + slices[r].stop = start[r]+(count[r]*stride[r]); + if(!isunlimited[r]) + slices[r].stop = minimum(slices[r].stop,dimlens[r]); + slices[r].len = var->dim[r]->len; common.chunkcount *= chunklens[r]; memshape[r] = count[r]; } @@ -151,15 +158,19 @@ NCZ_transferslice(NC_VAR_INFO_T* var, int reading, if(wdebug >= 1) { fprintf(stderr,"\trank=%d",common.rank); if(!common.scalar) { - fprintf(stderr," dimlens=%s",nczprint_vector(common.rank,dimlens)); + fprintf(stderr," dimlens=%s",nczprint_vector(common.rank,dimlens)); fprintf(stderr," chunklens=%s",nczprint_vector(common.rank,chunklens)); fprintf(stderr," memshape=%s",nczprint_vector(common.rank,memshape)); } fprintf(stderr,"\n"); } - common.dimlens = dimlens; /* BAD: storing stack vector in a pointer; do not free */ - common.chunklens = chunklens; /* ditto */ - common.memshape = memshape; /* ditto */ + + /* Transfer data */ + memcpy(common.dimlens,dimlens,sizeof(size64_t)*common.rank); + memcpy(common.isunlimited,isunlimited,sizeof(unsigned char)*common.rank); + memcpy(common.chunklens,chunklens,sizeof(size64_t)*common.rank); + memcpy(common.memshape,memshape,sizeof(size64_t)*common.rank); + common.reader.source = ((NCZ_VAR_INFO_T*)(var->format_var_info))->cache; common.reader.read = readfromcache; @@ -196,17 +207,16 @@ NCZ_transfer(struct Common* common, NCZSlice* slices) combinations of chunkranges 
over all dimensions. 2. For each chunk odometer set of indices, we need a projection odometer that walks the set of projection slices for a given - set of chunk ranges over all dimensions. + set of chunk ranges over all dimensions. Note that this is where + we detect unlimited extensions. 3. A memory odometer that walks the memory data to specify the locations in memory for read/write */ - if(wdebug >= 2) { + if(wdebug >= 2) fprintf(stderr,"slices=%s\n",nczprint_slices(common->rank,slices)); - } - if((stat = NCZ_projectslices(common->dimlens, common->chunklens, slices, - common, &chunkodom))) + if((stat = NCZ_projectslices(common, slices, &chunkodom))) goto done; if(wdebug >= 4) { @@ -237,14 +247,24 @@ NCZ_transfer(struct Common* common, NCZSlice* slices) /* Figure out memory address */ memptr = ((unsigned char*)common->memory); slpptr = ((unsigned char*)chunkdata); +#ifdef TRANSFERN + transfern(common,slpptr,memptr,common->chunkcount,1,chunkdata); + if(!common->reading) { + if((stat=NCZ_chunk_cache_modify(common->cache, chunkindices))) goto done; + } +#else if(common->reading) { - if((stat=NCZ_copy_data(common->file,common->var->type_info,slpptr,common->chunkcount,!ZCLEAR,memptr))) goto done; + if((stat=NCZ_copy_data(common->file,common->var,slpptr,common->chunkcount,!ZCLEAR,memptr))) goto done; } else { - if((stat=NCZ_copy_data(common->file,common->var->type_info,memptr,common->chunkcount,ZCLEAR,slpptr))) goto done; + if((stat=NCZ_copy_data(common->file,common->var,memptr,common->chunkcount,ZCLEAR,slpptr))) goto done; + } -// transfern(common,slpptr,memptr,common->chunkcount,1,chunkdata); +#endif + +#ifdef UTTEST if(zutest && zutest->tests & UTEST_WHOLECHUNK) zutest->print(UTEST_WHOLECHUNK, common, chunkindices); +#endif goto done; } @@ -259,9 +279,8 @@ NCZ_transfer(struct Common* common, NCZSlice* slices) size64_t shape[NC_MAX_VAR_DIMS]; chunkindices = nczodom_indices(chunkodom); - if(wdebug >= 1) { + if(wdebug >= 1) fprintf(stderr,"chunkindices: 
%s\n",nczprint_vector(common->rank,chunkindices)); - } for(r=0;rrank;r++) { NCZSliceProjections* slp = &common->allprojections[r]; @@ -294,8 +313,10 @@ NCZ_transfer(struct Common* common, NCZSlice* slices) slpslices[r] = proj[r]->chunkslice; memslices[r] = proj[r]->memslice; } +#ifdef UTTEST if(zutest && zutest->tests & UTEST_TRANSFER) zutest->print(UTEST_TRANSFER, common, chunkodom, slpslices, memslices); +#endif /* Read from cache */ stat = common->reader.read(common->reader.source, chunkindices, &chunkdata); @@ -311,9 +332,12 @@ NCZ_transfer(struct Common* common, NCZSlice* slices) { /* walk with odometer */ if(wdebug >= 1) - fprintf(stderr,"case: odometer:\n"); + fprintf(stderr,"case: odometer:\n"); /* This is the key action: walk this set of slices and transfer data */ if((stat = NCZ_walk(proj,chunkodom,slpodom,memodom,common,chunkdata))) goto done; + if(!common->reading) { + if((stat=NCZ_chunk_cache_modify(common->cache, chunkindices))) goto done; + } } next: nczodom_free(slpodom); slpodom = NULL; @@ -327,7 +351,6 @@ NCZ_transfer(struct Common* common, NCZSlice* slices) return stat; } - #ifdef WDEBUG static void wdebug2(const struct Common* common, unsigned char* slpptr, unsigned char* memptr, size_t avail, size_t stride, void* chunkdata) @@ -360,6 +383,8 @@ wdebug2(const struct Common* common, unsigned char* slpptr, unsigned char* mempt #endif /* +Walk a set of slices and transfer data. 
+ @param projv @param chunkodom @param slpodom @@ -382,53 +407,58 @@ NCZ_walk(NCZProjection** projv, NCZOdometer* chunkodom, NCZOdometer* slpodom, NC unsigned char* memptr0 = NULL; unsigned char* slpptr0 = NULL; - if(!nczodom_more(slpodom)) break; - if(wdebug >= 3) { - fprintf(stderr,"xx.slp: odom: %s\n",nczprint_odom(slpodom)); - fprintf(stderr,"xx.mem: odom: %s\n",nczprint_odom(memodom)); - } + if(wdebug >= 3) { + fprintf(stderr,"xx.slp: odom: %s\n",nczprint_odom(slpodom)); + fprintf(stderr,"xx.mem: odom: %s\n",nczprint_odom(memodom)); + } + + /* Convert the indices to a linear offset WRT to chunk indices */ + slpoffset = nczodom_offset(slpodom); + memoffset = nczodom_offset(memodom); + + /* transfer data between these addresses */ + memptr0 = ((unsigned char*)common->memory)+(memoffset * common->typesize); + slpptr0 = ((unsigned char*)chunkdata)+(slpoffset * common->typesize); - /* Convert the indices to a linear offset WRT to chunk indices */ - slpoffset = nczodom_offset(slpodom); - memoffset = nczodom_offset(memodom); - - /* transfer data between these addresses */ - memptr0 = ((unsigned char*)common->memory)+(memoffset * common->typesize); - slpptr0 = ((unsigned char*)chunkdata)+(slpoffset * common->typesize); - - LOG((1,"%s: slpptr0=%p memptr0=%p slpoffset=%llu memoffset=%lld",__func__,slpptr0,memptr0,slpoffset,memoffset)); - if(zutest && zutest->tests & UTEST_WALK) - zutest->print(UTEST_WALK, common, chunkodom, slpodom, memodom); - /* See if we can transfer multiple values at one shot */ - laststride = slpodom->stride[common->rank-1]; - if(laststride == 1) { - slpavail = nczodom_avail(slpodom); /* How much can we read? 
*/ - memavail = nczodom_avail(memodom); - assert(memavail == slpavail); - nczodom_skipavail(slpodom); - nczodom_skipavail(memodom); + LOG((1,"%s: slpptr0=%p memptr0=%p slpoffset=%llu memoffset=%lld",__func__,slpptr0,memptr0,slpoffset,memoffset)); +#ifdef UTTEST + if(zutest && zutest->tests & UTEST_WALK) + zutest->print(UTEST_WALK, common, chunkodom, slpodom, memodom); +#endif + /* See if we can transfer multiple values at one shot */ + laststride = slpodom->stride[common->rank-1]; + if(laststride == 1) { + slpavail = nczodom_avail(slpodom); /* How much can we read? */ + memavail = nczodom_avail(memodom); + assert(memavail == slpavail); + nczodom_skipavail(slpodom); + nczodom_skipavail(memodom); + } else { + slpavail = 1; + } + if(slpavail > 0) { +if(wdebug > 0) {wdebug2(common,slpptr0,memptr0,slpavail,laststride,chunkdata);} +#ifdef TRANSFERN + if((stat = transfern(common,slpptr0,memptr0,slpavail,nczodom_laststride(slpodom),chunkdata))) goto done; +#else + if(common->reading) { + if((stat=NCZ_copy_data(common->file,common->var,slpptr0,slpavail,!ZCLEAR,memptr0))) goto done; } else { - slpavail = 1; - } - if(slpavail > 0) { -if(wdebug > 0) wdebug2(common,slpptr0,memptr0,slpavail,laststride,chunkdata); - if(common->reading) { - if((stat=NCZ_copy_data(common->file,common->var->type_info,slpptr0,slpavail,!ZCLEAR,memptr0))) goto done; - } else { - if((stat=NCZ_copy_data(common->file,common->var->type_info,memptr0,slpavail,ZCLEAR,slpptr0))) goto done; - } + if((stat=NCZ_copy_data(common->file,common->var,memptr0,slpavail,ZCLEAR,slpptr0))) goto done; } -// if((stat = transfern(common,slpptr0,memptr0,avail,nczodom_laststride(slpodom),chunkdata)))goto done; - nczodom_next(memodom); - nczodom_next(slpodom); +#endif + } + nczodom_next(memodom); + nczodom_next(slpodom); } done: return stat; } #if 0 +#ifdef WDEBUG static void wdebug1(const struct Common* common, unsigned char* srcptr, unsigned char* dstptr, size_t count, size_t stride, void* chunkdata, const char* tag) { @@ 
-436,7 +466,7 @@ wdebug1(const struct Common* common, unsigned char* srcptr, unsigned char* dstpt unsigned char* srcbase = (common->reading?chunkdata:common->memory); unsigned dstoff = (unsigned)(dstptr - dstbase); unsigned srcoff = (unsigned)(srcptr - srcbase); - unsigned srcidx = srcoff / sizeof(unsigned); +// unsigned srcidx = srcoff / sizeof(unsigned); fprintf(stderr,"%s: %s: [%u/%d] %u->%u", tag, @@ -462,47 +492,54 @@ wdebug1(const struct Common* common, unsigned char* srcptr, unsigned char* dstpt #else #define wdebug1(common,srcptr,dstptr,count,srcstride,dststride,chunkdata,tag) #endif +#endif /*0*/ -#if 0 +#ifdef TRANSFERN static int transfern(const struct Common* common, unsigned char* slpptr, unsigned char* memptr, size_t avail, size_t slpstride, void* chunkdata) { int stat = NC_NOERR; + nc_type xtype = common->var->type_info->hdr.id; size_t typesize = common->typesize; size_t len = typesize*avail; size_t m,s; if(common->reading) { - if(slpstride == 1) - memcpy(memptr,slpptr,len); /* straight copy */ - else { + if(slpstride == 1) { + if((stat=NCZ_copy_data(common->file,common->var,slpptr,avail,common->reading,memptr))) goto done; +/// memcpy(memptr,slpptr,len); /* straight copy */ + } else { for(m=0,s=0;sfile,common->var,slpptr+soffset,1,common->reading,memptr+moffset))) goto done; +/// memcpy(memptr+moffset,slpptr+soffset,typesize); } } - if(common->swap) + if(common->swap && xtype < NC_STRING) NCZ_swapatomicdata(len,memptr,common->typesize); } else { /*writing*/ unsigned char* srcbase = (common->reading?chunkdata:common->memory); unsigned srcoff = (unsigned)(memptr - srcbase); unsigned srcidx = srcoff / sizeof(unsigned); (void)srcidx; - if(slpstride == 1) - memcpy(slpptr,memptr,len); /* straight copy */ - else { + if(slpstride == 1) { + if((stat=NCZ_copy_data(common->file,common->var,memptr,avail,common->reading,slpptr))) goto done; +/// memcpy(slpptr,memptr,len); /* straight copy */ + } else { 
for(m=0,s=0;sfile,common->var,memptr+moffset,1,common->reading,slpptr+soffset))) goto done; +/// memcpy(slpptr+soffset,memptr+moffset,typesize); } } - if(common->swap) + if(common->swap && xtype < NC_STRING) NCZ_swapatomicdata(len,slpptr,common->typesize); } +done: return THROW(stat); } -#endif +#endif /*TRANSFERN*/ #if 0 /* This function may not be necessary if code in zvar does it instead */ @@ -528,11 +565,15 @@ NCZ_fillchunk(void* chunkdata, struct Common* common) #endif /* Break out this piece so we can use it for unit testing */ +/** +@param slices +@param common +@param odomp +@return err code +*/ int -NCZ_projectslices(size64_t* dimlens, - size64_t* chunklens, +NCZ_projectslices(struct Common* common, NCZSlice* slices, - struct Common* common, NCZOdometer** odomp) { int stat = NC_NOERR; @@ -549,11 +590,8 @@ NCZ_projectslices(size64_t* dimlens, {stat = NC_ENOMEM; goto done;} memset(ranges,0,sizeof(ranges)); - /* Package common arguments */ - common->dimlens = dimlens; - common->chunklens = chunklens; /* Compute the chunk ranges for each slice in a given dim */ - if((stat = NCZ_compute_chunk_ranges(common->rank,slices,common->chunklens,ranges))) + if((stat = NCZ_compute_chunk_ranges(common,slices,ranges))) goto done; /* Compute the slice index vector */ @@ -740,11 +778,15 @@ NCZ_transferscalar(struct Common* common) /* Figure out memory address */ memptr = ((unsigned char*)common->memory); slpptr = ((unsigned char*)chunkdata); +#ifdef TRANSFERN + if((stat = transfern(common,slpptr,memptr,1,1,chunkdata))) goto done; +#else if(common->reading) { - if((stat=NCZ_copy_data(common->file,common->var->type_info,slpptr,common->chunkcount,!ZCLEAR,memptr))) goto done; + if((stat=NCZ_copy_data(common->file,common->var,slpptr,common->chunkcount,!ZCLEAR,memptr))) goto done; } else { - if((stat=NCZ_copy_data(common->file,common->var->type_info,memptr,common->chunkcount,ZCLEAR,slpptr))) goto done; + 
if((stat=NCZ_copy_data(common->file,common->var,memptr,common->chunkcount,ZCLEAR,slpptr))) goto done; } +#endif done: return stat; diff --git a/libnczarr/zxcache.c b/libnczarr/zxcache.c index fe8c7a686a..7a469587a3 100644 --- a/libnczarr/zxcache.c +++ b/libnczarr/zxcache.c @@ -30,6 +30,12 @@ static int verifycache(NCZChunkCache* cache); static int flushcache(NCZChunkCache* cache); static int constraincache(NCZChunkCache* cache, size64_t needed); +static void +setmodified(NCZCacheEntry* e, int tf) +{ + e->modified = tf; +} + /**************************************************/ /* Dispatch table per-var cache functions */ @@ -166,7 +172,6 @@ NCZ_create_chunk_cache(NC_VAR_INFO_T* var, size64_t chunksize, char dimsep, NCZC if(chunksize == 0) return NC_EINVAL; zvar = (NCZ_VAR_INFO_T*)var->format_var_info; - if((cache = calloc(1,sizeof(NCZChunkCache))) == NULL) {stat = NC_ENOMEM; goto done;} cache->var = var; @@ -342,7 +347,7 @@ NCZ_write_cache_chunk(NCZChunkCache* cache, const size64_t* indices, void* conte {stat = NC_ENOMEM; goto done;} memcpy(entry->data,content,cache->chunksize); } - entry->modified = 1; + setmodified(entry,1); nclistpush(cache->mru,entry); /* MRU order */ #ifdef DEBUG fprintf(stderr,"|cache.write|=%ld\n",nclistlength(cache->mru)); @@ -470,7 +475,7 @@ NCZ_flush_chunk_cache(NCZChunkCache* cache) if((stat=put_chunk(cache,entry))) goto done; } - entry->modified = 0; + setmodified(entry,0); } /* Re-compute space used */ cache->used = 0; @@ -554,28 +559,23 @@ NCZ_reclaim_fill_chunk(NCZChunkCache* zcache) return stat; } -#if 0 int -NCZ_chunk_cache_modified(NCZChunkCache* cache, const size64_t* indices) +NCZ_chunk_cache_modify(NCZChunkCache* cache, const size64_t* indices) { int stat = NC_NOERR; - char* key = NULL; + ncexhashkey_t hkey = 0; NCZCacheEntry* entry = NULL; - int rank = cache->ndims; - /* Create the key for this cache */ - if((stat=NCZ_buildchunkkey(rank, indices, &key))) goto done; + /* the hash key */ + hkey = 
ncxcachekey(indices,sizeof(size64_t)*cache->ndims); /* See if already in cache */ - if(NC_hashmapget(cache->mru, key, strlen(key), (uintptr_t*)entry)) { /* found */ - entry->modified = 1; - } + if((stat=ncxcachelookup(cache->xcache, hkey, (void**)&entry))) {stat = NC_EINTERNAL; goto done;} + setmodified(entry,1); done: - nullfree(key); return THROW(stat); } -#endif /**************************************************/ /* @@ -695,7 +695,7 @@ put_chunk(NCZChunkCache* cache, NCZCacheEntry* entry) #endif path = NCZ_chunkpath(entry->key); - stat = nczmap_write(map,path,0,entry->size,entry->data); + stat = nczmap_write(map,path,entry->size,entry->data); nullfree(path); path = NULL; switch(stat) { @@ -779,7 +779,7 @@ get_chunk(NCZChunkCache* cache, NCZCacheEntry* entry) } if(empty) { /* fake the chunk */ - entry->modified = (file->no_write?0:1); + setmodified(entry,(file->no_write?0:1)); entry->size = cache->chunksize; entry->data = NULL; entry->isfixedstring = 0; @@ -788,7 +788,7 @@ get_chunk(NCZChunkCache* cache, NCZCacheEntry* entry) if(cache->fillchunk == NULL) {if((stat = NCZ_ensure_fill_chunk(cache))) goto done;} if((entry->data = calloc(1,entry->size))==NULL) {stat = NC_ENOMEM; goto done;} - if((stat = NCZ_copy_data(file,xtype,cache->fillchunk,cache->chunkcount,!ZCLEAR,entry->data))) goto done; + if((stat = NCZ_copy_data(file,cache->var,cache->fillchunk,cache->chunkcount,ZREADING,entry->data))) goto done; stat = NC_NOERR; } #ifdef ENABLE_NCZARR_FILTERS diff --git a/libsrc/s3io.c b/libsrc/s3io.c index 6c180e821f..f8bd941b42 100644 --- a/libsrc/s3io.c +++ b/libsrc/s3io.c @@ -176,7 +176,7 @@ s3io_open(const char* path, {status = NC_EURL; goto done;} /* Convert to canonical path-style */ - if((status = NC_s3urlprocess(url,&s3io->s3))) goto done; + if((status = NC_s3urlprocess(url,&s3io->s3,NULL))) goto done; /* Verify root path */ if(s3io->s3.rootkey == NULL) {status = NC_EURL; goto done;} diff --git a/nc_test/test_byterange.sh b/nc_test/test_byterange.sh index 
59fc07ea68..5fb9f77065 100755 --- a/nc_test/test_byterange.sh +++ b/nc_test/test_byterange.sh @@ -20,9 +20,9 @@ URL4e="http://noaa-goes16.s3.amazonaws.com/ABI-L1b-RadF/2022/001/18/OR_ABI-L1b-R fi if test "x$FEATURE_S3TESTS" = xyes ; then # Requires auth -URL3b="s3://unidata-zarr-test-data/byterangefiles/upload3.nc#bytes" +URL3b="s3://${S3TESTBUCKET}/byterangefiles/upload3.nc#bytes" # Requires auth -URL4d="s3://unidata-zarr-test-data/byterangefiles/upload4.nc#bytes&aws.profile=unidata" +URL4d="s3://${S3TESTBUCKET}/byterangefiles/upload4.nc#bytes&aws.profile=unidata" fi URL4f="https://crudata.uea.ac.uk/cru/data/temperature/HadCRUT.4.6.0.0.median.nc#mode=bytes" @@ -88,7 +88,7 @@ diff -wb tmp_$TAG.cdl ${srcdir}/nc_enddef.cdl } if test "x$FEATURE_S3TESTS" = xyes ; then -testsetup https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data +testsetup https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET} fi echo "*** Testing reading NetCDF-3 file with http" @@ -130,7 +130,7 @@ fi # Cleanup if test "x$FEATURE_S3TESTS" = xyes ; then -testcleanup https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data +testcleanup https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET} fi exit diff --git a/nc_test4/Makefile.am b/nc_test4/Makefile.am index f3633218e0..b02056af35 100644 --- a/nc_test4/Makefile.am +++ b/nc_test4/Makefile.am @@ -86,11 +86,13 @@ check_PROGRAMS += tst_multifilter tst_filter_vlen TESTS += tst_filter.sh TESTS += tst_specific_filters.sh TESTS += tst_bloscfail.sh -TESTS += tst_filter_vlen.sh if ISMINGW XFAIL_TESTS += tst_filter.sh endif # ISMINGW +TESTS += tst_filter_vlen.sh +TESTS += tst_filter_misc.sh + if AX_MANUAL # This test is too dangerous to run in a parallel make environment. # It causes race conditions. So suppress and only test by hand. 
@@ -134,7 +136,7 @@ ref_any.cdl tst_specific_filters.sh tst_unknown.sh \ tst_virtual_datasets.c noop1.cdl unknown.cdl \ tst_broken_files.c ref_bloscx.cdl tst_bloscfail.sh \ tst_fixedstring.sh ref_fixedstring.h5 ref_fixedstring.cdl \ -tst_filterinstall.sh tst_filter_vlen.sh +tst_filterinstall.sh tst_filter_vlen.sh tst_filter_misc.sh CLEANFILES = tst_mpi_parallel.bin cdm_sea_soundings.nc bm_chunking.nc \ tst_floats_1D.cdl floats_1D_3.nc floats_1D.cdl tst_*.nc tmp_*.txt \ @@ -151,6 +153,9 @@ tmp_bzip2.c bzip2.nc noop.nc tmp_*.dmp tmp_*.cdl DISTCLEANFILES = findplugin.sh run_par_test.sh +clean-local: + rm -fr testdir_* testset_* + # If valgrind is present, add valgrind targets. @VALGRIND_CHECK_RULES@ diff --git a/nc_test4/findplugin.in b/nc_test4/findplugin.in index b9dad13d2a..08026bec08 100755 --- a/nc_test4/findplugin.in +++ b/nc_test4/findplugin.in @@ -54,8 +54,8 @@ findpluginext() { FP_PLUGIN_EXT="dll" FP_PLUGIN_PRE="__nc" else # unknown - unset FP_PLUGIN_EXT - unset FP_PLUGIN_PRE + echo "Cannot find plugin extension: please execute 'cd ../plugins; make check'" + exit 1 fi } @@ -88,13 +88,16 @@ fi # Verify if test "x$FP_PLUGIN_DIR" = x ; then - echo "***Fail: Could not locate a usable HDF5_PLUGIN_DIR" + echo "***Fail: Could not locate a usable $FP_PLUGIN_DIR" return 1 fi # Make local path FP_PLUGIN_DIR=`${NCPATHCVT} -F $FP_PLUGIN_DIR` HDF5_PLUGIN_DIR="$FP_PLUGIN_DIR" +if test "x$HDF5_PLUGIN_PATH" = x ; then + HDF5_PLUGIN_PATH="$HDF5_PLUGIN_DIR" +fi } findplugin() { @@ -108,14 +111,13 @@ FP_PLUGIN_LIB="${FP_PLUGIN_PRE}${FP_NAME}.${FP_PLUGIN_EXT}" # Verify if ! 
test -f "$FP_PLUGIN_DIR/$FP_PLUGIN_LIB" ; then - echo "***Fail: Could not locate a usable HDF5_PLUGIN_LIB" + echo "***Fail: Could not locate a usable $FP_PLUGIN_LIB" return 1 fi # Set the final output variables HDF5_PLUGIN_LIB="$FP_PLUGIN_LIB" HDF5_PLUGIN_DIR="$FP_PLUGIN_DIR" - return 0 } diff --git a/nc_test4/test_filter_vlen.c b/nc_test4/test_filter_vlen.c index e7775370a2..827087bb51 100644 --- a/nc_test4/test_filter_vlen.c +++ b/nc_test4/test_filter_vlen.c @@ -8,25 +8,16 @@ #include #include +#ifdef HAVE_HDF5_H #include +#endif #include "netcdf.h" #include "netcdf_aux.h" #include "netcdf_filter.h" -#undef TESTODDSIZE - #undef DEBUG -#ifndef H5Z_FILTER_FLETCHER32 -#define H5Z_FILTER_FLETCHER32 3 -#endif - -/* The C standard apparently defines all floating point constants as double; - we rely on that in this code. -*/ -#define DBLVAL 12345678.12345678 - -#define TEST_ID 32768 +#define FILTER_ID 1 /*deflate*/ #define MAXERRS 8 @@ -34,40 +25,29 @@ #define NPARAMS 14 -static unsigned int baseline[NPARAMS]; - static const char* testfile = NULL; #define MAXDIMS 8 -#define DFALT_TESTFILE "tmp_misc.nc" - -#define spec "32768, -17b, 23ub, -25S, 27US, 77, 93U, 789f, 12345678.12345678d, -9223372036854775807L, 18446744073709551615UL" - -#ifdef TESTODDSIZE -#define NDIMS 1 -static size_t dimsize[NDIMS] = {4}; -static size_t chunksize[NDIMS] = {3}; +#ifdef TESTNCZARR +#define DFALT_TESTFILE "file://tmp_filter_vlen.nc#mode=nczarr,file" #else +#define DFALT_TESTFILE "tmp_filter_vlen.nc" +#endif + #define NDIMS 4 static size_t dimsize[NDIMS] = {4,4,4,4}; -static size_t chunksize[NDIMS] = {4,4,4,4}; -#endif static size_t ndims = NDIMS; static size_t totalproduct = 1; /* x-product over max dims */ static size_t actualproduct = 1; /* x-product over actualdims */ -static size_t chunkproduct = 1; /* x-product over actual chunks */ - -static size_t pattern[MAXDIMS]; static int nerrs = 0; static int ncid, varid; static int dimids[MAXDIMS]; -static float* array = NULL; -static float* 
expected = NULL; +static char** array = NULL; /* Forward */ static int test_test1(void); @@ -91,31 +71,10 @@ check(int err,int line) } #define CHECK(x) check(x,__LINE__) -static int -verifychunks(void) -{ - int i; - int store = -1; - size_t localchunks[MAXDIMS]; - memset(localchunks,0,sizeof(localchunks)); - CHECK(nc_inq_var_chunking(ncid, varid, &store, localchunks)); - if(store != NC_CHUNKED) { - fprintf(stderr,"bad chunk store\n"); - return 0; - } - for(i=0;i 1) @@ -247,19 +261,14 @@ init(int argc, char** argv) /* Setup various variables */ totalproduct = 1; actualproduct = 1; - chunkproduct = 1; for(i=0;i 0 ? "FAILED" : "PASS")); exit(nerrs > 0?1:0); } diff --git a/nc_test4/tst_filter_misc.sh b/nc_test4/tst_filter_misc.sh index 7e8ad8bef8..e24191a71c 100755 --- a/nc_test4/tst_filter_misc.sh +++ b/nc_test4/tst_filter_misc.sh @@ -1,9 +1,5 @@ #!/bin/bash -# Test the filter install -# This cannot be run as a regular test -# because installation will not have occurred - if test "x$srcdir" = x ; then srcdir=`pwd`; fi . ../test_common.sh @@ -21,11 +17,21 @@ if test "x$TESTNCZARR" = x1; then s3isolate fi +if test "x$TESTNCZARR" = x1; then + TFAVAIL=${execdir}/test_filter_avail + TFVLEN=${execdir}/test_filter_vlen +else + TFAVAIL=${execdir}/tst_filter_avail + TFVLEN=${execdir}/tst_filter_vlen +fi + # Load the findplugins function . ${builddir}/findplugin.sh echo "findplugin.sh loaded" -if ! filteravail bzip2 ; then +# test for deflate filter +if avail 1 ; then HAVE_DEFLATE=1; else HAVE_DEFLATE=0; fi +if test "$HAVE_DEFLAGE" = 0 ; then echo ">>> Filter bzip2 not available; discontinuing test" exit 0; fi @@ -85,20 +91,12 @@ setfilter() { testavail() { zext=$1 if ! 
filteravail bzip2; then return 0; fi - if test "x$TESTNCZARR" = x1 ; then - ${execdir}/test_filter_avail - else - ${execdir}/tst_filter_avail - fi + ${TFAVAIL} } testvlen() { zext=$1 - if test "x$TESTNCZARR" = x1 ; then - ${execdir}/test_filter_vlen - else - ${execdir}/tst_filter_vlen - fi + ${TFVLEN} } testset() { diff --git a/nc_test4/tst_filter_vlen.c b/nc_test4/tst_filter_vlen.c index 827087bb51..677541b1cb 100644 --- a/nc_test4/tst_filter_vlen.c +++ b/nc_test4/tst_filter_vlen.c @@ -111,8 +111,6 @@ reopen(void) return NC_NOERR; } - - /* Test that a filter is a variable length var is defined */ static int test_test1(void) @@ -123,7 +121,7 @@ test_test1(void) unsigned params[NPARAMS] = {5}; size_t nparams = 0; - fprintf(stderr,"test4: filter on a variable length type.\n"); + fprintf(stderr,"test_test1: filter on a variable length type.\n"); create(); defvar(NC_STRING); /* Do explicit filter; should never fail, but may produce log warning */ @@ -152,7 +150,7 @@ test_test2(void) size_t i; reset(); - fprintf(stderr,"test4: write with filter on a variable length type.\n"); + fprintf(stderr,"test_test2: write with filter on a variable length type.\n"); /* generate the data to write */ for(i=0;i #include "nc_logging.h" +#undef BE_DEBUG + +#ifdef TESTNCZARR +#include "test_utils.h" +#define FILE_NAME_NC "file://tmp_h5_endians.file#mode=nczarr,file" +#else #define FILE_NAME_NC "tst_h5_endians.nc" +#endif #define NDIM 10 #define NLON 20 @@ -31,6 +38,38 @@ #define LE_DBL_VARNAME "dbl_le" #define BE_DBL_VARNAME "dbl_be" +#if defined BE_DEBUG || defined TESTNCZARR +static float +f32swap(float x) +{ + union { + unsigned char bytes[4]; + float f; + } u; + unsigned char c; + u.f = x; + c = u.bytes[0]; u.bytes[0] = u.bytes[3]; u.bytes[3] = c; + c = u.bytes[1]; u.bytes[1] = u.bytes[2]; u.bytes[2] = c; + return u.f; +} + +static double +f64swap(double x) +{ + union { + unsigned char bytes[8]; + double d; + } u; + unsigned char c; + u.d = x; + c = u.bytes[0]; u.bytes[0] = 
u.bytes[7]; u.bytes[7] = c; + c = u.bytes[1]; u.bytes[1] = u.bytes[6]; u.bytes[6] = c; + c = u.bytes[2]; u.bytes[2] = u.bytes[5]; u.bytes[5] = c; + c = u.bytes[3]; u.bytes[3] = u.bytes[4]; u.bytes[4] = c; + return u.d; +} +#endif + int main() { int ncid, dimid; @@ -164,6 +203,135 @@ int main() { return retval; } + /* + * 3. Reopen netcdf-generated file, write data and reread. + */ + printf("** Read/Write test files.\n"); + { + ncid = 0; + le_float_varid = 0; + be_float_varid = 0; + le_int_varid = 0; + be_int_varid = 0; + le_dbl_varid = 0; + be_dbl_varid = 0; + + printf("*** %s\n",FILE_NAME_NC); + if ((retval = nc_open(FILE_NAME_NC, NC_NETCDF4 | NC_WRITE, &ncid))) + return retval; + + if ((retval = nc_inq_varid(ncid,LE_FLOAT_VARNAME,&le_float_varid))) + return retval; + if ((retval = nc_inq_varid(ncid,BE_FLOAT_VARNAME,&be_float_varid))) + return retval; + if ((retval = nc_inq_varid(ncid,LE_INT_VARNAME,&le_int_varid))) + return retval; + if ((retval = nc_inq_varid(ncid,BE_INT_VARNAME,&be_int_varid))) + return retval; + if ((retval = nc_inq_varid(ncid,LE_DBL_VARNAME,&le_dbl_varid))) + return retval; + if ((retval = nc_inq_varid(ncid,BE_DBL_VARNAME,&be_dbl_varid))) + return retval; + + { + int i, failed; + int idata_in[NDIM]; + float fdata_in[NDIM]; + double ddata_in[NDIM]; + int idata_le_out[NDIM]; + float fdata_le_out[NDIM]; + double ddata_le_out[NDIM]; + int idata_be_out[NDIM]; +#if defined BE_DEBUG || defined TESTNCZARR + float fdata_be_out[NDIM]; + double ddata_be_out[NDIM]; +#endif + + /* Setup data in/out */ + for(i=0;i -int main(int argc, char* argv[]) +#ifdef TESTNCZARR +#include "test_utils.h" +#define FILE_NAME "file://tmp_put_vars_two_unlim_dim.file#mode=nczarr,file" +#else +#define FILE_NAME "tst_put_vars_two_unlim_dim.nc" +#endif + +int +main(int argc, char* argv[]) { int ret; int ncid; @@ -29,7 +37,7 @@ int main(int argc, char* argv[]) size_t count = 5; double vals[] = { 1.0, 2.0, 3.0, 4.0, 5.0 }; - if ((ret = nc_create("tst_put_vars_two_unlim_dim.nc", 
NC_NETCDF4 | NC_CLOBBER, &ncid))) { + if ((ret = nc_create(FILE_NAME, NC_NETCDF4 | NC_CLOBBER, &ncid))) { printf("nc_create(...): error code = %d\n", ret); return -1; } diff --git a/nc_test4/tst_unlim_vars.c b/nc_test4/tst_unlim_vars.c index 252ebdd69b..e6f157a1ce 100644 --- a/nc_test4/tst_unlim_vars.c +++ b/nc_test4/tst_unlim_vars.c @@ -11,7 +11,12 @@ #include #include "err_macros.h" +#ifdef TESTNCZARR +#define FILE_NAME "file://tmp_unlim_vars.file#mode=nczarr,file" +#else #define FILE_NAME "tst_unlim_vars.nc" +#endif + #define SFC_TEMP_NAME "surface_temperature" #define LAT_NAME "lat" #define LAT_LEN 2 @@ -25,6 +30,8 @@ int main(int argc, char **argv) { + int stat = NC_NOERR; + printf("\n*** Testing netcdf-4 variables with unlimited dimensions.\n"); printf("*** Testing file with one var, one unlim dim..."); { @@ -74,9 +81,20 @@ main(int argc, char **argv) if (len_in != LAT_LEN || strcmp(name_in, LAT_NAME)) ERR; if (nc_inq_dim(ncid, 2, name_in, &len_in)) ERR; if (len_in != LON_LEN || strcmp(name_in, LON_NAME)) ERR; + + { + size_t chunksizes[3]; + int i,storage; + fprintf(stderr,">>> chunks ="); + if (nc_inq_var_chunking(ncid, sfc_tempid, &storage, chunksizes)) ERR; + for(i=0;i<3;i++) fprintf(stderr," %llu", (unsigned long long)chunksizes[i]); + fprintf(stderr," ; storage=%d\n", storage); + } + if (nc_close(ncid)) ERR; - if (nc_open(FILE_NAME, 0, &ncid)) ERR; + if ((stat=nc_open(FILE_NAME, 0, &ncid))) + ERR; /* Check metadata. 
*/ if (nc_inq(ncid, &ndims_in, &nvars_in, &natts_in, &unlimdimid_in)) ERR; diff --git a/ncdap_test/CMakeLists.txt b/ncdap_test/CMakeLists.txt index d4376e356f..0ac0da75ba 100644 --- a/ncdap_test/CMakeLists.txt +++ b/ncdap_test/CMakeLists.txt @@ -53,7 +53,7 @@ IF(ENABLE_TESTS) add_sh_test(ncdap tst_longremote3) SET_TESTS_PROPERTIES(ncdap_tst_longremote3 PROPERTIES RUN_SERIAL TRUE) ENDIF(ENABLE_DAP_LONG_TESTS) - IF(FALSE) + IF(TRUE) # Apparently iridl.ldeo.columbia.edu is down for now add_sh_test(ncdap tst_encode) # not yet fixed diff --git a/ncdap_test/Makefile.am b/ncdap_test/Makefile.am index e966c7409e..a9983842b6 100644 --- a/ncdap_test/Makefile.am +++ b/ncdap_test/Makefile.am @@ -47,10 +47,11 @@ if BUILD_UTILITIES TESTS += tst_ber.sh tst_remote3.sh tst_formatx.sh testurl.sh tst_fillmismatch.sh tst_zero_len_var.sh endif -if AX_IGNORE -# Apparently iridl.ldeo.columbia.edu is down for now +# Remote servers +# iridl.ldeo.columbia.edu TESTS += tst_encode.sh -endif +# test.opendap.org +TESTS += tst_hyrax.sh TESTS += test_partvar @@ -92,7 +93,7 @@ endif #ENABLE_DAP_REMOTE_TESTS endif #ENABLE_DAP # Need to add subdirs -SUBDIRS = testdata3 expected3 expectremote3 +SUBDIRS = testdata3 expected3 expectremote3 expectedhyrax EXTRA_DIST = tst_ncdap3.sh \ tst_remote3.sh \ @@ -100,7 +101,7 @@ EXTRA_DIST = tst_ncdap3.sh \ tst_zero_len_var.sh \ tst_filelists.sh tst_urls.sh tst_utils.sh \ t_dap.c CMakeLists.txt tst_formatx.sh testauth.sh testurl.sh \ - t_ncf330.c tst_ber.sh tst_fillmismatch.sh tst_encode.sh \ + t_ncf330.c tst_ber.sh tst_fillmismatch.sh tst_encode.sh tst_hyrax.sh \ findtestserver.c.in CLEANFILES = test_varm3 test_cvt3 file_results/* remote_results/* datadds* t_dap3a test_nstride_cached *.exe tmp*.txt @@ -130,6 +131,7 @@ clean-local: clean-local-check clean-local-check: -rm -rf results -rm -f .dodsrc + -rm -fr testdir_* testset_* # If valgrind is present, add valgrind targets. 
@VALGRIND_CHECK_RULES@ diff --git a/ncdap_test/expectedhyrax/CMakeLists.txt b/ncdap_test/expectedhyrax/CMakeLists.txt new file mode 100644 index 0000000000..5df1d4251b --- /dev/null +++ b/ncdap_test/expectedhyrax/CMakeLists.txt @@ -0,0 +1,12 @@ +# Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, +# 2015, 2016, 2017, 2018 +# University Corporation for Atmospheric Research/Unidata. + +# See netcdf-c/COPYRIGHT file for more info. +FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*) +FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE) + +FILE(GLOB CUR_EXTRA_DIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*) +SET(CUR_EXTRA_DIST ${CUR_EXTRA_DIST} CMakeLists.txt Makefile.am) +ADD_EXTRA_DIST("${CUR_EXTRA_DIST}") diff --git a/ncdap_test/expectedhyrax/ECMWF_ERA-40_subset.nc.hyrax b/ncdap_test/expectedhyrax/ECMWF_ERA-40_subset.nc.hyrax new file mode 100644 index 0000000000..b4e8d4a7b6 --- /dev/null +++ b/ncdap_test/expectedhyrax/ECMWF_ERA-40_subset.nc.hyrax @@ -0,0 +1,23 @@ +netcdf ECMWF_ERA-40_subset { +dimensions: + time = UNLIMITED ; // (1 currently) + latitude = 1 ; + longitude = 1 ; +variables: + short tcw.tcw(time, latitude, longitude) ; + tcw.tcw:scale_factor = 0.0013500981745481 ; + tcw.tcw:add_offset = 44.3250482744756 ; + tcw.tcw:_FillValue = -32767s ; + tcw.tcw:missing_value = -32767s ; + tcw.tcw:units = "kg m**-2" ; + tcw.tcw:long_name = "Total column water" ; + +// global attributes: + :Conventions = "CF-1.0" ; + :history = "2004-09-15 17:04:29 GMT by mars2netcdf-0.92" ; + :DODS_EXTRA.Unlimited_Dimension = "time" ; +data: + + tcw.tcw = + _ ; +} diff --git a/ncdap_test/expectedhyrax/Makefile.am b/ncdap_test/expectedhyrax/Makefile.am new file mode 100644 index 0000000000..528537b610 --- /dev/null +++ b/ncdap_test/expectedhyrax/Makefile.am @@ -0,0 +1,8 @@ +## This is a automake file, 
part of Unidata's netCDF package. +# Copyright 2018, see the COPYRIGHT file for more information. + +# This is to include the libnc-dap test comparison files + +# $Id: Makefile.am,v 1.9 2009/11/20 03:06:32 dmh Exp $ + +EXTRA_DIST = cami_0000-09-01_64x128_L26_c030918.nc.hyrax ECMWF_ERA-40_subset.nc.hyrax CMakeLists.txt diff --git a/ncdap_test/expectedhyrax/cami_0000-09-01_64x128_L26_c030918.nc.hyrax b/ncdap_test/expectedhyrax/cami_0000-09-01_64x128_L26_c030918.nc.hyrax new file mode 100644 index 0000000000..efa8ef9cfb --- /dev/null +++ b/ncdap_test/expectedhyrax/cami_0000-09-01_64x128_L26_c030918.nc.hyrax @@ -0,0 +1,34 @@ +netcdf cami_0000-09-01_64x128_L26_c030918 { +dimensions: + ilev = 27 ; +variables: + double hyai.hyai(ilev) ; + hyai.hyai:long_name = "hybrid A coefficient at layer interfaces" ; + hyai.hyai:_FillValue = 9.99999961690316e+35 ; + +// global attributes: + :Conventions = "CF-1.0" ; + :logname = "olson" ; + :host = "bb0001en" ; + :source = "Interpolated from:/fs/cgd/data0/olson/inputIC/newICeul.cam2.i.0000-09-01-00000.nc::CAM" ; + :case = "cam2run" ; + :title = "Interpolated from:/fs/cgd/data0/olson/inputIC/newICeul.cam2.i.0000-09-01-00000.nc::atm ver atm, eul ver v013, case newICeul" ; + :history = "\n", + "05/07/03 12:15:34 olson:chinookfe:interpic -t SEP1.T42L26.gaussian.template.nc /fs/cgd/data0/olson/inputIC/newICeul.cam2.i.0000-09-01-00000.nc cami_0000-09-01_64x128_L26_c030507.nc\n", + "definesurf -t /fs/cgd/csm/inputdata/atm/cam1/hrtopo/topo.nc cami_0000-09-01_64x128_L26_c030507.nc\n", + "definesurf -t /fs/cgd/csm/inputdata/atm/cam2/hrtopo/topo.nc cami_0000-09-01_64x128_L26_c030507.nc\n", + "definesurf -t /fs/cgd/csm/inputdata/atm/cam2/hrtopo/topo-usgs-10min.nc cami_0000-09-01_64x128_L26_c030507.nc\n", + "definesurf -t /fs/cgd/csm/inputdata/atm/cam2/hrtopo/topo10min.merged_c030506.nc cami_0000-09-01_64x128_L26_c030507.nc\n", + "definesurf -t /fs/cgd/csm/inputdata/atm/cam2/hrtopo/topo10min.merged_c030506.nc -l 
cami_0000-09-01_64x128_L26_c030624.nc.new" ; + :make_ross = "true" ; + :DODS_EXTRA.Unlimited_Dimension = "time" ; +data: + + hyai.hyai = 0.00219406700000001, 0.00489520900000001, 0.009882418, + 0.01805201, 0.02983724, 0.0446233400000002, 0.0616058700000002, + 0.0785124300000004, 0.0773127100000002, 0.0759013100000003, + 0.0742408600000002, 0.0722874400000002, 0.0699893299999998, 0.06728574, + 0.06410509, 0.0603632200000002, 0.0559611100000001, 0.0507822500000001, + 0.0446896000000001, 0.0375219099999999, 0.0290894900000001, 0.02084739, + 0.01334443, 0.00708499000000001, 0.00252136, 0, 0 ; +} diff --git a/ncdap_test/test_manyurls.c b/ncdap_test/test_manyurls.c index 00343a0fb8..6f2ec01e38 100644 --- a/ncdap_test/test_manyurls.c +++ b/ncdap_test/test_manyurls.c @@ -5,6 +5,8 @@ #include "manyurls.h" +#undef VERBOSE + int main() { int i,ncid; @@ -14,13 +16,17 @@ int main() char* tp = *p; int mode = 0; int status = -1; +#ifdef VERBOSE printf("Opening: %s\n",tp); +#endif status = nc_open(tp, mode, &ncid); switch(status) { case NC_NOERR: break; case NC_ENOTFOUND: +#ifdef VERBOSE printf("{%d} %s\n",i,tp); +#endif status = NC_NOERR; break; default: diff --git a/ncdap_test/tst_hyrax.sh b/ncdap_test/tst_hyrax.sh new file mode 100755 index 0000000000..0a6d441561 --- /dev/null +++ b/ncdap_test/tst_hyrax.sh @@ -0,0 +1,67 @@ +#!/bin/sh + +if test "x$srcdir" = "x"; then srcdir=`dirname $0`; fi +export srcdir; + +. 
../test_common.sh + +set -e + +# Uncomment to get more verbose info +#VERBOSE=1 + +isolate testdir_hyrax +THISDIR=`pwd` +cd $ISOPATH + +echo "test_hyrax.sh:" + +WD=`pwd` +cd ${top_srcdir}/ncdap_test/expectedhyrax; BASELINEHY=`pwd` ; cd ${WD} +TESTSERVER="http://test.opendap.org" + +F="\ +opendap/netcdf/examples/ECMWF_ERA-40_subset.nc?tcw.tcw[1][1][1] \ +opendap/netcdf/examples/cami_0000-09-01_64x128_L26_c030918.nc?hyai.hyai +" + +failure() { + echo "*** Fail: $1" + exit 1 +} + +makehyraxurl() { + URL="${TESTSERVER}/${PREFIX}/${FILE}${QUERY}" + URL="$URL#dap2&log" + if test "x$VERBOSE" != x ; then URL="$URL&show=fetch"; fi +} + +hyraxsplit() { + P="$1" + QUERY=`echo $P | cut -d? -f2` + if test "x$QUERY" = "x$P" ; then QUERY="" ; else QUERY="?${QUERY}" ; fi + P=`echo $P | cut -d? -f1` + FILE=`basename $P` + PREFIX=`dirname $P` +} + +if test "x${RESET}" = x1 ; then rm -fr ${BASELINEHY}/*.hyrax ; fi +for f in $F ; do + hyraxsplit $f + makehyraxurl + echo "testing: $URL" + if ! ${NCDUMP} ${DUMPFLAGS} "${URL}" > ./${FILE}.hyrax; then + failure "${URL}" + fi + if test "x${TEST}" = x1 ; then + if ! 
diff -wBb ${BASELINEHY}/${FILE}.hyrax ./${FILE}.hyrax ; then + failure "diff ${FILE}.hyrax" + fi + elif test "x${RESET}" = x1 ; then + echo "${FILE}:" + cp ./${FILE}.hyrax ${BASELINEHY}/${FILE}.hyrax + fi +done + +echo "*** Pass" +exit 0 diff --git a/ncdump/Makefile.am b/ncdump/Makefile.am index 49e34c0ffa..04a3e0f7f8 100644 --- a/ncdump/Makefile.am +++ b/ncdump/Makefile.am @@ -251,4 +251,4 @@ scope_*.nc copy_scope_*.cdl keyword5.nc tst_enum_undef.cdl tst_times_nc4.cdl # Remove directories clean-local: - rm -fr rcmergedir rchome testdir_ncdump_* + rm -fr rcmergedir rchome testset_* diff --git a/ncdump/ncdump.c b/ncdump/ncdump.c index 3769fa30ad..f577079813 100644 --- a/ncdump/ncdump.c +++ b/ncdump/ncdump.c @@ -1767,7 +1767,6 @@ do_ncdump_rec(int ncid, const char *path) for (varid = 0; varid < nvars; varid++) { NC_CHECK( nc_inq_varndims(ncid, varid, &var.ndims) ); - if(var.dims != NULL) free(var.dims); var.dims = (int *) emalloc((var.ndims + 1) * sizeof(int)); NC_CHECK( nc_inq_var(ncid, varid, var.name, &var.type, 0, var.dims, &var.natts) ); @@ -1890,6 +1889,7 @@ do_ncdump_rec(int ncid, const char *path) pr_att_specials(ncid, kind, varid, &var); } #endif /* USE_NETCDF4 */ + if(var.dims) {free((void*)var.dims); var.dims = NULL;} } if (ngatts > 0 || formatting_specs.special_atts) { @@ -1927,7 +1927,7 @@ do_ncdump_rec(int ncid, const char *path) if (formatting_specs.nlvars > 0 && ! 
idmember(vlist, varid)) continue; NC_CHECK( nc_inq_varndims(ncid, varid, &var.ndims) ); - if(var.dims != NULL) free(var.dims); + if(var.dims != NULL) {free(var.dims); var.dims = NULL;} var.dims = (int *) emalloc((var.ndims + 1) * sizeof(int)); NC_CHECK( nc_inq_var(ncid, varid, var.name, &var.type, 0, var.dims, &var.natts) ); @@ -1975,6 +1975,7 @@ do_ncdump_rec(int ncid, const char *path) } if(var.fillvalp != NULL) {NC_CHECK(nc_reclaim_data_all(ncid,var.tinfo->tid,var.fillvalp,1)); var.fillvalp = NULL;} + if(var.dims) {free(var.dims); var.dims = NULL;} } if (vdims) { free(vdims); @@ -2396,7 +2397,7 @@ main(int argc, char *argv[]) nc_set_log_level(level); } #endif - ncsetlogging(1); + ncsetloglevel(NCLOGNOTE); break; case 'F': formatting_specs.filter_atts = true; diff --git a/ncdump/ocprint.c b/ncdump/ocprint.c index 72a45fb3ce..1e25f2daf6 100755 --- a/ncdump/ocprint.c +++ b/ncdump/ocprint.c @@ -279,7 +279,7 @@ main(int argc, char **argv) if(ocopt.logging) { ncloginit(); - ncsetlogging(1); + ncsetloglevel(NCLOGNOTE); if(!nclogopen(NULL)) fprintf(stderr,"Failed to open logging output\n"); } diff --git a/ncdump/test_ncdump.sh b/ncdump/test_ncdump.sh index 71785f596d..dd95e3890a 100644 --- a/ncdump/test_ncdump.sh +++ b/ncdump/test_ncdump.sh @@ -85,13 +85,3 @@ diff -b tst_fillbug.cdl $srcdir/ref_tst_fillbug.cdl } - -NCDUMPPATH=${builddir} -if test "x$NCDUMPDIR" != x ; then - NCDUMPPATH="${NCDUMPPATH}/$NCDUMPDIR" - rm -fr $NCDUMPPATH - mkdir $NCDUMPPATH -fi -find $NCDUMPPATH - - diff --git a/ncdump/tst_mud.sh b/ncdump/tst_mud.sh index 89b3e88fe3..d86c54673b 100755 --- a/ncdump/tst_mud.sh +++ b/ncdump/tst_mud.sh @@ -8,28 +8,74 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi set -e +if test "x$TESTNCZARR" = x1 ; then +. 
"$srcdir/test_nczarr.sh" +s3isolate "testdir_mud4" +else +isolate "testdir_mud4" +ISOPATH=`pwd` +fi +THISDIR=`pwd` +cd $ISOPATH + echo "" echo "*** Testing ncdump output for multiple unlimited dimensions" -echo "*** creating netcdf file tst_mud4.nc from ref_tst_mud4.cdl ..." -${NCGEN} -4 -b -o tst_mud4.nc $srcdir/ref_tst_mud4.cdl -echo "*** creating tst_mud4.cdl from tst_mud4.nc ..." -${NCDUMP} tst_mud4.nc > tst_mud4.cdl + +# This is where the ref files are kept +refdir="${srcdir}/../ncdump" + +testcase() { +zext=$1 + +if test "x$TESTNCZARR" = x1 ; then +fileargs "tmp_mud4_${zext}" +deletemap $zext $file +file="$fileurl" +else +file="tmp_mud4_${zext}.nc" +rm -f $file +fi + +echo "*** creating netcdf file $file from ref_tst_mud4.cdl ..." +${NCGEN} -4 -b -o $file $refdir/ref_tst_mud4.cdl +echo "*** creating tmp_mud4.cdl from $file ..." +${NCDUMP} -n tst_mud4 $file > tmp_mud4.cdl # echo "*** comparing tst_mud4.cdl with ref_tst_mud4.cdl..." -diff -b tst_mud4.cdl $srcdir/ref_tst_mud4.cdl -# echo "*** comparing annotation from ncdump -bc tst_mud4.nc with expected output..." -${NCDUMP} -bc tst_mud4.nc > tst_mud4-bc.cdl -diff -b tst_mud4-bc.cdl $srcdir/ref_tst_mud4-bc.cdl +diff -b tmp_mud4.cdl $refdir/ref_tst_mud4.cdl +# echo "*** comparing annotation from ncdump -bc $file with expected output..." +${NCDUMP} -n tst_mud4 -bc $file > tmp_mud4-bc.cdl +diff -b tmp_mud4-bc.cdl $refdir/ref_tst_mud4-bc.cdl + # Now test with char arrays instead of ints -echo "*** creating netcdf file tst_mud4_chars.nc from ref_tst_mud4_chars.cdl ..." -${NCGEN} -4 -b -o tst_mud4_chars.nc $srcdir/ref_tst_mud4_chars.cdl -echo "*** creating tst_mud4_chars.cdl from tst_mud4_chars.nc ..." -${NCDUMP} tst_mud4_chars.nc > tst_mud4_chars.cdl -# echo "*** comparing tst_mud4_chars.cdl with ref_tst_mud4_chars.cdl..." -diff -b tst_mud4_chars.cdl $srcdir/ref_tst_mud4_chars.cdl -exit 0 -# unused -# echo "*** comparing annotation from ncdump -bc tst_mud4_chars.nc with expected output..." 
-${NCDUMP} -bc tst_mud4_chars.nc > tst_mud4_chars-bc.cdl -# diff -b tst_mud4_chars-bc.cdl $srcdir/ref_tst_mud4_chars-bc.cdl -echo "*** All ncdump test output for multiple unlimited dimensions passed!" +if test "x$TESTNCZARR" = x1 ; then +fileargs "tmp_mud4_chars${zext}" +deletemap $zext $file +file="$fileurl" +else +file="tmp_mud4_chars${zext}.nc" +rm -f $file +fi +echo "*** creating netcdf file $file from ref_tst_mud4_chars.cdl ..." +${NCGEN} -4 -b -o $file $refdir/ref_tst_mud4_chars.cdl +echo "*** creating ${file}.cdl from $file ..." +${NCDUMP} -n tst_mud4_chars $file > tmp_mud4_chars.cdl +# echo "*** comparing tmp_mud4_chars.cdl with ref_tst_mud4_chars.cdl..." +diff -b tmp_mud4_chars.cdl $refdir/ref_tst_mud4_chars.cdl +if test 1 = 0 ; then + # unused + echo "*** comparing annotation from ncdump -bc tst_mud4_chars.nc with expected output..." + ${NCDUMP} -n tst_mud4_chars -bc $file > tmp_mud4_chars-bc.cdl + diff -b tmp_mud4_chars-bc.cdl $refdir/ref_tst_mud4_chars-bc.cdl + echo "*** All ncdump test output for multiple unlimited dimensions passed!" +fi +} + +if test "x$TESTNCZARR" = x1 ; then + testcase file + if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip ; fi + if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3 ; fi +else + testcase nc +fi + exit 0 diff --git a/ncdump/tst_nccopy4.sh b/ncdump/tst_nccopy4.sh index 73ff138fa5..07521da73e 100755 --- a/ncdump/tst_nccopy4.sh +++ b/ncdump/tst_nccopy4.sh @@ -3,14 +3,11 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi . ../test_common.sh - - -NCDUMPDIR="testdir_ncdump_nccopy4" . $srcdir/test_ncdump.sh -# Move into test directory -cd $NCDUMPPATH - +isolate "testdir_nccopy4" +THISDIR=`pwd` +cd $ISOPATH set -e diff --git a/ncdump/tst_nccopy5.sh b/ncdump/tst_nccopy5.sh index ebea3b051e..2c2399b18a 100755 --- a/ncdump/tst_nccopy5.sh +++ b/ncdump/tst_nccopy5.sh @@ -3,32 +3,45 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi . 
../test_common.sh -# If we want to run valgrind -#NCCOPY="valgrind --leak-check=full ${NCCOPY}" +set -e -# Choose tests to run -T1=1 -T2=1 -T3=1 -T4=1 -T5=1 +if test "x$TESTNCZARR" = x1 ; then +. "$srcdir/test_nczarr.sh" +s3isolate "testdir_nccopy5" +else +isolate testdir_ncccopy5 +fi +THISDIR=`pwd` +cd $ISOPATH + +# Program to run +if test "x$TESTNCZARR" = x1 ; then + CHUNKTEST="${execdir}/test_chunking" +else + CHUNKTEST="${execdir}/tst_chunking" +fi # For a netCDF-4 build, test nccopy chunking rules -set -e echo "" # Trim off leading and trailing whitespace -# Also remove any +# Also remove any and de-tabify # usage: trim # Leaves result in variable TRIMMED trim() { # trim leading whitespace and remove - TMP=`echo "$1" |tr -d '\r' | sed -e 's/^[ ]*//'` + TMP=`echo "$1" |tr -d '\r' | tr '\t' ' ' |sed -e 's/^[ ]*//'` # trim trailing whitespace TRIMMED=`echo "$TMP" | sed -e 's/[ ]*$//'` } +if test "x$TESTNCZARR" = x1 ; then +# NCZARR does not support contiguous storage +checkfvar() { +return +} +else # usage: checkfvar checkfvar() { # Make sure that fvar was not chunked @@ -38,6 +51,7 @@ checkfvar() { exit 1 fi } +fi # usage: checkivar checkivar() { @@ -67,135 +81,192 @@ cleanup() { rm -f tmp_nc5_omit.nc tmp_nc5_omit.cdl } -# remove all created files -reset() { - cleanup - rm -fr tst_nc5.nc tst_nc5.cdl tmp_ncc5.cdl - rm -f tst_nc5_omit.nc tst_nc5_omit.cdl +buildfile() { +zext=$1 +index=$2 +if test "x$TESTNCZARR" = x1 ; then +fileargs "tmp_nc5_${index}_${zext}" +deletemap $zext $file +file="$fileurl" +else +file="tmp_nc5_${index}_${zext}.nc" +rm -f $file +fi } -reset +testcase1() { +zext=$1 +buildfile ${zext} 1 -if test "x$T1" = x1 ; then +rm -fr tmp1${zext}.dir +mkdir tmp1${zext}.dir +cd tmp1${zext}.dir # Create a simple classic input file -${execdir}/tst_chunking tmp_nc5_base.nc +${CHUNKTEST} $file # Save a .cdl version -${NCDUMP} tmp_nc5_base.nc > tmp_nc5_base.cdl +${NCDUMP} -n tmp_nc5_base ${file} > tmp_nc5.cdl echo "*** Test nccopy -c with per-variable 
chunking; classic->enhanced" # This should produce same as -c dim0/,dim1/1,dim2/,dim3/1,dim4/,dim5/1,dim6/ # But note that the chunk product is less than default, so we need to reduce it (-M) -${NCCOPY} -M1000 -c ivar:7,1,2,1,5,1,9 tmp_nc5_base.nc tmp_nc5.nc -${NCDUMP} -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl +${NCCOPY} -M1000 -c ivar:7,1,2,1,5,1,9 ${file} tmp_nc34.nc +${NCDUMP} -n tmp_nc5_base tmp_nc34.nc > tmp_nc34.cdl # Verify that the core cdl is the same -diff tmp_nc5_base.cdl tmp_nc5.cdl -# Look at the output chunking of ivar -rm -f tmp_nc5a.cdl # reuse -${NCDUMP} -hs -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl +diff tmp_nc5.cdl tmp_nc34.cdl + +# Look at the output chunking +${NCDUMP} -hs -n tmp_nc5_base tmp_nc34.nc > tmp_chunking.cdl # extract the chunking line -TESTLINE=`sed -e '/ivar:_ChunkSizes/p' -e d tmp_nc5.cdl echo "*** Test nccopy -c with per-variable chunking; enhanced->enhanced" -reset -${execdir}/tst_chunking tst_nc5.nc deflate -${NCDUMP} -n tmp_nc5_base tst_nc5.nc > tst_nc5.cdl # Use -M to ensure that chunking takes effect -${NCCOPY} -M500 -c ivar:4,1,2,1,5,2,3 tst_nc5.nc tmp_nc5.nc -${NCDUMP} -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl -diff tst_nc5.cdl tmp_nc5.cdl +${NCCOPY} -M500 -c ivar:4,1,2,1,5,2,3 $file tmp_nc44.nc +${NCDUMP} -n tmp_nc5_base tmp_nc44.nc > tmp_nc44.cdl +diff tmp_nc5.cdl tmp_nc44.cdl # Look at the output chunking -rm -f tmp_nc5.cdl # reuse -${NCDUMP} -hs -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl +${NCDUMP} -hs -n tmp_nc5_base tmp_nc44.nc > tmp_chunking.cdl # extract the chunking line -TESTLINE=`sed -e '/ivar:_ChunkSizes/p' -e d tmp_nc5.cdl echo "*** Test nccopy -c with FQN var name; enhanced ->enhanced" -reset -${execdir}/tst_chunking tst_nc5.nc group -${NCDUMP} -n tmp_nc5_base tst_nc5.nc > tst_nc5.cdl -${NCCOPY} -M500 -c /g/ivar:4,1,2,1,5,2,3 tst_nc5.nc tmp_nc5.nc -${NCDUMP} -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl -diff tst_nc5.cdl tmp_nc5.cdl +${NCCOPY} -M500 -c /g/ivar:4,1,2,1,5,2,3 ${file} tmp_nc44.nc +${NCDUMP} -n 
tmp_nc5_base tmp_nc44.nc > tmp_nc44.cdl +diff tmp_nc5.cdl tmp_nc44.cdl # Verify chunking -${NCDUMP} -hs -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl +${NCDUMP} -hs -n tmp_nc5_base tmp_nc44.nc > tmp_chunking.cdl # extract the chunking line -TESTLINE=`sed -e '/ivar:_ChunkSizes/p' -e d tmp_nc5.cdl echo "*** Test nccopy -c with unlimited dimension; classic ->enhanced" -reset -${execdir}/tst_chunking tst_nc5.nc unlimited # should produce modified tmp_nc5.nc with ivar of rank 2 -${NCDUMP} -n tmp_nc5_base tst_nc5.nc > tst_nc5.cdl -${NCCOPY} -M500 -c ivar:5,3 tst_nc5.nc tmp_nc5.nc -${NCDUMP} -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl -diff tst_nc5.cdl tmp_nc5.cdl +# Warning: make sure that nccopy does not convert small chunking to contiguous => -M +${NCCOPY} -M50 -c ivar:5,3 $file tmp_nc34.nc +${NCDUMP} -n tmp_nc5_base tmp_nc34.nc > tmp_nc34.cdl +diff tmp_nc5.cdl tmp_nc34.cdl # Verify chunking -${NCDUMP} -hs -n tmp_nc5_base tmp_nc5.nc > tmp_nc5.cdl +${NCDUMP} -hs -n tmp_nc5_base tmp_nc34.nc > tmp_chunking.cdl # extract the chunking line -TESTLINE=`sed -e '/ivar:_ChunkSizes/p' -e d tmp_nc5_omit.cdl echo "*** Test nccopy -c fvar: to suppress chunking; classic ->enhanced" -reset -${execdir}/tst_chunking tst_nc5_omit.nc -${NCDUMP} -n tst_nc5_omit tst_nc5_omit.nc > tst_nc5_omit.cdl -${NCCOPY} -M500 -c ivar:7,1,2,1,5,1,9 -c fvar: tst_nc5_omit.nc tmp_nc5_omit.nc -${NCDUMP} -n tst_nc5_omit tmp_nc5_omit.nc > tmp_nc5_omit.cdl -diff tst_nc5_omit.cdl tmp_nc5_omit.cdl +${NCCOPY} -M500 -c ivar:7,1,2,1,5,1,9 -c fvar: $file tmp_nc34_omit.nc +${NCDUMP} -n tmp_nc5_base tmp_nc34_omit.nc > tmp_nc34_omit.cdl +diff tmp_nc5_omit.cdl tmp_nc34_omit.cdl # Verify chunking of ivar -${NCDUMP} -hs -n tst_nc5_omit tmp_nc5_omit.nc > tmp_nc5_omit.cdl +${NCDUMP} -hs -n tmp_nc5_omit tmp_nc34_omit.nc > tmp_chunking_omit.cdl # extract the chunking line -TESTLINE=`sed -e '/ivar:_ChunkSizes/p' -e d $@ cat $(top_srcdir)/nc_test4/tst_filter_vlen.c >> $@ +test_unlim_vars.c: $(top_srcdir)/nc_test4/tst_unlim_vars.c + rm 
-f $@ + echo "#define TESTNCZARR" > $@ + cat $(top_srcdir)/nc_test4/tst_unlim_vars.c >> $@ + +test_endians.c: $(top_srcdir)/nc_test4/tst_h5_endians.c + rm -f $@ + echo "#define TESTNCZARR" > $@ + cat $(top_srcdir)/nc_test4/tst_h5_endians.c >> $@ + +test_put_vars_two_unlim_dim.c: $(top_srcdir)/nc_test4/tst_put_vars_two_unlim_dim.c + rm -f $@ + echo "#define TESTNCZARR" > $@ + cat $(top_srcdir)/nc_test4/tst_put_vars_two_unlim_dim.c >> $@ + +test_chunking.c: $(top_srcdir)/ncdump/tst_chunking.c + rm -f $@ + echo "#define TESTNCZARR" > $@ + cat $(top_srcdir)/ncdump/tst_chunking.c >> $@ + run_unknown.sh: $(top_srcdir)/nc_test4/tst_unknown.sh rm -f $@ run_unknown.tmp echo "#!/bin/bash" > run_unknown.tmp @@ -214,6 +292,15 @@ run_filter_vlen.sh: $(top_srcdir)/nc_test4/tst_filter_vlen.sh chmod a+x $@ rm -f run_filter_vlen.tmp +run_filter_misc.sh: $(top_srcdir)/nc_test4/tst_filter_misc.sh + rm -f $@ run_filter_misc.tmp + echo "#!/bin/bash" > run_filter_misc.tmp + echo "TESTNCZARR=1" >> run_filter_misc.tmp + cat $(top_srcdir)/nc_test4/tst_filter_misc.sh >> run_filter_misc.tmp + tr -d '\r' < run_filter_misc.tmp > $@ + chmod a+x $@ + rm -f run_filter_misc.tmp + run_filterinstall.sh: $(top_srcdir)/nc_test4/tst_filterinstall.sh rm -f $@ run_filterinstall.tmp echo "#!/bin/bash" > run_filterinstall.tmp @@ -223,18 +310,36 @@ run_filterinstall.sh: $(top_srcdir)/nc_test4/tst_filterinstall.sh chmod a+x $@ rm -f run_filterinstall.tmp +run_mud.sh: $(top_srcdir)/ncdump/tst_mud.sh + rm -f $@ run_mud.tmp + echo "#!/bin/bash" > run_mud.tmp + echo "TESTNCZARR=1" >> run_mud.tmp + cat $(top_srcdir)/ncdump/tst_mud.sh >> run_mud.tmp + tr -d '\r' < run_mud.tmp > $@ + chmod a+x $@ + rm -f run_mud.tmp + +run_nccopy5.sh: $(top_srcdir)/ncdump/tst_nccopy5.sh + rm -f $@ run_nccopy5.tmp + echo "#!/bin/bash" > run_nccopy5.tmp + echo "TESTNCZARR=1" >> run_nccopy5.tmp + cat $(top_srcdir)/ncdump/tst_nccopy5.sh >> run_nccopy5.tmp + tr -d '\r' < run_nccopy5.tmp > $@ + chmod a+x $@ + rm -f run_nccopy5.tmp + # 
Remove directories clean-local: - rm -fr testdir_* + rm -fr testdir_* testset_* rm -fr tmp_*.nc tmp_*.zarr tst_quantize*.zarr tmp*.file results.file results.s3 results.zip rm -fr rcmiscdir ref_power_901_constants.file +if ENABLE_S3_TESTALL +check-local: + bash ${abs_top_builddir}/s3cleanup.sh +endif -DISTCLEANFILES = findplugin.sh test_quantize.c run_specific_filters.sh run_filterinstall.sh run_unknown.sh test_filter_vlen.c run_filter_vlen.sh - -# Provide a specific s3 cleanup action -s3cleanup:: - ${srcdir}/run_s3_cleanup.sh +DISTCLEANFILES = findplugin.sh ${BUILT_SOURCES} # If valgrind is present, add valgrind targets. @VALGRIND_CHECK_RULES@ diff --git a/nczarr_test/ncdumpchunks.c b/nczarr_test/ncdumpchunks.c index a29046797c..ba2660465a 100755 --- a/nczarr_test/ncdumpchunks.c +++ b/nczarr_test/ncdumpchunks.c @@ -6,6 +6,10 @@ #include #include +#ifdef HAVE_UNISTD_H +#include +#endif + #ifdef HAVE_GETOPT_H #include #endif @@ -30,7 +34,7 @@ #undef DEBUG /* Short Aliases */ -#ifdef HDF5_SUPPORTS_PAR_FILTERS +#ifdef USE_HDF5 #define H5 #endif #ifdef ENABLE_NCZARR @@ -43,6 +47,8 @@ typedef struct Format { char var_name[NC_MAX_NAME]; int fillvalue; int debug; + int linear; + int holevalue; int rank; size_t dimlens[NC_MAX_VAR_DIMS]; size_t chunklens[NC_MAX_VAR_DIMS]; @@ -82,7 +88,7 @@ usage(int err) if(err != 0) { fprintf(stderr,"Error: (%d) %s\n",err,nc_strerror(err)); } - fprintf(stderr,"usage: ncdumpchunks -v \n"); + fprintf(stderr,"usage: ncdumpchunks [-b] -v \n"); fflush(stderr); exit(1); } @@ -107,7 +113,7 @@ printvector(int rank, size_t* vec) void cleanup(void) { - int i; + size_t i; for(i=0;irank;i++) { @@ -214,7 +219,7 @@ chunk_key(int format->rank, size_t* indices) void setoffset(Odometer* odom, size_t* chunksizes, size_t* offset) { - int i; + size_t i; for(i=0;irank;i++) offset[i] = odom->index[i] * chunksizes[i]; } @@ -225,10 +230,40 @@ printindent(size_t indent) while(indent-- > 0) printf(" "); } +int +printchunklinear(Format* format, int* chunkdata, size_t 
indent) +{ + size_t i; + for(i=0;ichunkprod;i++) { + if(chunkdata[i] == format->fillvalue) + printf(" _"); + else + printf(" %02d", chunkdata[i]); + } + printf("\n"); + return NC_NOERR; +} + +static void +printchunk2d(Format* format, int* chunkdata, size_t indent) +{ + size_t pos; + size_t row; + pos = 0; + for(row=0;rowchunklens[0];row++) { + size_t col; + if(row > 0) printindent(indent); + for(col=0;colchunklens[1];col++,pos++) { + if(chunkdata[pos] == format->fillvalue) printf(" _"); else printf(" %02d", chunkdata[pos]); + } + printf("\n"); + } +} + static void printchunk(Format* format, int* chunkdata, size_t indent) { - int k[3]; + size_t k[3]; int rank = format->rank; unsigned cols[3], pos; size_t* chl = format->chunklens; @@ -268,13 +303,19 @@ printchunk(Format* format, int* chunkdata, size_t indent) if(k[1] > 0) printf(" |"); for(k[2]=0;k[2]xtype == NC_UBYTE) { - int l; + size_t l; unsigned char* bchunkdata = (unsigned char*)(&chunkdata[pos]); for(l=0;lfillvalue) + printf(" _"); + else + printf(" %02u", bchunkdata[l]); } } else { - printf(" %02d", chunkdata[pos]); + if(chunkdata[pos] == format->fillvalue) + printf(" _"); + else + printf(" %02d", chunkdata[pos]); } pos++; } @@ -387,11 +428,11 @@ dump(Format* format) default: usage(NC_EINVAL); } if(holechunk) { - /* Hole chunk: use fillvalue */ + /* Hole chunk: use holevalue */ size_t i = 0; int* idata = (int*)chunkdata; for(i=0;ichunkprod;i++) - idata[i] = format->fillvalue; + idata[i] = format->holevalue; } sindices[0] = '\0'; for(r=0;rrank;r++) { @@ -401,7 +442,12 @@ dump(Format* format) } strcat(sindices," ="); printf("%s",sindices); - printchunk(format,chunkdata,strlen(sindices)); + if(format->linear) + printchunklinear(format,chunkdata,strlen(sindices)); + else if(format->rank == 2) + printchunk2d(format,chunkdata,strlen(sindices)); + else + printchunk(format,chunkdata,strlen(sindices)); fflush(stdout); odom_next(odom); } @@ -470,18 +516,25 @@ main(int argc, char** argv) /* Init some format fields */ 
format.xtype = NC_INT; - - while ((c = getopt(argc, argv, "bv:DT:")) != EOF) { + format.holevalue = -2; + + while ((c = getopt(argc, argv, "bhv:DT:L")) != EOF) { switch(c) { case 'b': format.xtype = NC_UBYTE; - break; + break; + case 'h': + usage(0); + break; case 'v': strcpy(format.var_name,optarg); - break; + break; case 'D': format.debug = 1; break; + case 'L': + format.linear = 1; + break; case 'T': nctracelevel(atoi(optarg)); break; diff --git a/nczarr_test/ref_misc1.dmp b/nczarr_test/ref_misc1.dmp index 6bee20d5ce..7d80574031 100644 --- a/nczarr_test/ref_misc1.dmp +++ b/nczarr_test/ref_misc1.dmp @@ -1,97 +1,97 @@ rank=3 dims=(6,12,4) chunks=(2,3,1) -[0/0][0/0][0/0] = 00 | -1 | -1 - 04 | -1 | -1 -[0/0][0/0][1/1] = 01 | -1 | -1 - 05 | -1 | -1 -[0/0][0/0][2/2] = 02 | -1 | -1 - 06 | -1 | -1 -[0/0][0/0][3/3] = 03 | -1 | -1 - 07 | -1 | -1 -[0/0][1/3][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][1/3][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][1/3][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][1/3][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][2/6][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][2/6][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][2/6][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][2/6][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][3/9][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][3/9][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][3/9][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[0/0][3/9][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][0/0][0/0] = 08 | -1 | -1 - 12 | -1 | -1 -[1/2][0/0][1/1] = 09 | -1 | -1 - 13 | -1 | -1 -[1/2][0/0][2/2] = 10 | -1 | -1 - 14 | -1 | -1 -[1/2][0/0][3/3] = 11 | -1 | -1 - 15 | -1 | -1 -[1/2][1/3][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][1/3][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][1/3][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][1/3][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][2/6][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][2/6][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][2/6][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][2/6][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][3/9][0/0] = 
-1 | -1 | -1 - -1 | -1 | -1 -[1/2][3/9][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][3/9][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[1/2][3/9][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][0/0][0/0] = 16 | -1 | -1 - 20 | -1 | -1 -[2/4][0/0][1/1] = 17 | -1 | -1 - 21 | -1 | -1 -[2/4][0/0][2/2] = 18 | -1 | -1 - 22 | -1 | -1 -[2/4][0/0][3/3] = 19 | -1 | -1 - 23 | -1 | -1 -[2/4][1/3][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][1/3][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][1/3][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][1/3][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][2/6][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][2/6][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][2/6][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][2/6][3/3] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][3/9][0/0] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][3/9][1/1] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][3/9][2/2] = -1 | -1 | -1 - -1 | -1 | -1 -[2/4][3/9][3/3] = -1 | -1 | -1 - -1 | -1 | -1 +[0/0][0/0][0/0] = 00 | _ | _ + 04 | _ | _ +[0/0][0/0][1/1] = 01 | _ | _ + 05 | _ | _ +[0/0][0/0][2/2] = 02 | _ | _ + 06 | _ | _ +[0/0][0/0][3/3] = 03 | _ | _ + 07 | _ | _ +[0/0][1/3][0/0] = _ | _ | _ + _ | _ | _ +[0/0][1/3][1/1] = _ | _ | _ + _ | _ | _ +[0/0][1/3][2/2] = _ | _ | _ + _ | _ | _ +[0/0][1/3][3/3] = _ | _ | _ + _ | _ | _ +[0/0][2/6][0/0] = _ | _ | _ + _ | _ | _ +[0/0][2/6][1/1] = _ | _ | _ + _ | _ | _ +[0/0][2/6][2/2] = _ | _ | _ + _ | _ | _ +[0/0][2/6][3/3] = _ | _ | _ + _ | _ | _ +[0/0][3/9][0/0] = _ | _ | _ + _ | _ | _ +[0/0][3/9][1/1] = _ | _ | _ + _ | _ | _ +[0/0][3/9][2/2] = _ | _ | _ + _ | _ | _ +[0/0][3/9][3/3] = _ | _ | _ + _ | _ | _ +[1/2][0/0][0/0] = 08 | _ | _ + 12 | _ | _ +[1/2][0/0][1/1] = 09 | _ | _ + 13 | _ | _ +[1/2][0/0][2/2] = 10 | _ | _ + 14 | _ | _ +[1/2][0/0][3/3] = 11 | _ | _ + 15 | _ | _ +[1/2][1/3][0/0] = _ | _ | _ + _ | _ | _ +[1/2][1/3][1/1] = _ | _ | _ + _ | _ | _ +[1/2][1/3][2/2] = _ | _ | _ + _ | _ | _ +[1/2][1/3][3/3] = _ | _ | _ + _ | _ | _ +[1/2][2/6][0/0] = _ | _ | _ + _ | _ | _ +[1/2][2/6][1/1] = _ | _ | _ + _ | _ 
| _ +[1/2][2/6][2/2] = _ | _ | _ + _ | _ | _ +[1/2][2/6][3/3] = _ | _ | _ + _ | _ | _ +[1/2][3/9][0/0] = _ | _ | _ + _ | _ | _ +[1/2][3/9][1/1] = _ | _ | _ + _ | _ | _ +[1/2][3/9][2/2] = _ | _ | _ + _ | _ | _ +[1/2][3/9][3/3] = _ | _ | _ + _ | _ | _ +[2/4][0/0][0/0] = 16 | _ | _ + 20 | _ | _ +[2/4][0/0][1/1] = 17 | _ | _ + 21 | _ | _ +[2/4][0/0][2/2] = 18 | _ | _ + 22 | _ | _ +[2/4][0/0][3/3] = 19 | _ | _ + 23 | _ | _ +[2/4][1/3][0/0] = _ | _ | _ + _ | _ | _ +[2/4][1/3][1/1] = _ | _ | _ + _ | _ | _ +[2/4][1/3][2/2] = _ | _ | _ + _ | _ | _ +[2/4][1/3][3/3] = _ | _ | _ + _ | _ | _ +[2/4][2/6][0/0] = _ | _ | _ + _ | _ | _ +[2/4][2/6][1/1] = _ | _ | _ + _ | _ | _ +[2/4][2/6][2/2] = _ | _ | _ + _ | _ | _ +[2/4][2/6][3/3] = _ | _ | _ + _ | _ | _ +[2/4][3/9][0/0] = _ | _ | _ + _ | _ | _ +[2/4][3/9][1/1] = _ | _ | _ + _ | _ | _ +[2/4][3/9][2/2] = _ | _ | _ + _ | _ | _ +[2/4][3/9][3/3] = _ | _ | _ + _ | _ | _ diff --git a/nczarr_test/ref_ndims.dmp b/nczarr_test/ref_ndims.dmp index 4818ab4ff4..321e13254d 100644 --- a/nczarr_test/ref_ndims.dmp +++ b/nczarr_test/ref_ndims.dmp @@ -7,10 +7,10 @@ rank=4 dims=(8,8,8,8) chunks=(3,3,4,4) [0/0][1/3][0/0][1/4] = 196 197 198 199 204 205 206 207 212 213 214 215 220 221 222 223 260 261 262 263 268 269 270 271 276 277 278 279 284 285 286 287 324 325 326 327 332 333 334 335 340 341 342 343 348 349 350 351 708 709 710 711 716 717 718 719 724 725 726 727 732 733 734 735 772 773 774 775 780 781 782 783 788 789 790 791 796 797 798 799 836 837 838 839 844 845 846 847 852 853 854 855 860 861 862 863 1220 1221 1222 1223 1228 1229 1230 1231 1236 1237 1238 1239 1244 1245 1246 1247 1284 1285 1286 1287 1292 1293 1294 1295 1300 1301 1302 1303 1308 1309 1310 1311 1348 1349 1350 1351 1356 1357 1358 1359 1364 1365 1366 1367 1372 1373 1374 1375 [0/0][1/3][1/4][0/0] = 224 225 226 227 232 233 234 235 240 241 242 243 248 249 250 251 288 289 290 291 296 297 298 299 304 305 306 307 312 313 314 315 352 353 354 355 360 361 362 363 368 369 370 371 376 377 378 379 
736 737 738 739 744 745 746 747 752 753 754 755 760 761 762 763 800 801 802 803 808 809 810 811 816 817 818 819 824 825 826 827 864 865 866 867 872 873 874 875 880 881 882 883 888 889 890 891 1248 1249 1250 1251 1256 1257 1258 1259 1264 1265 1266 1267 1272 1273 1274 1275 1312 1313 1314 1315 1320 1321 1322 1323 1328 1329 1330 1331 1336 1337 1338 1339 1376 1377 1378 1379 1384 1385 1386 1387 1392 1393 1394 1395 1400 1401 1402 1403 [0/0][1/3][1/4][1/4] = 228 229 230 231 236 237 238 239 244 245 246 247 252 253 254 255 292 293 294 295 300 301 302 303 308 309 310 311 316 317 318 319 356 357 358 359 364 365 366 367 372 373 374 375 380 381 382 383 740 741 742 743 748 749 750 751 756 757 758 759 764 765 766 767 804 805 806 807 812 813 814 815 820 821 822 823 828 829 830 831 868 869 870 871 876 877 878 879 884 885 886 887 892 893 894 895 1252 1253 1254 1255 1260 1261 1262 1263 1268 1269 1270 1271 1276 1277 1278 1279 1316 1317 1318 1319 1324 1325 1326 1327 1332 1333 1334 1335 1340 1341 1342 1343 1380 1381 1382 1383 1388 1389 1390 1391 1396 1397 1398 1399 1404 1405 1406 1407 -[0/0][2/6][0/0][0/0] = 384 385 386 387 392 393 394 395 400 401 402 403 408 409 410 411 448 449 450 451 456 457 458 459 464 465 466 467 472 473 474 475 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 896 897 898 899 904 905 906 907 912 913 914 915 920 921 922 923 960 961 962 963 968 969 970 971 976 977 978 979 984 985 986 987 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 1408 1409 1410 1411 1416 1417 1418 1419 1424 1425 1426 1427 1432 1433 1434 1435 1472 1473 1474 1475 1480 1481 1482 1483 1488 1489 1490 1491 1496 1497 1498 1499 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[0/0][2/6][0/0][1/4] = 388 389 390 391 396 397 398 399 404 405 406 407 412 413 414 415 452 453 454 455 460 461 462 463 468 469 470 471 476 477 478 479 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 900 901 902 903 908 909 910 911 916 917 918 919 924 925 926 927 964 965 966 967 972 973 974 975 980 981 982 983 988 989 990 991 -1 -1 -1 
-1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 1412 1413 1414 1415 1420 1421 1422 1423 1428 1429 1430 1431 1436 1437 1438 1439 1476 1477 1478 1479 1484 1485 1486 1487 1492 1493 1494 1495 1500 1501 1502 1503 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[0/0][2/6][1/4][0/0] = 416 417 418 419 424 425 426 427 432 433 434 435 440 441 442 443 480 481 482 483 488 489 490 491 496 497 498 499 504 505 506 507 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 928 929 930 931 936 937 938 939 944 945 946 947 952 953 954 955 992 993 994 995 1000 1001 1002 1003 1008 1009 1010 1011 1016 1017 1018 1019 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 1440 1441 1442 1443 1448 1449 1450 1451 1456 1457 1458 1459 1464 1465 1466 1467 1504 1505 1506 1507 1512 1513 1514 1515 1520 1521 1522 1523 1528 1529 1530 1531 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[0/0][2/6][1/4][1/4] = 420 421 422 423 428 429 430 431 436 437 438 439 444 445 446 447 484 485 486 487 492 493 494 495 500 501 502 503 508 509 510 511 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 932 933 934 935 940 941 942 943 948 949 950 951 956 957 958 959 996 997 998 999 1004 1005 1006 1007 1012 1013 1014 1015 1020 1021 1022 1023 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 1444 1445 1446 1447 1452 1453 1454 1455 1460 1461 1462 1463 1468 1469 1470 1471 1508 1509 1510 1511 1516 1517 1518 1519 1524 1525 1526 1527 1532 1533 1534 1535 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 +[0/0][2/6][0/0][0/0] = 384 385 386 387 392 393 394 395 400 401 402 403 408 409 410 411 448 449 450 451 456 457 458 459 464 465 466 467 472 473 474 475 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 896 897 898 899 904 905 906 907 912 913 914 915 920 921 922 923 960 961 962 963 968 969 970 971 976 977 978 979 984 985 986 987 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 1408 1409 1410 1411 1416 1417 1418 1419 1424 1425 1426 1427 1432 1433 1434 1435 1472 1473 1474 1475 1480 1481 1482 1483 1488 1489 1490 1491 1496 1497 1498 1499 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[0/0][2/6][0/0][1/4] = 
388 389 390 391 396 397 398 399 404 405 406 407 412 413 414 415 452 453 454 455 460 461 462 463 468 469 470 471 476 477 478 479 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 900 901 902 903 908 909 910 911 916 917 918 919 924 925 926 927 964 965 966 967 972 973 974 975 980 981 982 983 988 989 990 991 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 1412 1413 1414 1415 1420 1421 1422 1423 1428 1429 1430 1431 1436 1437 1438 1439 1476 1477 1478 1479 1484 1485 1486 1487 1492 1493 1494 1495 1500 1501 1502 1503 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[0/0][2/6][1/4][0/0] = 416 417 418 419 424 425 426 427 432 433 434 435 440 441 442 443 480 481 482 483 488 489 490 491 496 497 498 499 504 505 506 507 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 928 929 930 931 936 937 938 939 944 945 946 947 952 953 954 955 992 993 994 995 1000 1001 1002 1003 1008 1009 1010 1011 1016 1017 1018 1019 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 1440 1441 1442 1443 1448 1449 1450 1451 1456 1457 1458 1459 1464 1465 1466 1467 1504 1505 1506 1507 1512 1513 1514 1515 1520 1521 1522 1523 1528 1529 1530 1531 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[0/0][2/6][1/4][1/4] = 420 421 422 423 428 429 430 431 436 437 438 439 444 445 446 447 484 485 486 487 492 493 494 495 500 501 502 503 508 509 510 511 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 932 933 934 935 940 941 942 943 948 949 950 951 956 957 958 959 996 997 998 999 1004 1005 1006 1007 1012 1013 1014 1015 1020 1021 1022 1023 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 1444 1445 1446 1447 1452 1453 1454 1455 1460 1461 1462 1463 1468 1469 1470 1471 1508 1509 1510 1511 1516 1517 1518 1519 1524 1525 1526 1527 1532 1533 1534 1535 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ [1/3][0/0][0/0][0/0] = 1536 1537 1538 1539 1544 1545 1546 1547 1552 1553 1554 1555 1560 1561 1562 1563 1600 1601 1602 1603 1608 1609 1610 1611 1616 1617 1618 1619 1624 1625 1626 1627 1664 1665 1666 1667 1672 1673 1674 1675 1680 1681 1682 1683 1688 1689 1690 1691 2048 2049 2050 2051 2056 2057 2058 2059 2064 2065 2066 2067 2072 2073 2074 2075 2112 2113 2114 2115 2120 2121 2122 2123 2128 
2129 2130 2131 2136 2137 2138 2139 2176 2177 2178 2179 2184 2185 2186 2187 2192 2193 2194 2195 2200 2201 2202 2203 2560 2561 2562 2563 2568 2569 2570 2571 2576 2577 2578 2579 2584 2585 2586 2587 2624 2625 2626 2627 2632 2633 2634 2635 2640 2641 2642 2643 2648 2649 2650 2651 2688 2689 2690 2691 2696 2697 2698 2699 2704 2705 2706 2707 2712 2713 2714 2715 [1/3][0/0][0/0][1/4] = 1540 1541 1542 1543 1548 1549 1550 1551 1556 1557 1558 1559 1564 1565 1566 1567 1604 1605 1606 1607 1612 1613 1614 1615 1620 1621 1622 1623 1628 1629 1630 1631 1668 1669 1670 1671 1676 1677 1678 1679 1684 1685 1686 1687 1692 1693 1694 1695 2052 2053 2054 2055 2060 2061 2062 2063 2068 2069 2070 2071 2076 2077 2078 2079 2116 2117 2118 2119 2124 2125 2126 2127 2132 2133 2134 2135 2140 2141 2142 2143 2180 2181 2182 2183 2188 2189 2190 2191 2196 2197 2198 2199 2204 2205 2206 2207 2564 2565 2566 2567 2572 2573 2574 2575 2580 2581 2582 2583 2588 2589 2590 2591 2628 2629 2630 2631 2636 2637 2638 2639 2644 2645 2646 2647 2652 2653 2654 2655 2692 2693 2694 2695 2700 2701 2702 2703 2708 2709 2710 2711 2716 2717 2718 2719 [1/3][0/0][1/4][0/0] = 1568 1569 1570 1571 1576 1577 1578 1579 1584 1585 1586 1587 1592 1593 1594 1595 1632 1633 1634 1635 1640 1641 1642 1643 1648 1649 1650 1651 1656 1657 1658 1659 1696 1697 1698 1699 1704 1705 1706 1707 1712 1713 1714 1715 1720 1721 1722 1723 2080 2081 2082 2083 2088 2089 2090 2091 2096 2097 2098 2099 2104 2105 2106 2107 2144 2145 2146 2147 2152 2153 2154 2155 2160 2161 2162 2163 2168 2169 2170 2171 2208 2209 2210 2211 2216 2217 2218 2219 2224 2225 2226 2227 2232 2233 2234 2235 2592 2593 2594 2595 2600 2601 2602 2603 2608 2609 2610 2611 2616 2617 2618 2619 2656 2657 2658 2659 2664 2665 2666 2667 2672 2673 2674 2675 2680 2681 2682 2683 2720 2721 2722 2723 2728 2729 2730 2731 2736 2737 2738 2739 2744 2745 2746 2747 @@ -19,19 +19,19 @@ rank=4 dims=(8,8,8,8) chunks=(3,3,4,4) [1/3][1/3][0/0][1/4] = 1732 1733 1734 1735 1740 1741 1742 1743 1748 1749 1750 1751 1756 1757 1758 
1759 1796 1797 1798 1799 1804 1805 1806 1807 1812 1813 1814 1815 1820 1821 1822 1823 1860 1861 1862 1863 1868 1869 1870 1871 1876 1877 1878 1879 1884 1885 1886 1887 2244 2245 2246 2247 2252 2253 2254 2255 2260 2261 2262 2263 2268 2269 2270 2271 2308 2309 2310 2311 2316 2317 2318 2319 2324 2325 2326 2327 2332 2333 2334 2335 2372 2373 2374 2375 2380 2381 2382 2383 2388 2389 2390 2391 2396 2397 2398 2399 2756 2757 2758 2759 2764 2765 2766 2767 2772 2773 2774 2775 2780 2781 2782 2783 2820 2821 2822 2823 2828 2829 2830 2831 2836 2837 2838 2839 2844 2845 2846 2847 2884 2885 2886 2887 2892 2893 2894 2895 2900 2901 2902 2903 2908 2909 2910 2911 [1/3][1/3][1/4][0/0] = 1760 1761 1762 1763 1768 1769 1770 1771 1776 1777 1778 1779 1784 1785 1786 1787 1824 1825 1826 1827 1832 1833 1834 1835 1840 1841 1842 1843 1848 1849 1850 1851 1888 1889 1890 1891 1896 1897 1898 1899 1904 1905 1906 1907 1912 1913 1914 1915 2272 2273 2274 2275 2280 2281 2282 2283 2288 2289 2290 2291 2296 2297 2298 2299 2336 2337 2338 2339 2344 2345 2346 2347 2352 2353 2354 2355 2360 2361 2362 2363 2400 2401 2402 2403 2408 2409 2410 2411 2416 2417 2418 2419 2424 2425 2426 2427 2784 2785 2786 2787 2792 2793 2794 2795 2800 2801 2802 2803 2808 2809 2810 2811 2848 2849 2850 2851 2856 2857 2858 2859 2864 2865 2866 2867 2872 2873 2874 2875 2912 2913 2914 2915 2920 2921 2922 2923 2928 2929 2930 2931 2936 2937 2938 2939 [1/3][1/3][1/4][1/4] = 1764 1765 1766 1767 1772 1773 1774 1775 1780 1781 1782 1783 1788 1789 1790 1791 1828 1829 1830 1831 1836 1837 1838 1839 1844 1845 1846 1847 1852 1853 1854 1855 1892 1893 1894 1895 1900 1901 1902 1903 1908 1909 1910 1911 1916 1917 1918 1919 2276 2277 2278 2279 2284 2285 2286 2287 2292 2293 2294 2295 2300 2301 2302 2303 2340 2341 2342 2343 2348 2349 2350 2351 2356 2357 2358 2359 2364 2365 2366 2367 2404 2405 2406 2407 2412 2413 2414 2415 2420 2421 2422 2423 2428 2429 2430 2431 2788 2789 2790 2791 2796 2797 2798 2799 2804 2805 2806 2807 2812 2813 2814 2815 2852 2853 2854 2855 2860 
2861 2862 2863 2868 2869 2870 2871 2876 2877 2878 2879 2916 2917 2918 2919 2924 2925 2926 2927 2932 2933 2934 2935 2940 2941 2942 2943 -[1/3][2/6][0/0][0/0] = 1920 1921 1922 1923 1928 1929 1930 1931 1936 1937 1938 1939 1944 1945 1946 1947 1984 1985 1986 1987 1992 1993 1994 1995 2000 2001 2002 2003 2008 2009 2010 2011 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2432 2433 2434 2435 2440 2441 2442 2443 2448 2449 2450 2451 2456 2457 2458 2459 2496 2497 2498 2499 2504 2505 2506 2507 2512 2513 2514 2515 2520 2521 2522 2523 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2944 2945 2946 2947 2952 2953 2954 2955 2960 2961 2962 2963 2968 2969 2970 2971 3008 3009 3010 3011 3016 3017 3018 3019 3024 3025 3026 3027 3032 3033 3034 3035 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[1/3][2/6][0/0][1/4] = 1924 1925 1926 1927 1932 1933 1934 1935 1940 1941 1942 1943 1948 1949 1950 1951 1988 1989 1990 1991 1996 1997 1998 1999 2004 2005 2006 2007 2012 2013 2014 2015 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2436 2437 2438 2439 2444 2445 2446 2447 2452 2453 2454 2455 2460 2461 2462 2463 2500 2501 2502 2503 2508 2509 2510 2511 2516 2517 2518 2519 2524 2525 2526 2527 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2948 2949 2950 2951 2956 2957 2958 2959 2964 2965 2966 2967 2972 2973 2974 2975 3012 3013 3014 3015 3020 3021 3022 3023 3028 3029 3030 3031 3036 3037 3038 3039 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[1/3][2/6][1/4][0/0] = 1952 1953 1954 1955 1960 1961 1962 1963 1968 1969 1970 1971 1976 1977 1978 1979 2016 2017 2018 2019 2024 2025 2026 2027 2032 2033 2034 2035 2040 2041 2042 2043 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2464 2465 2466 2467 2472 2473 2474 2475 2480 2481 2482 2483 2488 2489 2490 2491 2528 2529 2530 2531 2536 2537 2538 2539 2544 2545 2546 2547 2552 2553 2554 2555 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2976 2977 2978 2979 2984 2985 2986 2987 2992 2993 2994 2995 3000 3001 3002 3003 3040 3041 3042 3043 3048 3049 3050 3051 3056 
3057 3058 3059 3064 3065 3066 3067 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[1/3][2/6][1/4][1/4] = 1956 1957 1958 1959 1964 1965 1966 1967 1972 1973 1974 1975 1980 1981 1982 1983 2020 2021 2022 2023 2028 2029 2030 2031 2036 2037 2038 2039 2044 2045 2046 2047 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2468 2469 2470 2471 2476 2477 2478 2479 2484 2485 2486 2487 2492 2493 2494 2495 2532 2533 2534 2535 2540 2541 2542 2543 2548 2549 2550 2551 2556 2557 2558 2559 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 2980 2981 2982 2983 2988 2989 2990 2991 2996 2997 2998 2999 3004 3005 3006 3007 3044 3045 3046 3047 3052 3053 3054 3055 3060 3061 3062 3063 3068 3069 3070 3071 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][0/0][0/0][0/0] = 3072 3073 3074 3075 3080 3081 3082 3083 3088 3089 3090 3091 3096 3097 3098 3099 3136 3137 3138 3139 3144 3145 3146 3147 3152 3153 3154 3155 3160 3161 3162 3163 3200 3201 3202 3203 3208 3209 3210 3211 3216 3217 3218 3219 3224 3225 3226 3227 3584 3585 3586 3587 3592 3593 3594 3595 3600 3601 3602 3603 3608 3609 3610 3611 3648 3649 3650 3651 3656 3657 3658 3659 3664 3665 3666 3667 3672 3673 3674 3675 3712 3713 3714 3715 3720 3721 3722 3723 3728 3729 3730 3731 3736 3737 3738 3739 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][0/0][0/0][1/4] = 3076 3077 3078 3079 3084 3085 3086 3087 3092 3093 3094 3095 3100 3101 3102 3103 3140 3141 3142 3143 3148 3149 3150 3151 3156 3157 3158 3159 3164 3165 3166 3167 3204 3205 3206 3207 3212 3213 3214 3215 3220 3221 3222 3223 3228 3229 3230 3231 3588 3589 3590 3591 3596 3597 3598 3599 3604 3605 3606 3607 3612 3613 3614 3615 3652 3653 3654 3655 3660 3661 3662 3663 3668 3669 3670 3671 3676 3677 3678 3679 3716 3717 3718 3719 3724 3725 3726 3727 3732 3733 3734 3735 3740 3741 3742 3743 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 
-1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][0/0][1/4][0/0] = 3104 3105 3106 3107 3112 3113 3114 3115 3120 3121 3122 3123 3128 3129 3130 3131 3168 3169 3170 3171 3176 3177 3178 3179 3184 3185 3186 3187 3192 3193 3194 3195 3232 3233 3234 3235 3240 3241 3242 3243 3248 3249 3250 3251 3256 3257 3258 3259 3616 3617 3618 3619 3624 3625 3626 3627 3632 3633 3634 3635 3640 3641 3642 3643 3680 3681 3682 3683 3688 3689 3690 3691 3696 3697 3698 3699 3704 3705 3706 3707 3744 3745 3746 3747 3752 3753 3754 3755 3760 3761 3762 3763 3768 3769 3770 3771 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][0/0][1/4][1/4] = 3108 3109 3110 3111 3116 3117 3118 3119 3124 3125 3126 3127 3132 3133 3134 3135 3172 3173 3174 3175 3180 3181 3182 3183 3188 3189 3190 3191 3196 3197 3198 3199 3236 3237 3238 3239 3244 3245 3246 3247 3252 3253 3254 3255 3260 3261 3262 3263 3620 3621 3622 3623 3628 3629 3630 3631 3636 3637 3638 3639 3644 3645 3646 3647 3684 3685 3686 3687 3692 3693 3694 3695 3700 3701 3702 3703 3708 3709 3710 3711 3748 3749 3750 3751 3756 3757 3758 3759 3764 3765 3766 3767 3772 3773 3774 3775 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][1/3][0/0][0/0] = 3264 3265 3266 3267 3272 3273 3274 3275 3280 3281 3282 3283 3288 3289 3290 3291 3328 3329 3330 3331 3336 3337 3338 3339 3344 3345 3346 3347 3352 3353 3354 3355 3392 3393 3394 3395 3400 3401 3402 3403 3408 3409 3410 3411 3416 3417 3418 3419 3776 3777 3778 3779 3784 3785 3786 3787 3792 3793 3794 3795 3800 3801 3802 3803 3840 3841 3842 3843 3848 3849 3850 3851 3856 3857 3858 3859 3864 3865 3866 3867 3904 3905 3906 3907 3912 3913 3914 3915 3920 3921 3922 3923 3928 3929 3930 3931 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][1/3][0/0][1/4] = 3268 
3269 3270 3271 3276 3277 3278 3279 3284 3285 3286 3287 3292 3293 3294 3295 3332 3333 3334 3335 3340 3341 3342 3343 3348 3349 3350 3351 3356 3357 3358 3359 3396 3397 3398 3399 3404 3405 3406 3407 3412 3413 3414 3415 3420 3421 3422 3423 3780 3781 3782 3783 3788 3789 3790 3791 3796 3797 3798 3799 3804 3805 3806 3807 3844 3845 3846 3847 3852 3853 3854 3855 3860 3861 3862 3863 3868 3869 3870 3871 3908 3909 3910 3911 3916 3917 3918 3919 3924 3925 3926 3927 3932 3933 3934 3935 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][1/3][1/4][0/0] = 3296 3297 3298 3299 3304 3305 3306 3307 3312 3313 3314 3315 3320 3321 3322 3323 3360 3361 3362 3363 3368 3369 3370 3371 3376 3377 3378 3379 3384 3385 3386 3387 3424 3425 3426 3427 3432 3433 3434 3435 3440 3441 3442 3443 3448 3449 3450 3451 3808 3809 3810 3811 3816 3817 3818 3819 3824 3825 3826 3827 3832 3833 3834 3835 3872 3873 3874 3875 3880 3881 3882 3883 3888 3889 3890 3891 3896 3897 3898 3899 3936 3937 3938 3939 3944 3945 3946 3947 3952 3953 3954 3955 3960 3961 3962 3963 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][1/3][1/4][1/4] = 3300 3301 3302 3303 3308 3309 3310 3311 3316 3317 3318 3319 3324 3325 3326 3327 3364 3365 3366 3367 3372 3373 3374 3375 3380 3381 3382 3383 3388 3389 3390 3391 3428 3429 3430 3431 3436 3437 3438 3439 3444 3445 3446 3447 3452 3453 3454 3455 3812 3813 3814 3815 3820 3821 3822 3823 3828 3829 3830 3831 3836 3837 3838 3839 3876 3877 3878 3879 3884 3885 3886 3887 3892 3893 3894 3895 3900 3901 3902 3903 3940 3941 3942 3943 3948 3949 3950 3951 3956 3957 3958 3959 3964 3965 3966 3967 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][2/6][0/0][0/0] = 3456 3457 3458 3459 3464 3465 3466 3467 3472 3473 3474 3475 
3480 3481 3482 3483 3520 3521 3522 3523 3528 3529 3530 3531 3536 3537 3538 3539 3544 3545 3546 3547 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 3968 3969 3970 3971 3976 3977 3978 3979 3984 3985 3986 3987 3992 3993 3994 3995 4032 4033 4034 4035 4040 4041 4042 4043 4048 4049 4050 4051 4056 4057 4058 4059 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][2/6][0/0][1/4] = 3460 3461 3462 3463 3468 3469 3470 3471 3476 3477 3478 3479 3484 3485 3486 3487 3524 3525 3526 3527 3532 3533 3534 3535 3540 3541 3542 3543 3548 3549 3550 3551 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 3972 3973 3974 3975 3980 3981 3982 3983 3988 3989 3990 3991 3996 3997 3998 3999 4036 4037 4038 4039 4044 4045 4046 4047 4052 4053 4054 4055 4060 4061 4062 4063 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][2/6][1/4][0/0] = 3488 3489 3490 3491 3496 3497 3498 3499 3504 3505 3506 3507 3512 3513 3514 3515 3552 3553 3554 3555 3560 3561 3562 3563 3568 3569 3570 3571 3576 3577 3578 3579 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 4000 4001 4002 4003 4008 4009 4010 4011 4016 4017 4018 4019 4024 4025 4026 4027 4064 4065 4066 4067 4072 4073 4074 4075 4080 4081 4082 4083 4088 4089 4090 4091 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -[2/6][2/6][1/4][1/4] = 3492 3493 3494 3495 3500 3501 3502 3503 3508 3509 3510 3511 3516 3517 3518 3519 3556 3557 3558 3559 3564 3565 3566 3567 3572 3573 3574 3575 3580 3581 3582 3583 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 4004 4005 4006 4007 4012 4013 4014 4015 4020 4021 4022 4023 4028 4029 4030 4031 4068 4069 4070 4071 
4076 4077 4078 4079 4084 4085 4086 4087 4092 4093 4094 4095 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 +[1/3][2/6][0/0][0/0] = 1920 1921 1922 1923 1928 1929 1930 1931 1936 1937 1938 1939 1944 1945 1946 1947 1984 1985 1986 1987 1992 1993 1994 1995 2000 2001 2002 2003 2008 2009 2010 2011 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2432 2433 2434 2435 2440 2441 2442 2443 2448 2449 2450 2451 2456 2457 2458 2459 2496 2497 2498 2499 2504 2505 2506 2507 2512 2513 2514 2515 2520 2521 2522 2523 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2944 2945 2946 2947 2952 2953 2954 2955 2960 2961 2962 2963 2968 2969 2970 2971 3008 3009 3010 3011 3016 3017 3018 3019 3024 3025 3026 3027 3032 3033 3034 3035 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[1/3][2/6][0/0][1/4] = 1924 1925 1926 1927 1932 1933 1934 1935 1940 1941 1942 1943 1948 1949 1950 1951 1988 1989 1990 1991 1996 1997 1998 1999 2004 2005 2006 2007 2012 2013 2014 2015 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2436 2437 2438 2439 2444 2445 2446 2447 2452 2453 2454 2455 2460 2461 2462 2463 2500 2501 2502 2503 2508 2509 2510 2511 2516 2517 2518 2519 2524 2525 2526 2527 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2948 2949 2950 2951 2956 2957 2958 2959 2964 2965 2966 2967 2972 2973 2974 2975 3012 3013 3014 3015 3020 3021 3022 3023 3028 3029 3030 3031 3036 3037 3038 3039 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[1/3][2/6][1/4][0/0] = 1952 1953 1954 1955 1960 1961 1962 1963 1968 1969 1970 1971 1976 1977 1978 1979 2016 2017 2018 2019 2024 2025 2026 2027 2032 2033 2034 2035 2040 2041 2042 2043 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2464 2465 2466 2467 2472 2473 2474 2475 2480 2481 2482 2483 2488 2489 2490 2491 2528 2529 2530 2531 2536 2537 2538 2539 2544 2545 2546 2547 2552 2553 2554 2555 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2976 2977 2978 2979 2984 2985 2986 2987 2992 2993 2994 2995 3000 3001 3002 3003 3040 3041 3042 3043 3048 3049 3050 3051 3056 3057 3058 3059 
3064 3065 3066 3067 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[1/3][2/6][1/4][1/4] = 1956 1957 1958 1959 1964 1965 1966 1967 1972 1973 1974 1975 1980 1981 1982 1983 2020 2021 2022 2023 2028 2029 2030 2031 2036 2037 2038 2039 2044 2045 2046 2047 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2468 2469 2470 2471 2476 2477 2478 2479 2484 2485 2486 2487 2492 2493 2494 2495 2532 2533 2534 2535 2540 2541 2542 2543 2548 2549 2550 2551 2556 2557 2558 2559 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 2980 2981 2982 2983 2988 2989 2990 2991 2996 2997 2998 2999 3004 3005 3006 3007 3044 3045 3046 3047 3052 3053 3054 3055 3060 3061 3062 3063 3068 3069 3070 3071 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][0/0][0/0][0/0] = 3072 3073 3074 3075 3080 3081 3082 3083 3088 3089 3090 3091 3096 3097 3098 3099 3136 3137 3138 3139 3144 3145 3146 3147 3152 3153 3154 3155 3160 3161 3162 3163 3200 3201 3202 3203 3208 3209 3210 3211 3216 3217 3218 3219 3224 3225 3226 3227 3584 3585 3586 3587 3592 3593 3594 3595 3600 3601 3602 3603 3608 3609 3610 3611 3648 3649 3650 3651 3656 3657 3658 3659 3664 3665 3666 3667 3672 3673 3674 3675 3712 3713 3714 3715 3720 3721 3722 3723 3728 3729 3730 3731 3736 3737 3738 3739 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][0/0][0/0][1/4] = 3076 3077 3078 3079 3084 3085 3086 3087 3092 3093 3094 3095 3100 3101 3102 3103 3140 3141 3142 3143 3148 3149 3150 3151 3156 3157 3158 3159 3164 3165 3166 3167 3204 3205 3206 3207 3212 3213 3214 3215 3220 3221 3222 3223 3228 3229 3230 3231 3588 3589 3590 3591 3596 3597 3598 3599 3604 3605 3606 3607 3612 3613 3614 3615 3652 3653 3654 3655 3660 3661 3662 3663 3668 3669 3670 3671 3676 3677 3678 3679 3716 3717 3718 3719 3724 3725 3726 3727 3732 3733 3734 3735 3740 3741 3742 3743 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][0/0][1/4][0/0] = 3104 3105 3106 3107 3112 3113 3114 3115 3120 3121 3122 3123 3128 3129 3130 3131 3168 3169 3170 3171 3176 3177 3178 3179 
3184 3185 3186 3187 3192 3193 3194 3195 3232 3233 3234 3235 3240 3241 3242 3243 3248 3249 3250 3251 3256 3257 3258 3259 3616 3617 3618 3619 3624 3625 3626 3627 3632 3633 3634 3635 3640 3641 3642 3643 3680 3681 3682 3683 3688 3689 3690 3691 3696 3697 3698 3699 3704 3705 3706 3707 3744 3745 3746 3747 3752 3753 3754 3755 3760 3761 3762 3763 3768 3769 3770 3771 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][0/0][1/4][1/4] = 3108 3109 3110 3111 3116 3117 3118 3119 3124 3125 3126 3127 3132 3133 3134 3135 3172 3173 3174 3175 3180 3181 3182 3183 3188 3189 3190 3191 3196 3197 3198 3199 3236 3237 3238 3239 3244 3245 3246 3247 3252 3253 3254 3255 3260 3261 3262 3263 3620 3621 3622 3623 3628 3629 3630 3631 3636 3637 3638 3639 3644 3645 3646 3647 3684 3685 3686 3687 3692 3693 3694 3695 3700 3701 3702 3703 3708 3709 3710 3711 3748 3749 3750 3751 3756 3757 3758 3759 3764 3765 3766 3767 3772 3773 3774 3775 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][1/3][0/0][0/0] = 3264 3265 3266 3267 3272 3273 3274 3275 3280 3281 3282 3283 3288 3289 3290 3291 3328 3329 3330 3331 3336 3337 3338 3339 3344 3345 3346 3347 3352 3353 3354 3355 3392 3393 3394 3395 3400 3401 3402 3403 3408 3409 3410 3411 3416 3417 3418 3419 3776 3777 3778 3779 3784 3785 3786 3787 3792 3793 3794 3795 3800 3801 3802 3803 3840 3841 3842 3843 3848 3849 3850 3851 3856 3857 3858 3859 3864 3865 3866 3867 3904 3905 3906 3907 3912 3913 3914 3915 3920 3921 3922 3923 3928 3929 3930 3931 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][1/3][0/0][1/4] = 3268 3269 3270 3271 3276 3277 3278 3279 3284 3285 3286 3287 3292 3293 3294 3295 3332 3333 3334 3335 3340 3341 3342 3343 3348 3349 3350 3351 3356 3357 3358 3359 3396 3397 3398 3399 3404 3405 3406 3407 3412 3413 3414 3415 3420 3421 3422 3423 3780 3781 3782 3783 3788 3789 3790 3791 3796 3797 3798 3799 3804 3805 3806 3807 
3844 3845 3846 3847 3852 3853 3854 3855 3860 3861 3862 3863 3868 3869 3870 3871 3908 3909 3910 3911 3916 3917 3918 3919 3924 3925 3926 3927 3932 3933 3934 3935 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][1/3][1/4][0/0] = 3296 3297 3298 3299 3304 3305 3306 3307 3312 3313 3314 3315 3320 3321 3322 3323 3360 3361 3362 3363 3368 3369 3370 3371 3376 3377 3378 3379 3384 3385 3386 3387 3424 3425 3426 3427 3432 3433 3434 3435 3440 3441 3442 3443 3448 3449 3450 3451 3808 3809 3810 3811 3816 3817 3818 3819 3824 3825 3826 3827 3832 3833 3834 3835 3872 3873 3874 3875 3880 3881 3882 3883 3888 3889 3890 3891 3896 3897 3898 3899 3936 3937 3938 3939 3944 3945 3946 3947 3952 3953 3954 3955 3960 3961 3962 3963 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][1/3][1/4][1/4] = 3300 3301 3302 3303 3308 3309 3310 3311 3316 3317 3318 3319 3324 3325 3326 3327 3364 3365 3366 3367 3372 3373 3374 3375 3380 3381 3382 3383 3388 3389 3390 3391 3428 3429 3430 3431 3436 3437 3438 3439 3444 3445 3446 3447 3452 3453 3454 3455 3812 3813 3814 3815 3820 3821 3822 3823 3828 3829 3830 3831 3836 3837 3838 3839 3876 3877 3878 3879 3884 3885 3886 3887 3892 3893 3894 3895 3900 3901 3902 3903 3940 3941 3942 3943 3948 3949 3950 3951 3956 3957 3958 3959 3964 3965 3966 3967 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][2/6][0/0][0/0] = 3456 3457 3458 3459 3464 3465 3466 3467 3472 3473 3474 3475 3480 3481 3482 3483 3520 3521 3522 3523 3528 3529 3530 3531 3536 3537 3538 3539 3544 3545 3546 3547 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 3968 3969 3970 3971 3976 3977 3978 3979 3984 3985 3986 3987 3992 3993 3994 3995 4032 4033 4034 4035 4040 4041 4042 4043 4048 4049 4050 4051 4056 4057 4058 4059 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][2/6][0/0][1/4] = 3460 3461 3462 
3463 3468 3469 3470 3471 3476 3477 3478 3479 3484 3485 3486 3487 3524 3525 3526 3527 3532 3533 3534 3535 3540 3541 3542 3543 3548 3549 3550 3551 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 3972 3973 3974 3975 3980 3981 3982 3983 3988 3989 3990 3991 3996 3997 3998 3999 4036 4037 4038 4039 4044 4045 4046 4047 4052 4053 4054 4055 4060 4061 4062 4063 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][2/6][1/4][0/0] = 3488 3489 3490 3491 3496 3497 3498 3499 3504 3505 3506 3507 3512 3513 3514 3515 3552 3553 3554 3555 3560 3561 3562 3563 3568 3569 3570 3571 3576 3577 3578 3579 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 4000 4001 4002 4003 4008 4009 4010 4011 4016 4017 4018 4019 4024 4025 4026 4027 4064 4065 4066 4067 4072 4073 4074 4075 4080 4081 4082 4083 4088 4089 4090 4091 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ +[2/6][2/6][1/4][1/4] = 3492 3493 3494 3495 3500 3501 3502 3503 3508 3509 3510 3511 3516 3517 3518 3519 3556 3557 3558 3559 3564 3565 3566 3567 3572 3573 3574 3575 3580 3581 3582 3583 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 4004 4005 4006 4007 4012 4013 4014 4015 4020 4021 4022 4023 4028 4029 4030 4031 4068 4069 4070 4071 4076 4077 4078 4079 4084 4085 4086 4087 4092 4093 4094 4095 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ diff --git a/nczarr_test/ref_rem.dmp b/nczarr_test/ref_rem.dmp index 1674a35e11..4fa2c87851 100644 --- a/nczarr_test/ref_rem.dmp +++ b/nczarr_test/ref_rem.dmp @@ -1,10 +1,28 @@ rank=2 dims=(8,8) chunks=(3,3) -[0/0][0/0] = 00 01 02 | 08 09 10 | 16 17 18 -[0/0][1/3] = 03 04 05 | 11 12 13 | 19 20 21 -[0/0][2/6] = 06 07 -1 | 14 15 -1 | 22 23 -1 -[1/3][0/0] = 24 25 26 | 32 33 34 | 40 41 42 -[1/3][1/3] = 27 28 29 | 35 36 37 | 43 44 45 -[1/3][2/6] = 30 31 -1 | 38 39 -1 | 46 47 -1 -[2/6][0/0] = 48 49 50 | 56 57 58 | -1 -1 -1 
-[2/6][1/3] = 51 52 53 | 59 60 61 | -1 -1 -1 -[2/6][2/6] = 54 55 -1 | 62 63 -1 | -1 -1 -1 +[0/0][0/0] = 00 01 02 + 08 09 10 + 16 17 18 +[0/0][1/3] = 03 04 05 + 11 12 13 + 19 20 21 +[0/0][2/6] = 06 07 _ + 14 15 _ + 22 23 _ +[1/3][0/0] = 24 25 26 + 32 33 34 + 40 41 42 +[1/3][1/3] = 27 28 29 + 35 36 37 + 43 44 45 +[1/3][2/6] = 30 31 _ + 38 39 _ + 46 47 _ +[2/6][0/0] = 48 49 50 + 56 57 58 + _ _ _ +[2/6][1/3] = 51 52 53 + 59 60 61 + _ _ _ +[2/6][2/6] = 54 55 _ + 62 63 _ + _ _ _ diff --git a/nczarr_test/ref_zarr_test_data.cdl.gz b/nczarr_test/ref_zarr_test_data.cdl.gz index 85ab24c24e..5f1f2dee21 100644 Binary files a/nczarr_test/ref_zarr_test_data.cdl.gz and b/nczarr_test/ref_zarr_test_data.cdl.gz differ diff --git a/nczarr_test/ref_zarr_test_data_2d.cdl.gz b/nczarr_test/ref_zarr_test_data_2d.cdl.gz new file mode 100644 index 0000000000..e8d2e5eed0 Binary files /dev/null and b/nczarr_test/ref_zarr_test_data_2d.cdl.gz differ diff --git a/nczarr_test/run_cachetest.sh b/nczarr_test/run_cachetest.sh index fbd446dae5..80562bbd71 100755 --- a/nczarr_test/run_cachetest.sh +++ b/nczarr_test/run_cachetest.sh @@ -36,5 +36,3 @@ ${execdir}/test_readcaching testcase file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_chunkcases.sh b/nczarr_test/run_chunkcases.sh index 02adbef9bd..492c8f8b23 100755 --- a/nczarr_test/run_chunkcases.sh +++ b/nczarr_test/run_chunkcases.sh @@ -21,7 +21,7 @@ s3isolate "testdir_chunkcases" THISDIR=`pwd` cd $ISOPATH -TC="${execdir}/tst_chunkcases -4" +TC="${execdir}/test_chunkcases -4" ZM="${execdir}/zmapio -t int" remfile() { @@ -58,25 +58,6 @@ makefile() { testcasesxfail() { zext=$1 -echo ""; echo "*** XFAIL Test format $1" -# Test whole chunk write and read -echo "Test whole chunk write then read" -makefile tmp_xwhole -rm -f tmp_xwhole_${zext}.txt 
tmp_xwhole_${zext}.cdl tmp_xerr_${zext}.txt -# This should fail -if ! $TC -d 8,8 -c 4,4 -f 4,3 -e 4,4 -X w -OWw $F >> tmp_xerr_${zext}.txt ; then -echo "XFAIL: wholechunk with bad -f" -else -echo "Unexpected PASS: wholechunk with bad -f" -exit 1 -fi -remfile $file -if ! $TC -d 8,8 -c 4,4 -f 4,4 -e 1,4 -X w -OWw $F >> tmp_xerr_${zext}.txt ; then -echo "XFAIL: wholechunk with bad -e" -else -echo "Unexpected PASS: wholechunk with bad -e" -exit 1 -fi } # testcasesxfail() testcasespass() { @@ -86,8 +67,8 @@ makefile tmp_whole rm -f tmp_whole_${zext}.txt tmp_whole_${zext}.cdl tmp_err_${zext}.txt makefile tmp_whole # This should succeed -$TC -d 8,8 -c 4,4 -f 4,4 -e 4,4 -X w -OWw $F -$TC -d 8,8 -c 4,4 -f 4,4 -e 4,4 -X w -OWr $F > tmp_whole_${zext}.txt +$TC -d 8,8 -c 4,4 -s 4,4 -e 4,4 -OWcw $F +$TC -d 8,8 -c 4,4 -s 4,4 -e 4,4 -vn -OWr $F > tmp_whole_${zext}.txt diff -b ${srcdir}/ref_whole.txt tmp_whole_${zext}.txt ${NCDUMP} $F > tmp_whole_${zext}.cdl diff -b ${srcdir}/ref_whole.cdl tmp_whole_${zext}.cdl @@ -96,8 +77,8 @@ diff -b ${srcdir}/ref_whole.cdl tmp_whole_${zext}.cdl echo "Test chunk skipping during read" makefile tmp_skip rm -f tmp_skip_${zext}.txt tmp_skip_${zext}.cdl -$TC -d 6,6 -c 2,2 -Ow $F -$TC -s 5,5 -p 6,6 -Or $F > tmp_skip_${zext}.txt +$TC -d 6,6 -c 2,2 -s 0,0 -e 6,6 -Ocw $F +$TC -i 5,5 -p 6,6 -Or $F > tmp_skip_${zext}.txt ${NCDUMP} $F > tmp_skip_${zext}.cdl diff -b ${srcdir}/ref_skip.txt tmp_skip_${zext}.txt diff -b ${srcdir}/ref_skip.cdl tmp_skip_${zext}.cdl @@ -105,14 +86,14 @@ diff -b ${srcdir}/ref_skip.cdl tmp_skip_${zext}.cdl echo "Test chunk skipping during write" makefile tmp_skipw rm -f tmp_skipw_${zext}.cdl -$TC -d 6,6 -s 5,5 -p 6,6 -Ow $F +$TC -d 6,6 -c 2,2 -i 5,5 -p 6,6 -Ocw $F ${NCDUMP} $F > tmp_skipw_${zext}.cdl diff -b ${srcdir}/ref_skipw.cdl tmp_skipw_${zext}.cdl echo "Test dimlen % chunklen != 0" makefile tmp_rem rm -f tmp_rem_${zext}.txt tmp_rem_${zext}.cdl -$TC -d 8,8 -c 3,3 -Ow $F +$TC -d 8,8 -c 3,3 -s 0,0 -p 8,8 -Ocw $F ${NCDUMP} $F > 
tmp_rem_${zext}.cdl diff -b ${srcdir}/ref_rem.cdl tmp_rem_${zext}.cdl ${execdir}/ncdumpchunks -v v $F > tmp_rem_${zext}.txt @@ -121,7 +102,7 @@ diff -b ${srcdir}/ref_rem.dmp tmp_rem_${zext}.txt echo "Test rank > 2" makefile tmp_ndims rm -f tmp_ndims_${zext}.txt tmp_ndims_${zext}.cdl -$TC -d 8,8,8,8 -c 3,3,4,4 -Ow $F +$TC -d 8,8,8,8 -c 3,3,4,4 -s 0,0,0,0 -p 8,8,8,8 -Ocw $F ${NCDUMP} $F > tmp_ndims_${zext}.cdl diff -b ${srcdir}/ref_ndims.cdl tmp_ndims_${zext}.cdl ${execdir}/ncdumpchunks -v v $F > tmp_ndims_${zext}.dmp @@ -130,10 +111,7 @@ diff -b ${srcdir}/ref_ndims.dmp tmp_ndims_${zext}.dmp echo "Test miscellaneous 1" makefile tmp_misc1 rm -f tmp_misc1_${zext}.txt tmp_misc1_${zext}.cdl -$TC -d 6,12,4 -c 2,3,1 -f 0,0,0 -e 6,1,4 -Ow $F -if test "x$FEATURE_S3TESTS" = xyes ; then -${S3UTIL} -u 'https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data' -k '/netcdf-c' list -fi +$TC -d 6,12,4 -c 2,3,1 -s 0,0,0 -e 6,1,4 -Ocw $F ${NCDUMP} $F > tmp_misc1_${zext}.cdl diff -b ${srcdir}/ref_misc1.cdl tmp_misc1_${zext}.cdl ${execdir}/ncdumpchunks -v v $F > tmp_misc1_${zext}.dmp @@ -148,5 +126,3 @@ testcases() { testcases file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcases zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcases s3; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_external.sh b/nczarr_test/run_external.sh index c243bbbde2..bc019bc6da 100755 --- a/nczarr_test/run_external.sh +++ b/nczarr_test/run_external.sh @@ -16,15 +16,21 @@ cd $ISOPATH TESTCASES= if test "x$FEATURE_BYTERANGE" = xyes && test "x$FEATURE_S3" = xyes && test "x$FP_ISCYGWIN" = x ; then -TESTCASES="${TESTCASES} OR_ABI;http://s3.amazonaws.com/noaa-goes16/ABI-L1b-RadF/2022/001/18/OR_ABI-L1b-RadF-M6C01_G16_s20220011800205_e20220011809513_c20220011809562.nc#mode=bytes,s3" +TESTCASES="${TESTCASES} 
OR_ABI;;http://s3.amazonaws.com/noaa-goes16/ABI-L1b-RadF/2022/001/18/OR_ABI-L1b-RadF-M6C01_G16_s20220011800205_e20220011809513_c20220011809562.nc#mode=bytes,s3" +TESTCASES="${TESTCASES} cesmLE;blosc;http://s3.us-west-2.amazonaws.com/ncar-cesm-lens/atm/daily/cesmLE-20C-FLNS.zarr#mode=zarr,s3" fi testcase() { NM=`echo "$1" | cut -d';' -f1` -URL=`echo "$1" | cut -d';' -f2` +FILT=`echo "$1" | cut -d';' -f2` +URL=`echo "$1" | cut -d';' -f3` echo "*** Test: $NM = $URL" rm -f "tmp_external_$NM.cdl" -${NCDUMP} -h -n $NM $URL > "tmp_external_${NM}.cdl" +if test "x$FILT" != x ; then + if avail $FILT; then + ${NCDUMP} -h -s -n $NM $URL > "tmp_external_${NM}.cdl" + fi +fi } if test "x$FEATURE_S3" = xyes ; then @@ -32,5 +38,3 @@ for t in $TESTCASES ; do testcase "$t" done fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_fillonlyz.sh b/nczarr_test/run_fillonlyz.sh index 27977c1418..ef324c5f49 100755 --- a/nczarr_test/run_fillonlyz.sh +++ b/nczarr_test/run_fillonlyz.sh @@ -22,11 +22,9 @@ zext=$1 fileargs tmp_fillonly deletemap $zext $file ${NCGEN} -4 -b -o "$fileurl" $srcdir/ref_fillonly.cdl -${execdir}/tst_fillonlyz${ext} "$fileurl" +${execdir}/test_fillonlyz${ext} "$fileurl" } testcase file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_filter.sh b/nczarr_test/run_filter.sh index 9ed36c1f20..a62f9970ca 100755 --- a/nczarr_test/run_filter.sh +++ b/nczarr_test/run_filter.sh @@ -72,7 +72,7 @@ if ! 
test -f ${MISCPATH} ; then echo "Unable to locate ${MISCPATH}"; exit 1; fi testapi() { zext=$1 -echo "*** Testing dynamic filters using API for map=$zext" +echo "*** Testing dynamic filters using API for storage format $zext" fileargs tmp_api deletemap $zext $file ${execdir}/testfilter $fileurl @@ -85,7 +85,7 @@ echo "*** Pass: API dynamic filter for map=$zext" testmisc() { zext=$1 -echo "*** Testing dynamic filters parameter passing for map $zext" +echo "*** Testing dynamic filters parameter passing for storage format $zext" fileargs tmp_misc deletemap $zext $file ${execdir}/testfilter_misc $fileurl @@ -100,12 +100,12 @@ cat >./tmp_misc2_$zext.txt < ./tmp_ng_$zext.txt # Remove irrelevant -s output sclean ./tmp_ng_$zext.txt ./tmp_ng2_$zext.txt diff -b -w ${srcdir}/ref_bzip2.cdl ./tmp_ng2_$zext.txt -echo "*** Pass: ncgen dynamic filter for map $zext" +echo "*** Pass: ncgen dynamic filter for storage format $zext" } testncp() { zext=$1 -echo "*** Testing dynamic filters using nccopy for map $zext" +echo "*** Testing dynamic filters using nccopy for storage format $zext" fileargs tmp_unfiltered deletemap $zext $file # Create our input test files @@ -130,22 +130,22 @@ ${NCDUMP} -s -n filtered $fileurl > ./tmp_ncp_$zext.txt # Remove irrelevant -s output sclean ./tmp_ncp_$zext.txt ./tmp_ncp_$zext.dump diff -b -w ${srcdir}/ref_filtered.cdl ./tmp_ncp_$zext.dump -echo " *** Pass: nccopy simple filter for map $zext" +echo " *** Pass: nccopy simple filter for storage format $zext" } testngc() { zext=$1 -echo "*** Testing dynamic filters using ncgen with -lc for map $zext" +echo "*** Testing dynamic filters using ncgen with -lc for storage format $zext" fileargs tmp_ngc deletemap $zext $file ${NCGEN} -lc -4 ${srcdir}/../nc_test4/bzip2.cdl > tmp_ngc.c diff -b -w ${srcdir}/../nc_test4/../nc_test4/ref_bzip2.c ./tmp_ngc.c -echo "*** Pass: ncgen dynamic filter for map $zext" +echo "*** Pass: ncgen dynamic filter for storage format $zext" } testmulti() { zext=$1 -echo "*** Testing 
multiple filters for map $zext" +echo "*** Testing multiple filters for storage format $zext" fileargs tmp_multi deletemap $zext $file ${execdir}/testfilter_multi $fileurl @@ -153,12 +153,12 @@ ${NCDUMP} -hsF -n multifilter $fileurl >./tmp_multi_$zext.cdl # Remove irrelevant -s output sclean ./tmp_multi_$zext.cdl ./tmp_smulti_$zext.cdl diff -b -w ${srcdir}/ref_multi.cdl ./tmp_smulti_$zext.cdl -echo "*** Pass: multiple filters for map $zext" +echo "*** Pass: multiple filters for storage format $zext" } testrep() { zext=$1 -echo "*** Testing filter re-definition invocation for map $zext" +echo "*** Testing filter re-definition invocation for storage format $zext" fileargs tmp_rep deletemap $zext $file ${execdir}/testfilter_repeat $fileurl >tmp_rep_$zext.txt @@ -167,12 +167,12 @@ diff -b -w ${srcdir}/../nc_test4/ref_filter_repeat.txt tmp_rep_$zext.txt testorder() { zext=$1 -echo "*** Testing multiple filter order of invocation on create for map $zext" +echo "*** Testing multiple filter order of invocation on create for storage format $zext" fileargs tmp_order deletemap $zext $file ${execdir}/testfilter_order create $fileurl >tmp_order_$zext.txt diff -b -w ${srcdir}/../nc_test4/ref_filter_order_create.txt tmp_order_$zext.txt -echo "*** Testing multiple filter order of invocation on read for map $zext" +echo "*** Testing multiple filter order of invocation on read for storage format $zext" ${execdir}/testfilter_order read $fileurl >tmp_order_rd_$zext.txt diff -b -w ${srcdir}/../nc_test4/ref_filter_order_read.txt tmp_order_rd_$zext.txt } @@ -180,5 +180,3 @@ diff -b -w ${srcdir}/../nc_test4/ref_filter_order_read.txt tmp_order_rd_$zext.tx testset file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testset zip ; fi if test "x$FEATURE_S3TESTS" = xyes ; then testset s3 ; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_interop.sh b/nczarr_test/run_interop.sh index 534bcd5874..3579c37384 100755 --- 
a/nczarr_test/run_interop.sh +++ b/nczarr_test/run_interop.sh @@ -7,6 +7,8 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi set -e +metaonly="-h" + s3isolate "testdir_interop" THISDIR=`pwd` cd $ISOPATH @@ -19,36 +21,38 @@ UH="${NCZARR_S3_TEST_HOST}" UB="${NCZARR_S3_TEST_BUCKET}" testcasefile() { + echo -e "\to Running File Testcase:\t$1\t$2\t$3" zext=file base=$1 mode=$2 - metaonly=$3 - if test "x$metaonly" = xmetaonly ; then flags="-h"; fi fileargs ${ISOPATH}/ref_$base "mode=$mode,$zext" rm -f tmp_${base}_${zext}.cdl - ${NCDUMP} $flags $fileurl > tmp_${base}_${zext}.cdl + ${NCDUMP} $metaonly $fileurl > tmp_${base}_${zext}.cdl diff -b ${srcdir}/ref_${base}.cdl tmp_${base}_${zext}.cdl } testcasezip() { + echo -e "\to Running Zip Testcase:\t$1\t$2" zext=zip base=$1 mode=$2 fileargs ${ISOPATH}/ref_$base "mode=$mode,$zext" rm -f tmp_${base}_${zext}.cdl - ${NCDUMP} -h $flags $fileurl > tmp_${base}_${zext}.cdl + ${NCDUMP} $metaonly $flags $fileurl > tmp_${base}_${zext}.cdl diff -b ${srcdir}/ref_${base}.cdl tmp_${base}_${zext}.cdl } testcases3() { + echo -e "\to Running S3 Testcase:\t$1\t$2" zext=s3 base=$1 mode=$2 rm -f tmp_${base}_${zext}.cdl url="https://${UH}/${UB}/${base}.zarr#mode=${mode},s3" - ${NCDUMP} $url > tmp_${base}_${zext}.cdl + # Dumping everything causes timeout so dump a single var + ${NCDUMP} -v "/group_with_dims/var2D" $flags $url > tmp_${base}_${zext}.cdl # Find the proper ref file - diff -b ${ISOPATH}/ref_${base}.cdl tmp_${base}_${zext}.cdl + diff -b ${ISOPATH}/ref_${base}_2d.cdl tmp_${base}_${zext}.cdl } testallcases() { @@ -58,19 +62,20 @@ case "$zext" in # need to unpack unzip ref_power_901_constants.zip >> tmp_ignore.txt mv ${ISOPATH}/ref_power_901_constants ${ISOPATH}/ref_power_901_constants.file - testcasefile power_901_constants zarr metaonly; # test xarray as default + testcasefile power_901_constants zarr; # test xarray as default ;; zip) # Move into position - testcasezip power_901_constants xarray metaonly + testcasezip power_901_constants 
xarray # Test large constant interoperability - testcasezip quotes zarr metaonly + testcasezip quotes zarr ;; s3) # Read a test case created by netcdf-java zarr. # unpack # Use gunzip because it always appears to be available gunzip -c ${srcdir}/ref_zarr_test_data.cdl.gz > ${ISOPATH}/ref_zarr_test_data.cdl + gunzip -c ${srcdir}/ref_zarr_test_data_2d.cdl.gz > ${ISOPATH}/ref_zarr_test_data_2d.cdl testcases3 zarr_test_data xarray ;; *) echo "unimplemented kind: $1" ; exit 1;; @@ -78,6 +83,7 @@ esac } # common setup + if ! test -f ${ISOPATH}/ref_power_901_constants.zip ; then cp -f ${srcdir}/ref_power_901_constants_orig.zip ${ISOPATH}/ref_power_901_constants.zip fi @@ -88,5 +94,3 @@ fi testallcases file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testallcases zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testallcases s3; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_it_chunks1.sh b/nczarr_test/run_it_chunks1.sh index c3efe9a61b..1d9c91cee7 100755 --- a/nczarr_test/run_it_chunks1.sh +++ b/nczarr_test/run_it_chunks1.sh @@ -1,6 +1,6 @@ #!/bin/sh -# Run (tst_chunks,tst_chunks2) X (file,zip,s3) +# Run (test_chunks,test_chunks2) X (file,zip,s3) if test "x$srcdir" = x ; then srcdir=`pwd`; fi @@ -15,8 +15,8 @@ set -e ittest() { extfor $1 if test "x$2" != x ; then CLOUD="-c $2"; fi -${execdir}/tst_chunks -e $1 $CLOUD -${execdir}/tst_chunks2 -e $1 $CLOUD +${execdir}/test_chunks -e $1 $CLOUD +${execdir}/test_chunks2 -e $1 $CLOUD } ittest file diff --git a/nczarr_test/run_jsonconvention.sh b/nczarr_test/run_jsonconvention.sh index ad35ea8910..64b629d858 100755 --- a/nczarr_test/run_jsonconvention.sh +++ b/nczarr_test/run_jsonconvention.sh @@ -38,5 +38,3 @@ diff -b ${srcdir}/ref_jsonconvention.zmap tmp_jsonconvention_clean_${zext}.txt testcase file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi - -if test "x$FEATURE_S3TESTS" = xyes 
; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_misc.sh b/nczarr_test/run_misc.sh index 2d58cf72b9..b9741c99e1 100755 --- a/nczarr_test/run_misc.sh +++ b/nczarr_test/run_misc.sh @@ -60,5 +60,3 @@ if test "x$FEATURE_S3TESTS" = xyes ; then testcase1 s3 testcase2 s3 fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_nccopyz.sh b/nczarr_test/run_nccopyz.sh index ae426a9d93..66b4286cd7 100755 --- a/nczarr_test/run_nccopyz.sh +++ b/nczarr_test/run_nccopyz.sh @@ -30,14 +30,14 @@ verifychunking() { testcase() { zext=$1 fileargs tmp -./tst_zchunks3 -e ${zext} +${execdir}/test_zchunks3 -e ${zext} echo "*** Test that nccopy -c can chunk files" ${NCCOPY} -M0 tmp_chunks3.nc "$fileurl" ${NCDUMP} -n tmp -sh "$fileurl" > tmp_nccz.cdl verifychunking tmp_nccz.cdl "ivar:_ChunkSizes=7,4,2,3,5,6,9;" "fvar:_ChunkSizes=9,6,5,3,2,4,7;" fileargs tmp_chunked -./tst_zchunks3 -e ${zext} +${execdir}/test_zchunks3 -e ${zext} ${NCCOPY} -M0 -c dim0/,dim1/1,dim2/,dim3/1,dim4/,dim5/1,dim6/ tmp_chunks3.nc "$fileurl" ${NCDUMP} -sh -n tmp "$fileurl" > tmp_chunked.cdl verifychunking tmp_chunked.cdl "ivar:_ChunkSizes=7,1,2,1,5,1,9;" "fvar:_ChunkSizes=9,1,5,1,2,1,7;" @@ -82,5 +82,3 @@ if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi echo "*** All nccopy nczarr tests passed!" - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_ncgen4.sh b/nczarr_test/run_ncgen4.sh index bd56d288bb..b2f6b0010d 100755 --- a/nczarr_test/run_ncgen4.sh +++ b/nczarr_test/run_ncgen4.sh @@ -2,7 +2,7 @@ # Tests for ncgen4 using list of test cdl files from the cdl4 # directory, and comparing output to expected results in the expected4 # directory. 
Note that these tests are run for classic files in -# tst_ncgen4_classic.sh +# test_ncgen4_classic.sh # Dennis Heimbigner if test "x$srcdir" = x ; then srcdir=`pwd`; fi @@ -95,5 +95,3 @@ if test "x$FEATURE_NCZARR_ZIP" = xyes ; then runtestset zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then runtestset s3; fi echo "*** PASSED ***" - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_nczarr_fill.sh b/nczarr_test/run_nczarr_fill.sh index c095fc0804..688fb1c24e 100755 --- a/nczarr_test/run_nczarr_fill.sh +++ b/nczarr_test/run_nczarr_fill.sh @@ -68,5 +68,3 @@ if test "x$FEATURE_HDF5" = xyes ; then testcase2059 s3 fi fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_nczfilter.sh b/nczarr_test/run_nczfilter.sh index 2d87cbc120..8ec523dc58 100755 --- a/nczarr_test/run_nczfilter.sh +++ b/nczarr_test/run_nczfilter.sh @@ -5,15 +5,12 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi . 
"$srcdir/test_nczarr.sh" -# This shell script runs tst_nczfilter.c +# This shell script runs test_nczfilter.c set -e -pwd - s3isolate "testdir_nczfilter" +THISDIR=`pwd` cd $ISOPATH -${execdir}/tst_nczfilter - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup +${execdir}/test_nczfilter diff --git a/nczarr_test/run_newformat.sh b/nczarr_test/run_newformat.sh index 40eb0d8ef6..fde66c9314 100755 --- a/nczarr_test/run_newformat.sh +++ b/nczarr_test/run_newformat.sh @@ -46,5 +46,3 @@ if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcasecvt zip testcasepure zip fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_notzarr.sh b/nczarr_test/run_notzarr.sh index f81e81dba8..7eb8a01ac1 100755 --- a/nczarr_test/run_notzarr.sh +++ b/nczarr_test/run_notzarr.sh @@ -33,18 +33,18 @@ if test "x$FEATURE_S3TESTS" = xyes ; then fi echo "Test empty file" -RET=`${execdir}/tst_notzarr "file://empty.file#mode=zarr,file"` +RET=`${execdir}/test_notzarr "file://empty.file#mode=zarr,file"` testfailed "$RET" echo "Test non-zarr file" -RET=`${execdir}/tst_notzarr "file://notzarr.file#mode=zarr,file"` +RET=`${execdir}/test_notzarr "file://notzarr.file#mode=zarr,file"` testfailed "$RET" if test "x$FEATURE_NCZARR_ZIP" = xyes ; then echo "Test empty zip file" -RET=`${execdir}/tst_notzarr "file://empty.zip#mode=zarr,zip"` +RET=`${execdir}/test_notzarr "file://empty.zip#mode=zarr,zip"` testfailed "$RET" echo "Test non-zarr zip file" -RET=`${execdir}/tst_notzarr "file://notzarr.zip#mode=zarr,zip"` +RET=`${execdir}/test_notzarr "file://notzarr.zip#mode=zarr,zip"` testfailed "$RET" fi @@ -53,12 +53,10 @@ if test 1 = 0 ; then # This test is NA for S3 echo "Test empty S3 file" KEY2="${KEY}/empty.s3" - RET=`${execdir}/tst_notzarr "https://$URL${KEY2}#mode=zarr,s3"` + RET=`${execdir}/test_notzarr "https://$URL${KEY2}#mode=zarr,s3"` testfailed "$RET" fi echo "Test non-zarr S3 file" 
-RET=`${execdir}/tst_notzarr "https://$URL/${S3ISOPATH}/notzarr.s3#mode=zarr,s3"` +RET=`${execdir}/test_notzarr "https://$URL/${S3ISOPATH}/notzarr.s3#mode=zarr,s3"` testfailed "$RET" fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_nulls.sh b/nczarr_test/run_nulls.sh index 796bf967f3..85d81b50b4 100755 --- a/nczarr_test/run_nulls.sh +++ b/nczarr_test/run_nulls.sh @@ -53,5 +53,3 @@ diff -bw ${srcdir}/ref_nulls_nczarr.baseline tmp_nulls_nczarr_${zext}.cdl testcase file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_perf_chunks1.sh b/nczarr_test/run_perf_chunks1.sh index 86775e19fb..e03dc373a8 100755 --- a/nczarr_test/run_perf_chunks1.sh +++ b/nczarr_test/run_perf_chunks1.sh @@ -1,6 +1,6 @@ #!/bin/sh -# This shell just tests the tst_chunks3 program by running it a few +# This shell just tests the test_chunks3 program by running it a few # times to generate a simple test file. Then it uses ncdump -s to # check that the output is what it should be. # Copied from nc_perf/. @@ -38,5 +38,3 @@ echo '*** SUCCESS!!!' testcases file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcases zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcases s3; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_put_vars_two_unlim_dim.sh b/nczarr_test/run_put_vars_two_unlim_dim.sh new file mode 100755 index 0000000000..12d15f2add --- /dev/null +++ b/nczarr_test/run_put_vars_two_unlim_dim.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +if test "x$srcdir" = x ; then srcdir=`pwd`; fi +. 
../test_common.sh + +set -e + +find ${execdir} -name 'test_put_vars_two_unlim_dim*' + +if test -f ${execdir}/.libs/test_put_vars_two_unlim_dim${ext} ; then +nm ${execdir}/.libs/test_put_vars_two_unlim_dim${ext} +fi + +#${execdir}/test_put_vars_two_unlim_dim${ext} + +exit 0 diff --git a/nczarr_test/run_quantize.sh b/nczarr_test/run_quantize.sh index 32af46df7f..902d56434d 100755 --- a/nczarr_test/run_quantize.sh +++ b/nczarr_test/run_quantize.sh @@ -31,4 +31,3 @@ testcase file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi # There is a (currently) untraceable bug when using S3 #if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi -# if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_s3_cleanup.sh b/nczarr_test/run_s3_cleanup.sh deleted file mode 100755 index 542af09d5c..0000000000 --- a/nczarr_test/run_s3_cleanup.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -if test "x$srcdir" = x ; then srcdir=`pwd`; fi -. ../test_common.sh - -. 
"$srcdir/test_nczarr.sh" - -set -e - -echo "" -echo "*** Remove /netcdf-c from S3 repository" - -fileargs netcdf-c - -if test "x$FEATURE_S3TESTS" = xyes ; then -${execdir}/s3util -u "${NCZARR_S3_TEST_URL}" -k "/netcdf-c" clear -fi - -exit 0 diff --git a/nczarr_test/run_scalar.sh b/nczarr_test/run_scalar.sh index b1aff8e768..c6de0ebc81 100755 --- a/nczarr_test/run_scalar.sh +++ b/nczarr_test/run_scalar.sh @@ -60,5 +60,3 @@ diff -bw $top_srcdir/nczarr_test/ref_scalar.cdl tmp_scalar_zarr_${zext}.cdl testcase file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi - -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup diff --git a/nczarr_test/run_strings.sh b/nczarr_test/run_strings.sh index 69cd93b038..3651e5db7f 100755 --- a/nczarr_test/run_strings.sh +++ b/nczarr_test/run_strings.sh @@ -26,7 +26,7 @@ fileargs tmp_string_nczarr "mode=nczarr,$zext" nczarrurl="$fileurl" nczarrfile="$file" -# setup +# setupp deletemap $zext $zarrfile deletemap $zext $nczarrfile @@ -54,4 +54,5 @@ testcase file if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcase zip; fi if test "x$FEATURE_S3TESTS" = xyes ; then testcase s3; fi -if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup + + diff --git a/nczarr_test/run_unlim_io.sh b/nczarr_test/run_unlim_io.sh new file mode 100755 index 0000000000..de5fb6ff81 --- /dev/null +++ b/nczarr_test/run_unlim_io.sh @@ -0,0 +1,108 @@ +#!/bin/sh + +if test "x$srcdir" = x ; then srcdir=`pwd`; fi +. ../test_common.sh + +. 
${srcdir}/test_nczarr.sh + +set -e + +s3isolate "testdir_unlim_io" +THISDIR=`pwd` +cd $ISOPATH + +TC="${execdir}/test_unlim_io -4" +ZM="${execdir}/zmapio -t int" + +remfile() { + case "$zext" in + nc4) rm -fr $1 ;; + file) rm -fr $1 ;; + zip) rm -fr $1 ;; + s3) ;; + *) echo "no such extension: $zext" ; exit 1;; + esac +} + +remfile() { + case "$zext" in + nc4) rm -fr $1 ;; + file) rm -fr $1 ;; + zip) rm -fr $1 ;; + s3) ;; + *) echo "no such extension: $zext" ; exit 1;; + esac +} + +buildfile() { +zext=$1 +base=$2 +if test "x$TESTNCZARR" = x1 ; then +fileargs "${base}_${zext}" +deletemap $zext $file +file="$fileurl" +else +file="${base}_${zext}.nc" +rm -f $file +fi +} + +testcase1() { +zext=$1 +buildfile $zext tmp_unlim_io1 +echo ""; echo "*** Test simple use of unlimited" +rm -fr tmp_unlim_io1.nc tmp_unlim_io1.$zext tmp_unlim_io1.nc.txt tmp_unlim_io1.${zext}.txt +$TC -d 0 -c 2 -s 0 -e 1 -Ocw tmp_unlim_io1.nc +$TC -d 0 -c 2 -s 0 -e 1 -Ocw "$file" +${NCDUMP} -n tmp_unlim_io1 tmp_unlim_io1.nc >tmp_unlim_io1.nc.txt +${NCDUMP} -n tmp_unlim_io1 "$file" >tmp_unlim_io1.${zext}.txt +diff -b tmp_unlim_io1.nc.txt tmp_unlim_io1.${zext}.txt +} + +testcase2() { +zext=$1 +buildfile $zext tmp_unlim_io2 +echo ""; echo "*** Test 2-d chunking" +rm -fr tmp_unlim_io2.nc tmp_unlim_io2.$zext tmp_unlim_io2.nc.txt tmp_unlim_io2.${zext}.txt +$TC -d 0,0 -c 2,2 -s 0,0 -e 2,2 -v 17 -Ocw tmp_unlim_io2.nc +$TC -d 0,0 -c 2,2 -s 0,0 -e 2,2 -v 17 -Ocw "$file" +${NCDUMP} -n tmp_unlim_io2 tmp_unlim_io2.nc >tmp_unlim_io2.nc.txt +${NCDUMP} -n tmp_unlim_io2 "$file" >tmp_unlim_io2.${zext}.txt +diff -b tmp_unlim_io2.nc.txt tmp_unlim_io2.${zext}.txt +${execdir}/ncdumpchunks -v v "$file" +} + +testcase3() { +zext=$1 +buildfile $zext tmp_unlim_io3 +echo ""; echo "*** Test multi-chunk extension" +rm -fr tmp_unlim_io3.nc tmp_unlim_io3.$zext tmp_unlim_io3.nc.txt tmp_unlim_io3.${zext}.txt +$TC -d 0,0 -c 2,2 -s 6,6 -e 2,2 -v 17 -Ocw tmp_unlim_io3.nc +$TC -d 0,0 -c 2,2 -s 6,6 -e 2,2 -v 17 -Ocw "$file" +${NCDUMP} 
-n tmp_unlim_io3 tmp_unlim_io3.nc >tmp_unlim_io3.nc.txt +${NCDUMP} -n tmp_unlim_io3 "$file" >tmp_unlim_io3.${zext}.txt +diff -b tmp_unlim_io3.nc.txt tmp_unlim_io3.${zext}.txt +} + +testcase4() { +zext=$1 +buildfile $zext tmp_unlim_io4 +echo ""; echo "*** Test unlimited as second dimension" +rm -fr tmp_unlim_io4.nc tmp_unlim_io4.$zext tmp_unlim_io4.nc.txt tmp_unlim_io4.${zext}.txt +$TC -d 8,0 -c 2,2 -s 6,6 -e 2,2 -v 17 -Ocw tmp_unlim_io4.nc +$TC -d 8,0 -c 2,2 -s 6,6 -e 2,2 -v 17 -Ocw "$file" +${NCDUMP} -n tmp_unlim_io3 tmp_unlim_io4.nc >tmp_unlim_io4.nc.txt +${NCDUMP} -n tmp_unlim_io3 "$file" >tmp_unlim_io4.${zext}.txt +diff -b tmp_unlim_io4.nc.txt tmp_unlim_io4.${zext}.txt +} + +testcases() { + testcase1 $1 + testcase2 $1 + testcase3 $1 + testcase4 $1 +} + +testcases file +if test "x$FEATURE_NCZARR_ZIP" = xyes ; then testcases zip; fi +if test "x$FEATURE_S3TESTS" = xyes ; then testcases s3; fi diff --git a/nczarr_test/run_ut_chunk.sh b/nczarr_test/run_ut_chunk.sh index c64cf328af..9c0ed43309 100755 --- a/nczarr_test/run_ut_chunk.sh +++ b/nczarr_test/run_ut_chunk.sh @@ -8,8 +8,8 @@ set -e # Test chunking code # Control which test sets are executed -# possible sets: proj walk -TESTS=proj +# possible sets: proj(obsolete) walk(obsolete) +TESTS=walk # Functions @@ -42,7 +42,7 @@ for T in $TESTS ; do case "$T" in proj) -echo ""; echo "*** Test projection computations" +echo ""; echo "*** Test projection computations: obsolete" echo ""; echo "*** Test 1" testproj ;; diff --git a/nczarr_test/run_ut_map.sh b/nczarr_test/run_ut_map.sh index edb97160fa..ba1af07915 100755 --- a/nczarr_test/run_ut_map.sh +++ b/nczarr_test/run_ut_map.sh @@ -7,13 +7,16 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi set -e +s3isolate "testdir_utmap" +THISDIR=`pwd` +cd $ISOPATH + # Test those map implementations where # it is possible to look at the actual storage. # in some cases. 
Note that we # cannot easily look inside S3 storage # except using the aws-cli, if available - # Common CMD="${execdir}/ut_map${ext}" diff --git a/nczarr_test/s3util.c b/nczarr_test/s3util.c index 42562ae9f0..9f0c433f73 100644 --- a/nczarr_test/s3util.c +++ b/nczarr_test/s3util.c @@ -203,7 +203,7 @@ main(int argc, char** argv) memset(&s3sdk,0,sizeof(s3sdk)); - if((stat = NC_s3urlprocess(dumpoptions.url, &s3sdk.s3))) goto done; + if((stat = NC_s3urlprocess(dumpoptions.url, &s3sdk.s3, NULL))) goto done; if(s3sdk.s3.rootkey != NULL && dumpoptions.key != NULL) { /* Make the root key be the concatenation of rootkey+dumpoptions.key */ if((stat = nczm_concat(s3sdk.s3.rootkey,dumpoptions.key,&tmp))) goto done; diff --git a/nczarr_test/tst_chunkcases.c b/nczarr_test/test_chunkcases.c similarity index 53% rename from nczarr_test/tst_chunkcases.c rename to nczarr_test/test_chunkcases.c index dbd65d3232..91774c939a 100644 --- a/nczarr_test/tst_chunkcases.c +++ b/nczarr_test/test_chunkcases.c @@ -2,13 +2,11 @@ #include "config.h" #endif #include -#ifdef HAVE_CONFIG_H #include -#endif #include -#include -#include -#include +#include "netcdf.h" +#include "ncpathmgr.h" +#include "nclist.h" #ifdef HAVE_HDF5_H #include @@ -19,46 +17,33 @@ #include "zincludes.h" #endif - -#include "tst_utils.h" +#include "test_utils.h" static unsigned chunkprod; static unsigned dimprod; static int* data = NULL; static size_t datasize = 0; -static int setupwholechunk(void); -static int reportwholechunk(void); -static void zutest_print(int sort, ...); - static int writedata(void) { int ret = NC_NOERR; - int i; + size_t i; for(i=0;iwholechunk) - setupwholechunk(); - if(options->debug >= 1) { fprintf(stderr,"write: dimlens=%s chunklens=%s\n", printvector(options->rank,options->dimlens),printvector(options->rank,options->chunks)); } if(options->wholechunk) { fprintf(stderr,"write var: wholechunk\n"); - if((ret = 
nc_put_vars(meta->ncid,meta->varid,options->start,options->count,(ptrdiff_t*)options->stride,data))) + if((ret = nc_put_vars(meta->ncid,meta->varid,options->start,options->edges,(ptrdiff_t*)options->stride,data))) ERR(ret); } else { fprintf(stderr,"write vars: start=%s count=%s stride=%s\n", - printvector(options->rank,options->start),printvector(options->rank,options->count),printvector(options->rank,options->stride)); - if((ret = nc_put_vars(meta->ncid,meta->varid,options->start,options->count,(ptrdiff_t*)options->stride,data))) - ERR(ret); - } - - if(options->wholechunk) { - if((ret=reportwholechunk())) + printvector(options->rank,options->start),printvector(options->rank,options->edges),printvector(options->rank,options->stride)); + if((ret = nc_put_vars(meta->ncid,meta->varid,options->start,options->edges,(ptrdiff_t*)options->stride,data))) ERR(ret); } @@ -69,33 +54,27 @@ static int readdata(void) { int ret = NC_NOERR; - int i; + size_t i; memset(data,0,datasize); - if(options->wholechunk) { - setupwholechunk(); - } - if(options->debug >= 1) fprintf(stderr,"read: dimlens=%s chunklens=%s\n", printvector(options->rank,options->dimlens),printvector(options->rank,options->chunks)); fprintf(stderr,"read vars: start=%s count=%s stride=%s", printvector(options->rank,options->start), - printvector(options->rank,options->count), + printvector(options->rank,options->edges), printvector(options->rank,options->stride)); if(options->wholechunk) fprintf(stderr," wholechunk"); fprintf(stderr,"\n"); - if((ret = nc_get_vars(meta->ncid,meta->varid,options->start,options->count,(ptrdiff_t*)options->stride,data))) + if((ret = nc_get_vars(meta->ncid,meta->varid,options->start,options->edges,(ptrdiff_t*)options->stride,data))) ERR(ret); for(i=0;iwholechunk) - reportwholechunk(); - + return 0; } @@ -116,66 +95,6 @@ genodom(void) return ret; } -static int wholechunkcalls = 0; -static struct ZUTEST zutester; - -static int -setupwholechunk(void) -{ - int ret = NC_NOERR; - - 
wholechunkcalls = 0; - -#ifdef ENABLE_NCZARR - /* Set the printer */ - zutester.tests = UTEST_WHOLECHUNK; - zutester.print = zutest_print; - zutest = &zutester; /* See zdebug.h */ -#endif - return ret; -} - - -static void -zutest_print(int sort, ...) -{ - va_list ap; - struct Common* common = NULL; - size64_t* chunkindices; - - NC_UNUSED(common); - - va_start(ap,sort); - - switch (sort) { - default: break; /* ignore */ - case UTEST_WHOLECHUNK: - common = va_arg(ap,struct Common*); - chunkindices = va_arg(ap,size64_t*); - if(options->debug >= 1) - fprintf(stderr,"wholechunk: indices=%s\n",printvector64(common->rank,chunkindices)); - wholechunkcalls++; - break; - } - va_end(ap); -} - -static int -reportwholechunk(void) -{ - int ret = NC_NOERR; -#ifdef ENABLE_NCZARR - if(options->debug > 0) - fprintf(stderr,"wholechunkcalls=%d\n",wholechunkcalls); - if(wholechunkcalls != 1) - return NC_EINVAL; -#endif - return ret; -} - - - - int main(int argc, char** argv) { @@ -183,26 +102,10 @@ main(int argc, char** argv) int i; if((stat=getoptions(&argc,&argv))) goto done; + if((stat=verifyoptions(options))) goto done; - if(options->formatx != NC_FORMATX_NCZARR && options->wholechunk) { - fprintf(stderr,"Format is not NCZarr; -OW_ ignored\n"); - options->wholechunk = 0; - } - - switch (options->op) { - case Read: - if((stat = getmetadata(0))) - ERR(stat); - if (argc == 0) {fprintf(stderr, "no input file specified\n");exit(1);} - break; - case Write: - if((stat = getmetadata(1))) - ERR(stat); - if (argc == 0) {fprintf(stderr, "no output file specified\n");exit(1);} - break; - default: - break; /* do not need a file */ - } + if((stat = getmetadata(0))) + ERR(stat); dimprod = 1; chunkprod = 1; diff --git a/nczarr_test/tst_fillonlyz.c b/nczarr_test/test_fillonlyz.c similarity index 93% rename from nczarr_test/tst_fillonlyz.c rename to nczarr_test/test_fillonlyz.c index 521cea86e0..e6dadf685b 100644 --- a/nczarr_test/tst_fillonlyz.c +++ b/nczarr_test/test_fillonlyz.c @@ -10,7 +10,7 @@ 
#include "zincludes.h" -#include "tst_utils.h" +#include "test_utils.h" #undef DEBUG @@ -18,7 +18,7 @@ static void nccheck(int ret, int lineno) { if(ret == NC_NOERR) return; - report(ret,lineno); + ncz_report(ret,lineno); } #define NCCHECK(err) nccheck(err,__LINE__) @@ -36,6 +36,8 @@ main(int argc, char *argv[] ) size_t i; NCCHECK(getoptions(&argc,&argv)); + if(options->op == Write) NCCHECK(verifyoptions(options)); + filename = options->file; NCCHECK(err = nc_open(filename,NC_NETCDF4,&ncid)); @@ -72,16 +74,3 @@ main(int argc, char *argv[] ) if(idat) free(idat); return 0; } - - - - - - - - - - - - - diff --git a/nczarr_test/test_filter_avail.c b/nczarr_test/test_filter_avail.c new file mode 100644 index 0000000000..9e403fb302 --- /dev/null +++ b/nczarr_test/test_filter_avail.c @@ -0,0 +1,107 @@ +#define TESTNCZARR +/* + Copyright 2018, UCAR/Unidata + See COPYRIGHT file for copying and redistribution conditions. +*/ + +#include "config.h" +#include +#include +#include + +#ifdef USE_HDF5 +#include +#endif + +#include "netcdf.h" +#include "netcdf_aux.h" +#include "netcdf_filter.h" + +#undef DEBUG + +#define MAXPARAMS 32 + +#ifdef TESTNCZARR +#define DFALT_TESTFILE "file://tmp_filter_avail.file#mode=nczarr,file" +#else +#define DFALT_TESTFILE "tmp_filter_avail.nc" +#endif + +static const char* testfile = NULL; +static int nerrs = 0; + +static int ncid; + +/* Forward */ +static int test_test1(void); +static void init(int argc, char** argv); + +#define ERRR do { \ +fflush(stdout); /* Make sure our stdout is synced with stderr. */ \ +fprintf(stderr, "Sorry! 
Unexpected result, %s, line: %d\n", \ + __FILE__, __LINE__); \ +nerrs++;\ +} while (0) + +static int +check(int err,int line) +{ + if(err != NC_NOERR) { + fprintf(stderr,"fail (%d): %s\n",line,nc_strerror(err)); + } + return NC_NOERR; +} + +#define CHECK(x) check(x,__LINE__) + +static int +test_test1(void) +{ + int stat = NC_NOERR; + + printf("test1: bzip2 availability\n"); + CHECK(nc_create(testfile,NC_NETCDF4|NC_CLOBBER,&ncid)); + CHECK(nc_enddef(ncid)); + switch (stat = nc_inq_filter_avail(ncid,H5Z_FILTER_BZIP2)) { + case NC_NOERR: break; + case NC_ENOFILTER: break; + default: CHECK(stat); goto done; + } + if(stat == NC_ENOFILTER) { + printf("*** FAIL: filter %d not available\n",H5Z_FILTER_BZIP2); + } else { + printf("*** PASS: filter %d available\n",H5Z_FILTER_BZIP2); + } + + CHECK(nc_abort(ncid)); +done: + return stat; +} + +/**************************************************/ +/* Utilities */ + +static void +init(int argc, char** argv) +{ + /* get the testfile path */ + if(argc > 1) + testfile = argv[1]; + else + testfile = DFALT_TESTFILE; +} + +/**************************************************/ +int +main(int argc, char **argv) +{ +#ifdef USE_HDF5 +#ifdef DEBUG + H5Eprint1(stderr); + nc_set_log_level(1); +#endif +#endif + init(argc,argv); + if(test_test1() != NC_NOERR) ERRR; + exit(nerrs > 0?1:0); +} diff --git a/nczarr_test/test_nczarr.sh b/nczarr_test/test_nczarr.sh index 64cd0bf90a..85e4794cd0 100755 --- a/nczarr_test/test_nczarr.sh +++ b/nczarr_test/test_nczarr.sh @@ -12,8 +12,7 @@ if test "x$NCZARR_S3_TEST_HOST" = x ; then export NCZARR_S3_TEST_HOST=s3.us-east-1.amazonaws.com fi if test "x$NCZARR_S3_TEST_BUCKET" = x ; then -# export NCZARR_S3_TEST_BUCKET=unidata-netcdf-zarr-testing - export NCZARR_S3_TEST_BUCKET=unidata-zarr-test-data + export NCZARR_S3_TEST_BUCKET="${S3TESTBUCKET}" fi export NCZARR_S3_TEST_URL="https://${NCZARR_S3_TEST_HOST}/${NCZARR_S3_TEST_BUCKET}" @@ -25,25 +24,6 @@ else S3UTIL="${execdir}/s3util" fi -s3sdkdelete() { -# aws s3api 
delete-object --endpoint-url=https://${NCZARR_S3_TEST_HOST} --bucket=${NCZARR_S3_TEST_BUCKET} --key="/${S3ISOPATH}/$1" -${S3UTIL} ${PROFILE} -u "${NCZARR_S3_TEST_URL}" -k "$1" clear -} - - -# Create an isolation path for S3; build on the isolation directory -s3isolate() { -if test "x$S3ISOPATH" = x ; then - if test "x$ISOPATH" = x ; then isolate "$1"; fi - S3ISODIR="$ISODIR" - S3ISOPATH="netcdf-c" - if test "x$S3ISODIR" == x ; then - S3ISODIR=`${execdir}/../libdispatch/ncrandom` - fi - S3ISOPATH="${S3ISOPATH}/$S3ISODIR" -fi -} - # Check settings checksetting() { if test -f ${TOPBUILDDIR}/libnetcdf.settings ; then @@ -172,19 +152,39 @@ resetrc() { unset DAPRCFILE } -# Enforce cleanup -atexit() { - atexit_cleanup() { - if test "x$S3ISOPATH" != x ; then - if test "x$FEATURE_S3TESTS" = xyes ; then s3sdkdelete "/${S3ISOPATH}" ; fi # Cleanup - fi - } - trap atexit_cleanup EXIT +s3sdkdelete() { +if test -f ${execdir}/s3util ; then + ${S3UTIL} ${PROFILE} -u "${NCZARR_S3_TEST_URL}" -k "$1" clear +elif which aws ; then + aws s3api delete-object --endpoint-url=https://${NCZARR_S3_TEST_HOST} --bucket=${NCZARR_S3_TEST_BUCKET} --key="/${S3ISOPATH}/$1" +else + echo "**** Could not delete ${NCZAR_S3_TEST_URL}" +fi +} + +s3sdkcleanup() { +if test -f ${execdir}/s3util ; then + ${S3UTIL} ${PROFILE} -u "${NCZARR_S3_TEST_URL}" -k "$1" clear +elif which aws ; then + aws s3api delete-object --endpoint-url=https://${NCZARR_S3_TEST_HOST} --bucket=${NCZARR_S3_TEST_BUCKET} --key="/${S3ISOPATH}/$1" +else + echo "**** Could not delete ${NCZAR_S3_TEST_URL}" +fi +} + +# Create an isolation path for S3; build on the isolation directory +s3isolate() { + if test "x$S3ISOPATH" = x ; then + if test "x$ISOPATH" = x ; then isolate "$1"; fi + S3ISODIR="$ISODIR" + S3ISOTESTSET="${S3TESTSUBTREE}/testset_" + if test "x$NOISOPATH" = x ; then S3ISOTESTSET="${S3ISOTESTSET}${TESTUID}"; fi + S3ISOPATH="${S3ISOTESTSET}/$S3ISODIR" + fi } GDBB="gdb -batch -ex r -ex bt -ex q --args" resetrc -atexit fi 
#TEST_NCZARR_SH diff --git a/nczarr_test/tst_nczfilter.c b/nczarr_test/test_nczfilter.c similarity index 100% rename from nczarr_test/tst_nczfilter.c rename to nczarr_test/test_nczfilter.c diff --git a/nczarr_test/tst_notzarr.c b/nczarr_test/test_notzarr.c similarity index 100% rename from nczarr_test/tst_notzarr.c rename to nczarr_test/test_notzarr.c diff --git a/nczarr_test/test_unlim_io.c b/nczarr_test/test_unlim_io.c new file mode 100644 index 0000000000..20a32a90bb --- /dev/null +++ b/nczarr_test/test_unlim_io.c @@ -0,0 +1,129 @@ +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif +#include +#include +#include +#include "netcdf.h" +#include "ncpathmgr.h" +#include "nclist.h" + +#ifdef HAVE_HDF5_H +#include +#include +#endif + +#ifdef ENABLE_NCZARR +#include "zincludes.h" +#endif + +#include "test_utils.h" + +#define NDATA MAX_DATA +static int data[NDATA]; + +static unsigned chunkprod; +static unsigned dimprod; +static size_t datasize = 0; + +static int +writedata(void) +{ + int ret = NC_NOERR; + size_t i; + + for(i=0;idata == 0x7fffffff ? 
i: options->data); + + if(options->debug >= 1) { + fprintf(stderr,"write: dimlens=%s chunklens=%s\n", + printvector(options->rank,options->dimlens),printvector(options->rank,options->chunks)); + } + fprintf(stderr,"write vars: start=%s edges=%s stride=%s\n", + printvector(options->rank,options->start),printvector(options->rank,options->edges),printvector(options->rank,options->stride)); + if((ret = nc_put_vars(meta->ncid,meta->varid,options->start,options->edges,(ptrdiff_t*)options->stride,data))) + ERR(ret); + return 0; +} + +static int +readdata(void) +{ + int ret = NC_NOERR; + size_t i; + + memset(data,0,datasize); + + if(options->debug >= 1) + fprintf(stderr,"read: dimlens=%s chunklens=%s\n", + printvector(options->rank,options->dimlens),printvector(options->rank,options->chunks)); + fprintf(stderr,"read vars: start=%s edges=%s stride=%s", + printvector(options->rank,options->start), + printvector(options->rank,options->edges), + printvector(options->rank,options->stride)); + fprintf(stderr,"\n"); + if((ret = nc_get_vars(meta->ncid,meta->varid,options->start,options->edges,(ptrdiff_t*)options->stride,data))) + ERR(ret); + + for(i=0;irank, options->start, options->stop, options->stride, options->max); + if(odom == NULL) {ret = NC_ENOMEM; goto done;} + if(options->debug > 1) + fprintf(stderr,"genodom: odom = %s\n",odom_print(odom)); + /* Iterate the odometer */ + for(i=0;odom_more(odom);odom_next(odom),i++) { + printf("[%02d] %s\n",i,(i==0?odom_print(odom):odom_printshort(odom))); + } +done: + odom_free(odom); + return ret; +} +#endif + +int +main(int argc, char** argv) +{ + int stat = NC_NOERR; + int i; + + if((stat=getoptions(&argc,&argv))) goto done; + if((stat=verifyoptions(options))) goto done; + + if(meta->ncid == 0) { + fprintf(stderr,"File not found: %s\n",options->file); + ERR(NC_EACCESS); + } + + if(options->create == Create) { + if((stat = getmetadata(1))) ERR(stat); + if(meta->ncid && (stat = nc_close(meta->ncid))) ERR(stat); + } + if((stat = 
getmetadata(0))) + ERR(stat); + + dimprod = 1; + chunkprod = 1; + for(i=0;irank;i++) {dimprod *= options->dimlens[i]; chunkprod *= options->chunks[i];} + + datasize = dimprod*sizeof(int); + + switch (options->op) { + case Read: readdata(); break; + case Write: writedata(); break; + default: break; + } + if(meta->ncid) {if((stat = nc_close(meta->ncid))) ERR(stat);} + +done: + cleanup(); + return 0; +} diff --git a/nczarr_test/tst_utils.c b/nczarr_test/test_utils.c similarity index 60% rename from nczarr_test/tst_utils.c rename to nczarr_test/test_utils.c index e1987d760d..3afc074b0b 100644 --- a/nczarr_test/tst_utils.c +++ b/nczarr_test/test_utils.c @@ -23,13 +23,33 @@ #include #endif -#include "tst_utils.h" +#include "test_utils.h" Options* options = NULL; Metadata* meta = NULL; NClist* capture = NULL; +void +usage(int err) +{ + if(err) { + fprintf(stderr,"error: (%d) %s\n",err,nc_strerror(err)); + } + fprintf(stderr,"usage:"); + fprintf(stderr," -d [,*]"); + fprintf(stderr," -c [,*]"); + fprintf(stderr," -s [,*]"); + fprintf(stderr," -e [,*]"); + fprintf(stderr," -i [,*]"); + fprintf(stderr," -v [,*]"); + fprintf(stderr," -x c|r|w"); + fprintf(stderr," "); + fprintf(stderr,"\n"); + fflush(stderr); + exit(1); +} + static void CHECKRANK(int r) { @@ -56,11 +76,8 @@ getoptions(int* argcp, char*** argvp) /* Set defaults */ options->mode = 0; /* classic netcdf-3 */ - while ((c = getopt(*argcp, *argvp, "T:34c:d:e:f:n:m:p:s:D:O:X:")) != EOF) { + while ((c = getopt(*argcp, *argvp, "34c:d:e:hi:m:n:p:s:v:D:O:T:X:")) != EOF) { switch(c) { - case 'T': - nctracelevel(atoi(optarg)); - break; case '3': options->mode = 0; break; @@ -76,16 +93,13 @@ getoptions(int* argcp, char*** argvp) options->flags |= HAS_DIMLENS; break; case 'e': - CHECKRANK(parsevector(optarg,options->count)); - options->flags |= HAS_COUNT; - break; - case 'f': - CHECKRANK(parsevector(optarg,options->start)); - options->flags |= HAS_START; + CHECKRANK(parsevector(optarg,options->edges)); + options->flags |= 
HAS_EDGES; break; - case 'p': - CHECKRANK(parsevector(optarg,options->stop)); - options->flags |= HAS_STOP; + case 'h': usage(0); break; + case 'i': + CHECKRANK(parsevector(optarg,options->stride)); + options->flags |= HAS_STRIDE; break; case 'm': CHECKRANK(parsevector(optarg,options->max)); @@ -94,9 +108,20 @@ getoptions(int* argcp, char*** argvp) case 'n': CHECKRANK(atoi(optarg)); break; + case 'p': + CHECKRANK(parsevector(optarg,options->stop)); + options->flags |= HAS_STOP; + break; case 's': - CHECKRANK(parsevector(optarg,options->stride)); - options->flags |= HAS_STRIDE; + CHECKRANK(parsevector(optarg,options->start)); + options->flags |= HAS_START; + break; + case 'v': + if(strcmp(optarg,"n")==0) + options->data = 0x7fffffff; + else + options->data = atoi(optarg); + options->flags |= HAS_DATA; break; case 'D': options->debug = (unsigned)atoi(optarg); @@ -104,13 +129,18 @@ getoptions(int* argcp, char*** argvp) case 'O': for(p=optarg;*p;p++) { switch (*p) { + case 'c': options->create = Create; break; case 'r': options->op = Read; break; case 'w': options->op = Write; break; - case 'W': options->wholechunk = 1; break; + case 'x': options->op = Extend; break; case 'o': options->op = Odom; break; + case 'W': options->wholechunk = 1; break; default: fprintf(stderr,"Unknown operation '%c'\n",*p); exit(1); } } break; + case 'T': + nctracelevel(atoi(optarg)); + break; case 'X': if(strcmp(optarg,"opt")==0) { options->optimize = 1; @@ -174,29 +204,51 @@ getoptions(int* argcp, char*** argvp) #endif /* Default some vectors */ - if(!(options->flags & HAS_DIMLENS)) {for(i=0;idimlens[i] = 4;}} - if(!(options->flags & HAS_CHUNKS)) {for(i=0;ichunks[i] = 2;}} - if(!(options->flags & HAS_STRIDE)) {for(i=0;istride[i] = 1;}} + if(!(options->flags & HAS_STRIDE)) { + for(i=0;istride[i] = 1;} + options->flags |= HAS_STRIDE; + } /* Computed Defaults */ - if((options->flags & HAS_COUNT) && (options->flags & HAS_STOP)) { - fprintf(stderr,"cannot specify both count and stop\n"); + 
if((options->flags & HAS_EDGES) && (options->flags & HAS_STOP)) { + fprintf(stderr,"cannot specify both edges and stop\n"); ERR(NC_EINVAL); } - if(!(options->flags & HAS_COUNT) && !(options->flags & HAS_STOP)) { + if( !(options->flags & HAS_EDGES) + && (options->flags & HAS_DIMLENS) + && (options->flags & HAS_STRIDE)) { for(i=0;irank;i++) - options->count[i] = (options->dimlens[i]+options->stride[i]-1)/options->stride[i]; + options->edges[i] = (options->dimlens[i]+options->stride[i]-1)/options->stride[i]; + options->flags |= HAS_EDGES; } - if((options->flags & HAS_COUNT) && !(options->flags & HAS_STOP)) { + if( !(options->flags & HAS_STOP) + && (options->flags & HAS_START) + && (options->flags & HAS_EDGES) + && (options->flags & HAS_STRIDE)) { for(i=0;irank;i++) - options->stop[i] = (options->count[i] * options->stride[i]); + options->stop[i] = options->start[i] + (options->edges[i] * options->stride[i]); + options->flags |= HAS_STOP; } - if(!(options->flags & HAS_COUNT) && (options->flags & HAS_STOP)) { + if( !(options->flags & HAS_EDGES) + && (options->flags & HAS_STRIDE) + && (options->flags & HAS_STOP)) { for(i=0;irank;i++) - options->count[i] = ((options->stop[i]+options->stride[i]-1) / options->stride[i]); + options->edges[i] = ((options->stop[i]+(options->stride[i]-1)) / (options->stride[i])); + options->flags |= HAS_EDGES; } - if(!(options->flags & HAS_MAX)) {for(i=0;imax[i] = options->stop[i];}} + if( !(options->flags & HAS_MAX) + && (options->flags & HAS_STOP)) { + for(i=0;imax[i] = options->stop[i];} + options->flags |= HAS_MAX; + } + + if(options->create == Create) { + if((ret=getmetadata(1))) + ERR(ret); + if(meta->ncid && (ret = nc_close(meta->ncid))) + ERR(ret); + } if(options->debug) { #ifdef USE_HDF5 @@ -208,6 +260,42 @@ getoptions(int* argcp, char*** argvp) return ret; } +int +verifyoptions(Options* options) +{ + int ret = NC_NOERR; + /* Check that we have or can compute relevant values */ + if(options->create == Create) { + if(!(options->flags & 
HAS_DIMLENS)) { + fprintf(stderr,"dimlens option not computable\n"); + ERR(NC_EINVAL); + } + if(!(options->flags & HAS_CHUNKS)) { + fprintf(stderr,"chunklens option not computable\n"); + ERR(NC_EINVAL); + } + if(!(options->flags & HAS_STRIDE)) { + fprintf(stderr,"stride option not computable\n"); + ERR(NC_EINVAL); + } + } + if(options->op == Read || options->op == Write) { + if(!(options->flags & HAS_STOP)) { + fprintf(stderr,"stop option not computable\n"); + ERR(NC_EINVAL); + } + if(!(options->flags & HAS_EDGES)) { + fprintf(stderr,"edges option not computable\n"); + ERR(NC_EINVAL); + } + if(!(options->flags & HAS_MAX)) { + fprintf(stderr,"max option not computable\n"); + ERR(NC_EINVAL); + } + } + return ret; +} + int getmetadata(int create) { @@ -223,10 +311,19 @@ getmetadata(int create) } if(create) { - if((ret = nc_create(options->file,options->mode,&meta->ncid))) goto done; + if((options->flags & (HAS_DIMLENS | HAS_CHUNKS)) != (HAS_DIMLENS | HAS_CHUNKS)) { + fprintf(stderr,"dimlens or chunks not specified\n"); + ret = NC_EINVAL; + goto done; + } + if((ret = nc_create(options->file,options->mode|NC_CLOBBER,&meta->ncid))) goto done; for(i=0;irank;i++) { snprintf(dname,sizeof(dname),"d%d",i); - if((ret = nc_def_dim(meta->ncid,dname,options->dimlens[i],&meta->dimids[i]))) goto done; + if(options->dimlens[i] == 0) + ret = nc_def_dim(meta->ncid,dname,NC_UNLIMITED,&meta->dimids[i]); + else + ret = nc_def_dim(meta->ncid,dname,options->dimlens[i],&meta->dimids[i]); + if(ret) goto done; } if((ret = nc_def_var(meta->ncid,"v",NC_INT,options->rank,meta->dimids,&meta->varid))) goto done; if((ret = nc_def_var_fill(meta->ncid,meta->varid,0,&meta->fill))) goto done; @@ -235,22 +332,23 @@ getmetadata(int create) } if((ret = nc_enddef(meta->ncid))) goto done; } else {/*Open*/ - if((ret = nc_open(options->file,options->mode,&meta->ncid))) goto done; + if((ret = nc_open(options->file,options->mode|NC_WRITE,&meta->ncid))) goto done; for(i=0;irank;i++) { 
snprintf(dname,sizeof(dname),"d%d",i); if((ret = nc_inq_dimid(meta->ncid,dname,&meta->dimids[i]))) goto done; if((ret = nc_inq_dimlen(meta->ncid,meta->dimids[i],&options->dimlens[i]))) goto done; } + options->flags |= HAS_DIMLENS; if((ret = nc_inq_varid(meta->ncid,"v",&meta->varid))) goto done; if(options->formatx == NC_FORMATX_NC4 || options->formatx == NC_FORMATX_NCZARR) { int storage = -1; /* Get chunk sizes also */ if((ret = nc_inq_var_chunking(meta->ncid,meta->varid,&storage,options->chunks))) goto done; if(storage != NC_CHUNKED) {ret = NC_EBADCHUNK; goto done;} + options->flags |= HAS_CHUNKS; } - } - + done: return ret; } @@ -259,7 +357,7 @@ void cleanup(void) { if(meta) { - if(meta->ncid) nc_close(meta->ncid); + if(meta->ncid) {nc_close(meta->ncid); meta->ncid = 0;} } nclistfreeall(capture); nullfree(meta); @@ -281,7 +379,32 @@ parsevector(const char* s0, size_t* vec) p = strchr(q,','); if(p == NULL) {p = q+strlen(q); done=1;} *p++ = '\0'; - vec[i++] = (size_t)atol(q); + if(strcasecmp(q,"u")==0) + vec[i++] = 0; /* signals unlimited */ + else + vec[i++] = (size_t)atol(q); + } + if(s) free(s); + return i; +} + +int +parsedata(const char* s0, int* data) +{ + char* s = strdup(s0); + char* p = NULL; + int i, done; + + + if(s0 == NULL || data == NULL) abort(); + + for(done=0,p=s,i=0;!done;) { + char* q; + q = p; + p = strchr(q,','); + if(p == NULL) {p = q+strlen(q); done=1;} + *p++ = '\0'; + data[i++] = (size_t)atoi(q); } if(s) free(s); return i; @@ -319,7 +442,7 @@ printvector64(int rank, const size64_t* vec) Odometer* odom_new(size_t rank, const size_t* start, const size_t* stop, const size_t* stride, const size_t* max) { - int i; + size_t i; Odometer* odom = NULL; if((odom = calloc(1,sizeof(Odometer))) == NULL) return NULL; @@ -329,7 +452,7 @@ odom_new(size_t rank, const size_t* start, const size_t* stop, const size_t* str odom->stop[i] = stop[i]; odom->stride[i] = stride[i]; odom->max[i] = (max?max[i]:stop[i]); - odom->count[i] = 
(odom->stop[i]+odom->stride[i]-1)/odom->stride[i]; + odom->edges[i] = (odom->stop[i]+odom->stride[i]-1)/odom->stride[i]; odom->index[i] = 0; } return odom; @@ -371,7 +494,7 @@ size_t odom_offset(Odometer* odom) { size_t offset; - int i; + size_t i; offset = 0; for(i=0;irank;i++) { @@ -396,7 +519,7 @@ odom_print1(Odometer* odom, int isshort) strcat(s," stop=("); sv = printvector(odom->rank,odom->stop); strcat(s,sv); strcat(s,")"); strcat(s," stride=("); sv = printvector(odom->rank,odom->stride); strcat(s,sv); strcat(s,")"); strcat(s," max=("); sv = printvector(odom->rank,odom->max); strcat(s,sv); strcat(s,")"); - strcat(s," count=("); sv = printvector(odom->rank,odom->count); strcat(s,sv); strcat(s,")"); + strcat(s," edges=("); sv = printvector(odom->rank,odom->edges); strcat(s,sv); strcat(s,")"); } snprintf(tmp,sizeof(tmp)," offset=%u",(unsigned)odom_offset(odom)); strcat(s,tmp); strcat(s," indices=("); sv = printvector(odom->rank,odom->index); strcat(s,sv); strcat(s,")"); @@ -418,6 +541,60 @@ odom_printshort(Odometer* odom) static const char* urlexts[] = {"nzf", "zip", "nz4", NULL}; +nc_type +gettype(const char* name) +{ + if(strcasecmp(name,"byte")==0) return NC_BYTE; + if(strcasecmp(name,"ubyte")==0) return NC_UBYTE; + if(strcasecmp(name,"short")==0) return NC_SHORT; + if(strcasecmp(name,"ushort")==0) return NC_USHORT; + if(strcasecmp(name,"int")==0) return NC_INT; + if(strcasecmp(name,"uint")==0) return NC_UINT; + if(strcasecmp(name,"int64")==0) return NC_INT64; + if(strcasecmp(name,"uint64")==0) return NC_UINT64; + if(strcasecmp(name,"float")==0) return NC_FLOAT; + if(strcasecmp(name,"double")==0) return NC_DOUBLE; + return NC_NAT; +} + +size_t +gettypesize(nc_type t) +{ + switch (t) { + case NC_BYTE: return sizeof(char); + case NC_UBYTE: return sizeof(unsigned char); + case NC_SHORT: return sizeof(short); + case NC_USHORT: return sizeof(unsigned short); + case NC_INT: return sizeof(int); + case NC_UINT: return sizeof(unsigned int); + case NC_INT64: return 
sizeof(long long int); + case NC_UINT64: return sizeof(unsigned long long int); + case NC_FLOAT: return sizeof(float); + case NC_DOUBLE: return sizeof(double); + default: break; + } + return 0; +} + +const char* +gettypename(nc_type t) +{ + switch (t) { + case NC_BYTE: return "byte"; + case NC_UBYTE: return "ubyte"; + case NC_SHORT: return "short"; + case NC_USHORT: return "ushort"; + case NC_INT: return "int"; + case NC_UINT: return "uint"; + case NC_INT64: return "int64"; + case NC_UINT64: return "uint64"; + case NC_FLOAT: return "float"; + case NC_DOUBLE: return "double"; + default: break; + } + return NULL; +} + const char* filenamefor(const char* f0) { @@ -465,3 +642,11 @@ ncz_gets3testurl(void) } return s3testurlp; } + +void +ncz_report(int err, int lineno) +{ + fprintf(stderr,"Error: %d: %s\n", lineno, nc_strerror(err)); + exit(1); +} + diff --git a/nczarr_test/tst_utils.h b/nczarr_test/test_utils.h similarity index 78% rename from nczarr_test/tst_utils.h rename to nczarr_test/test_utils.h index 5b30085f04..1140cde22e 100644 --- a/nczarr_test/tst_utils.h +++ b/nczarr_test/test_utils.h @@ -3,9 +3,7 @@ #include "netcdf.h" -#define ERR(e) report(e,__LINE__) - -typedef enum Op {None, Read, Write, Wholechunk, Odom} Op; +typedef enum Op {None=0, Create=1, Read=2, Write=3, Wholechunk=4, Odom=5, Extend=6} Op; /* Bit mask of defined options; powers of 2*/ #define HAS_DIMLENS (1<<0) @@ -13,8 +11,11 @@ typedef enum Op {None, Read, Write, Wholechunk, Odom} Op; #define HAS_STRIDE (1<<2) #define HAS_START (1<<3) #define HAS_STOP (1<<4) -#define HAS_COUNT (1<<5) +#define HAS_EDGES (1<<5) #define HAS_MAX (1<<6) +#define HAS_DATA (1<<7) + +#define MAX_DATA 4096 /* Options */ @@ -23,6 +24,7 @@ typedef struct Options { unsigned wdebug; int optimize; int wholechunk; + Op create; Op op; int mode; int formatx; @@ -31,11 +33,12 @@ typedef struct Options { unsigned flags; size_t dimlens[NC_MAX_VAR_DIMS]; size_t chunks[NC_MAX_VAR_DIMS]; - size_t stride[NC_MAX_VAR_DIMS]; size_t 
start[NC_MAX_VAR_DIMS]; + size_t edges[NC_MAX_VAR_DIMS]; + size_t stride[NC_MAX_VAR_DIMS]; size_t stop[NC_MAX_VAR_DIMS]; - size_t count[NC_MAX_VAR_DIMS]; size_t max[NC_MAX_VAR_DIMS]; + int data; } Options; typedef struct Metadata { @@ -48,13 +51,15 @@ typedef struct Metadata { typedef struct Odometer { size_t rank; /*rank */ size_t start[NC_MAX_VAR_DIMS]; - size_t stop[NC_MAX_VAR_DIMS]; + size_t edges[NC_MAX_VAR_DIMS]; size_t stride[NC_MAX_VAR_DIMS]; + size_t stop[NC_MAX_VAR_DIMS]; size_t max[NC_MAX_VAR_DIMS]; /* max size of ith index */ - size_t count[NC_MAX_VAR_DIMS]; size_t index[NC_MAX_VAR_DIMS]; /* current value of the odometer*/ } Odometer; +extern void usage(int); + EXTERNL Odometer* odom_new(size_t rank, const size_t* start, const size_t* stop, const size_t* stride, const size_t* max); EXTERNL void odom_free(Odometer* odom); EXTERNL int odom_more(Odometer* odom); @@ -66,27 +71,29 @@ EXTERNL const char* odom_print(Odometer* odom); EXTERNL const char* odom_printshort(Odometer* odom); EXTERNL int parsevector(const char* s0, size_t* vec); +EXTERNL int parsedata(const char* s0, int* data); EXTERNL const char* filenamefor(const char* f0); EXTERNL const char* printvector(int rank, const size_t* vec); EXTERNL const char* printvector64(int rank, const size64_t* vec); EXTERNL int getoptions(int* argcp, char*** argvp); +EXTERNL int verifyoptions(Options*); EXTERNL int getmetadata(int create); EXTERNL void cleanup(void); +EXTERNL nc_type gettype(const char* name); +EXTERNL size_t gettypesize(nc_type t); +EXTERNL const char* gettypename(nc_type t); + EXTERNL int nc__testurl(const char*,char**); EXTERNL const char* ncz_gets3testurl(void); -static void -report(int err, int lineno) -{ - fprintf(stderr,"Error: %d: %s\n", lineno, nc_strerror(err)); - exit(1); -} - EXTERNL Options* options; EXTERNL Metadata* meta; -EXTERNL NClist* capture; + +EXTERNL void ncz_report(int err, int line); + +#define ERR(e) ncz_report(e,__LINE__) #endif /*TST_UTILS_H*/ diff --git 
a/nczarr_test/tst_zchunks.c b/nczarr_test/test_zchunks.c similarity index 100% rename from nczarr_test/tst_zchunks.c rename to nczarr_test/test_zchunks.c diff --git a/nczarr_test/tst_zchunks2.c b/nczarr_test/test_zchunks2.c similarity index 100% rename from nczarr_test/tst_zchunks2.c rename to nczarr_test/test_zchunks2.c diff --git a/nczarr_test/tst_zchunks3.c b/nczarr_test/test_zchunks3.c similarity index 100% rename from nczarr_test/tst_zchunks3.c rename to nczarr_test/test_zchunks3.c diff --git a/nczarr_test/testfilter_misc.c b/nczarr_test/testfilter_misc.c index 933a85586f..85fdf3df9d 100644 --- a/nczarr_test/testfilter_misc.c +++ b/nczarr_test/testfilter_misc.c @@ -16,6 +16,8 @@ #include "netcdf_aux.h" #include "netcdf_filter.h" +#include "h5misc.h" /* from plugins dir */ + #undef TESTODDSIZE #undef DEBUG @@ -31,8 +33,6 @@ #define MAXPARAMS 32 -#define NPARAMS 14 - static unsigned int baseline[NPARAMS]; static const char* testfile = NULL; @@ -41,8 +41,6 @@ static const char* testfile = NULL; #define DFALT_TESTFILE "tmp_misc.nc" -#define spec "32768, -17b, 23ub, -25S, 27US, 77, 93U, 789f, 12345678.12345678d, -9223372036854775807L, 18446744073709551615UL" - #ifdef TESTODDSIZE #define NDIMS 1 static size_t dimsize[NDIMS] = {4}; @@ -136,7 +134,7 @@ create(void) { int i; - /* Create a file with one big variable, but whose dimensions arte not a multiple of chunksize (to see what happens) */ + /* Create a file with one big variable whose dimensions may or may not be a multiple of chunksize (to see what happens) */ CHECK(nc_create(testfile, NC_NETCDF4|NC_CLOBBER, &ncid)); CHECK(nc_set_fill(ncid, NC_NOFILL, NULL)); for(i=0;i +#include +int main(int argc, char** argv) +{ + Aws::SDKOptions options; + + std::cout << "Running a pure-aws test instantiation to test the aws-cpp-sdk install.\nA failure may manifest as a hang.\n\n"; + + std::cout << "\t* Testing InitAPI()\n"; + options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug; + Aws::InitAPI(options); + 
std::cout << "\t\t* Passed.\n"; + + std::cout << "\t* Testing ShutdownAPI()\n"; + Aws::ShutdownAPI(options); + std::cout << "\t\t* Passed.\n\nFinished.\n\n"; + + return 0; +} \ No newline at end of file diff --git a/nczarr_test/ut_chunking.c b/nczarr_test/ut_chunking.c index 8d56567203..c0aa7140d0 100755 --- a/nczarr_test/ut_chunking.c +++ b/nczarr_test/ut_chunking.c @@ -35,7 +35,7 @@ main(int argc, char** argv) zutester.print = ut_chunk_print; zutest = &zutester; - if((stat = NCZ_projectslices(var->dimsizes, var->chunksizes, utoptions.slices, &common, &odom))) + if((stat = NCZ_projectslices(&common, utoptions.slices, &odom))) goto done; #if 0 diff --git a/nczarr_test/ut_includes.h b/nczarr_test/ut_includes.h index dccf5b863b..774591860e 100644 --- a/nczarr_test/ut_includes.h +++ b/nczarr_test/ut_includes.h @@ -24,6 +24,4 @@ #include "ut_test.h" #include "ut_util.h" -extern struct ZUTEST zutester; - #endif /*UT_INCLUDES_H*/ diff --git a/nczarr_test/ut_map.c b/nczarr_test/ut_map.c index c347ea26c5..005cd59c39 100644 --- a/nczarr_test/ut_map.c +++ b/nczarr_test/ut_map.c @@ -95,7 +95,7 @@ simplecreate(void) goto done; /* Write empty metadata content */ - if((stat = nczmap_write(map, path, 0, 0, (const void*)""))) + if((stat = nczmap_write(map, path, 0, (const void*)""))) goto done; done: @@ -133,7 +133,7 @@ writemeta(void) if((stat=nczm_concat(META1,ZARRAY,&path))) goto done; - if((stat = nczmap_write(map, path, 0, strlen(metadata1), metadata1))) + if((stat = nczmap_write(map, path, strlen(metadata1), metadata1))) goto done; free(path); path = NULL; @@ -156,7 +156,7 @@ writemeta2(void) if((stat=nczm_concat(META2,NCZARRAY,&path))) goto done; - if((stat = nczmap_write(map, path, 0, strlen(metadata2), metadata2))) + if((stat = nczmap_write(map, path, strlen(metadata2), metadata2))) goto done; done: @@ -242,7 +242,6 @@ writedata(void) int i; size64_t totallen; char* data1p = (char*)&data1[0]; /* byte level version of data1 */ - NCZM_FEATURES features; /* Create the data 
*/ for(i=0;i totallen) - last = totallen; - count = last - start; - if((stat = nczmap_write(map, path, start, count, &data1p[start]))) - goto done; - } - } + if((stat = nczmap_write(map, path, totallen, data1p))) + goto done; done: /* Do not delete so we can look at it with ncdump */ diff --git a/nczarr_test/ut_mapapi.c b/nczarr_test/ut_mapapi.c index ffe0907bf2..4e01a80981 100644 --- a/nczarr_test/ut_mapapi.c +++ b/nczarr_test/ut_mapapi.c @@ -16,7 +16,6 @@ #define FAIL 0 #define XFAIL -1 - static const char* metadata1 = "{\n\"foo\": 42,\n\"bar\": \"apples\",\n\"baz\": [1, 2, 3, 4]}"; static const char* metaarray1 = "{\n\"shape\": [1,2,3],\n\"dtype\": \"<1\"}"; @@ -96,7 +95,7 @@ simplecreate(void) printf("Pass: create: create: %s\n",url); truekey = makekey(NCZMETAROOT); - if((stat = nczmap_write(map, truekey, 0, 0, NULL))) + if((stat = nczmap_write(map, truekey, 0, NULL))) goto done; printf("Pass: create: defineobj: %s\n",truekey); @@ -190,13 +189,13 @@ simplemeta(void) goto done; truekey = makekey(key); nullfree(key); key = NULL; - if((stat = nczmap_write(map, truekey, 0, 0, NULL))) + if((stat = nczmap_write(map, truekey, 0, NULL))) goto done; report(PASS,".zarray: def",map); free(truekey); truekey = NULL; truekey = makekey(NCZMETAROOT); - if((stat = nczmap_write(map, truekey, 0, strlen(metadata1), metadata1))) + if((stat = nczmap_write(map, truekey, strlen(metadata1), metadata1))) goto done; report(PASS,".nczarr: writemetadata",map); free(truekey); truekey = NULL; @@ -206,7 +205,7 @@ simplemeta(void) truekey = makekey(key); free(key); key = NULL; - if((stat = nczmap_write(map, truekey, 0, strlen(metaarray1), metaarray1))) + if((stat = nczmap_write(map, truekey, strlen(metaarray1), metaarray1))) goto done; report(PASS,".zarray: writemetaarray1",map); free(truekey); truekey = NULL; @@ -288,7 +287,6 @@ simpledata(void) int i; size64_t totallen, size; char* data1p = (char*)&data1[0]; /* byte level version of data1 */ - NCZM_FEATURES features; title(__func__); @@ 
-302,24 +300,9 @@ simpledata(void) truekey = makekey(DATA1); - features = nczmap_features(impl); - if((NCZM_ZEROSTART & features) || (NCZM_WRITEONCE & features)) { - if((stat = nczmap_write(map, truekey, 0, totallen, data1p))) - goto done; - } else { - /* Write in 3 slices */ - for(i=0;i<3;i++) { - size64_t start, count, third, last; - third = (totallen+2) / 3; /* round up */ - start = i * third; - last = start + third; - if(last > totallen) - last = totallen; - count = last - start; - if((stat = nczmap_write(map, truekey, start, count, &data1p[start]))) - goto done; - } - } + if((stat = nczmap_write(map, truekey, totallen, data1p))) + goto done; + report(PASS,DATA1": write",map); if((stat = nczmap_close(map,0))) diff --git a/nczarr_test/ut_projections.c b/nczarr_test/ut_projections.c index d3db18b5b9..8ee0dba780 100644 --- a/nczarr_test/ut_projections.c +++ b/nczarr_test/ut_projections.c @@ -38,7 +38,7 @@ main(int argc, char** argv) fillcommon(&common,var); /* Compute chunk ranges */ - if((stat = NCZ_compute_chunk_ranges(var->rank,utoptions.slices,var->chunksizes,ncrv))) + if((stat = NCZ_compute_chunk_ranges(&common,utoptions.slices,ncrv))) goto done; if((stat=NCZ_compute_all_slice_projections( diff --git a/nczarr_test/ut_test.c b/nczarr_test/ut_test.c index 8d82ccf72f..0a4cdefe6a 100755 --- a/nczarr_test/ut_test.c +++ b/nczarr_test/ut_test.c @@ -15,7 +15,9 @@ #include "XGetopt.h" #endif +#ifdef UTTESST struct ZUTEST zutester; +#endif struct UTOptions utoptions; diff --git a/nczarr_test/ut_test.h b/nczarr_test/ut_test.h index 53bbc92c20..63ecad3cba 100644 --- a/nczarr_test/ut_test.h +++ b/nczarr_test/ut_test.h @@ -11,6 +11,7 @@ typedef struct Dimdef { char* name; size64_t size; + int isunlimited; } Dimdef; typedef struct Vardef { diff --git a/nczarr_test/ut_util.c b/nczarr_test/ut_util.c index 52478efc22..e9bb88bb54 100644 --- a/nczarr_test/ut_util.c +++ b/nczarr_test/ut_util.c @@ -88,6 +88,7 @@ parsedimdef(const char* s0, Dimdef** defp) 
sscanf(s,"%u%n",&l,&nchars); if(nchars == -1) return NC_EINVAL; def->size = (size_t)l; + if(def->size == 0) def->isunlimited = 1; s += nchars; if(*s != '\0') return NC_EINVAL; if(defp) *defp = def; @@ -507,8 +508,8 @@ fillcommon(struct Common* common, Vardef* var) common->typesize = sizeof(int); if(var != NULL) { common->rank = var->rank; - common->dimlens = var->dimsizes; - common->chunklens = var->chunksizes; - common->memshape = common->dimlens; /* fake it */ + memcpy(common->dimlens,var->dimsizes,sizeof(size64_t)*common->rank); + memcpy(common->chunklens,var->chunksizes,sizeof(size64_t)*common->rank); + memcpy(common->memshape,common->dimlens,sizeof(size64_t)*common->rank); /* fake it */ } } diff --git a/nczarr_test/zmapio.c b/nczarr_test/zmapio.c index 0007b7f3d1..d261f8fcc9 100644 --- a/nczarr_test/zmapio.c +++ b/nczarr_test/zmapio.c @@ -332,13 +332,12 @@ objdump(void) for(i=0;i 0;depth++) { + for(depth=0;depth < nclistlength(stack);depth++) { size64_t len = 0; OBJKIND kind = 0; int hascontent = 0; nullfree(content); content = NULL; - nullfree(obj); obj = NULL; - obj = nclistremove(stack,0); /* zero pos is always top of stack */ + obj = nclistget(stack,depth); kind = keykind(obj); /* Now print info for this obj key */ switch (stat=nczmap_len(map,obj,&len)) { @@ -386,7 +385,6 @@ objdump(void) } } done: - nullfree(obj); nullfree(content); nczmap_close(map,0); nclistfreeall(stack); diff --git a/oc2/ocdebug.h b/oc2/ocdebug.h index cb9890e085..148ae2fbff 100644 --- a/oc2/ocdebug.h +++ b/oc2/ocdebug.h @@ -84,6 +84,7 @@ extern OCerror ocbreakpoint(OCerror err); extern OCerror occatch(OCerror err); extern CURLcode ocreportcurlerror(struct OCstate* state, CURLcode cstat); /* Place breakpoint on ocbreakpoint to catch errors close to where they occur*/ +/* Warning: do not evaluate e more than once */ #define OCCATCH(e) occatch(e) #define OCCATCHCHK(e) (void)occatch(e) #define OCGOTO(label) {ocbreakpoint(-1); goto label;} diff --git a/plugins/CMakeLists.txt 
b/plugins/CMakeLists.txt index 65891d82ee..15de8cb89b 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -65,7 +65,7 @@ MACRO(buildplugin TARGET TARGETLIB) ENDIF() ENDMACRO() -buildplugin(h5misc "h5misc") +buildplugin(h5misc "h5misc" netcdf) buildplugin(h5noop "h5noop") buildplugin(h5noop1 "h5noop1") buildplugin(h5unknown "h5unknown") diff --git a/plugins/H5Zblosc.c b/plugins/H5Zblosc.c index 693b09f0ed..6c318d9707 100755 --- a/plugins/H5Zblosc.c +++ b/plugins/H5Zblosc.c @@ -295,7 +295,7 @@ size_t blosc_filter(unsigned flags, size_t cd_nelmts, bloscsize = blosc_compress(clevel, doshuffle, typesize, nbytes, *buf, outbuf, nbytes); #endif if(bloscsize == 0) { - fprintf(stderr,"Blosc_FIlter Error: blosc_filter: Buffer is uncompressible.\n"); + fprintf(stderr,"Blosc_Filter Error: blosc_filter: Buffer is uncompressible.\n"); goto failed; } else if(bloscsize < 0) { fprintf(stderr,"Blosc Filter Error: blosc_filter: blosc compression error\n"); @@ -353,6 +353,8 @@ size_t blosc_filter(unsigned flags, size_t cd_nelmts, failed: free(outbuf); + *buf = NULL; + *buf_size = 0; return 0; } /* End filter function */ diff --git a/plugins/H5Zmisc.c b/plugins/H5Zmisc.c index f652992cee..56e64734ff 100644 --- a/plugins/H5Zmisc.c +++ b/plugins/H5Zmisc.c @@ -4,9 +4,11 @@ #include #include #include -#include "netcdf_filter_build.h" + #include "h5misc.h" +#include "netcdf_aux.h" + /* WARNING: Starting with HDF5 version 1.10.x, the plugin code MUST be careful when using the standard *malloc()*, *realloc()*, and @@ -35,8 +37,8 @@ static size_t H5Z_filter_test(unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[], size_t nbytes, size_t *buf_size, void **buf); -static int paramcheck(size_t nparams, const unsigned int* params); -static void mismatch(size_t i, const char* which); +static int paramcheck(size_t nparams, const unsigned int* params, struct All* extracted); +static void mismatch(const char* which); const H5Z_class2_t H5Z_TEST[1] = {{ H5Z_CLASS_T_VERS, 
/* H5Z_class_t version */ @@ -97,6 +99,7 @@ H5Z_filter_test(unsigned int flags, size_t cd_nelmts, void* newbuf; unsigned int testcase = 0; size_t size = 1024 * sizeof(float) * 2; + struct All values; if(cd_nelmts == 0) goto fail; @@ -104,8 +107,8 @@ H5Z_filter_test(unsigned int flags, size_t cd_nelmts, testcase = cd_values[0]; switch (testcase) { - case TC_ENDIAN: - if(!paramcheck(cd_nelmts,cd_values)) + case TC_PARAMS: + if(!paramcheck(cd_nelmts,cd_values,&values)) goto fail; break; case TC_ODDSIZE: @@ -142,14 +145,14 @@ fprintf(stderr,"TC_EXPANDED: decompress: nbytes=%u buf_size=%u xdata[0..8]=|",(u H5free_memory(*buf); *buf = newbuf; - } else { /* Compress */ + } else { /* (flags & H5Z_FLAG_REVERSE) Compress */ if(testcase == TC_EXPANDED) { int i; float* b; #if 0 fprintf(stderr,"TC_EXPANDED: compress: nbytes=%u buf_size=%u size=%u\n",(unsigned)nbytes,(unsigned)*buf_size,(unsigned)size); #endif - /* Replace buffer with one that is bigger than the chunk size */ + /* Replace buffer with one that is bigger than the input size */ newbuf = H5allocate_memory(size,0); if(newbuf == NULL) abort(); b = (float*)newbuf; @@ -176,103 +179,93 @@ fprintf(stderr,"TC_EXPANDED: compress: nbytes=%u buf_size=%u size=%u\n",(unsigne return 0; } +static void +extract1(void* field, size_t size, const unsigned int* params) +{ + union { + unsigned long long ll; + unsigned char char8[8]; + unsigned param[2]; + } u; + unsigned char b = 0; + unsigned short s = 0; + unsigned int i = 0; + unsigned char* bp = 0; + unsigned short* sp = NULL; + unsigned int* ip = NULL; + unsigned long long* llp = NULL; + memset(&u,0,sizeof(u)); + switch (size) { + case 1: + b = (unsigned char)(params[0]); + bp = (unsigned char*)field; + *bp = b; + break; + case 2: + s = (unsigned short)(params[0]); + sp = (unsigned short*)field; + *sp = s; + break; + case 4: + i = (unsigned)(params[0]); + ip = (unsigned*)field; + *ip = i; + break; + case 8: + u.param[0] = params[0]; + u.param[1] = params[1]; + 
ncaux_h5filterspec_fix8(u.char8,0); + llp = (unsigned long long*)field; + *llp = u.ll; + break; + default: fprintf(stderr,"insert: unexpected size: %u\n",(unsigned)size); abort(); + } +} + +static void +extractparams(size_t nparams, const unsigned int* params, struct All* all) +{ + size_t offset = 0; + extract1(&all->tbyte,sizeof(all->tbyte),¶ms[offset]); offset += 1; + extract1(&all->tubyte,sizeof(all->tubyte),¶ms[offset]); offset += 1; + extract1(&all->tshort,sizeof(all->tshort),¶ms[offset]); offset += 1; + extract1(&all->tushort,sizeof(all->tushort),¶ms[offset]); offset += 1; + extract1(&all->tint,sizeof(all->tint),¶ms[offset]); offset += 1; + extract1(&all->tuint,sizeof(all->tuint),¶ms[offset]); offset += 1; + extract1(&all->tfloat32,sizeof(all->tfloat32),¶ms[offset]); offset += 1; + extract1(&all->tint64,sizeof(all->tint64),¶ms[offset]); offset += 2*1; + extract1(&all->tuint64,sizeof(all->tuint64),¶ms[offset]); offset += 2*1; + extract1(&all->tfloat64,sizeof(all->tfloat64),¶ms[offset]); offset += 2*1; +} + +/* Verify values of the parameters */ static int -paramcheck(size_t nparams, const unsigned int* params) +paramcheck(size_t nparams, const unsigned int* params, struct All* extracted) { - size_t i; - unsigned char mem[8]; + struct All all; - if(nparams != 14) { - fprintf(stderr,"Too few parameters: need=14 sent=%ld\n",(unsigned long)nparams); + memset(&all,0,sizeof(all)); + + if(nparams != NPARAMS) { + fprintf(stderr,"Incorrect number of parameters: expected=%ld sent=%ld\n",(unsigned long)NPARAMS,(unsigned long)nparams); goto fail; } - for(i=0;i>>> The s3cleanup script requires the \"aws\" command (i.e. the AWS command line interface program)" + echo ">>>> Try installing \"awscli\" package with apt or equivalent." + exit 0 +fi + +# 2. Make sure S3TESTSUBTREE is defined +if test "x$S3TESTSUBTREE" = x ; then + echo ">>>> The s3cleanup script requires that S3TESTSUBTREE is defined." 
+ exit 1; +fi + +test_cleanup() { +rm -f s3cleanup_${puid}.json +rm -f s3cleanup_${puid}.keys +rm -f ${abs_top_builddir}/s3cleanup_${puid}.uids +} +trap test_cleanup EXIT + +rm -f s3cleanup_${puid}.json s3cleanup_${puid}.keys + +# Get complete set of keys in ${S3TESTSUBTREE} prefix +unset ALLKEYS +if ! aws s3api list-objects-v2 --bucket ${S3TESTBUCKET} --prefix "${S3TESTSUBTREE}" | grep -F '"Key":' >s3cleanup_${puid}.keys ; then + echo "No keys found" + test_cleanup + exit 0 +fi + +if test "x$VERBOSE" = x1 ; then set +x; fi +while read -r line; do + KEY=`echo "$line" | sed -e 's|[^"]*"Key":[^"]*"\([^"]*\)".*|\1|'` + # Ignore keys that do not start with ${S3TESTSUBTREE} + PREFIX=`echo "$KEY" | sed -e 's|\([^/]*\)/.*|\1|'` + if test "x$PREFIX" = "x$S3TESTSUBTREE" ; then + ALLKEYS="$ALLKEYS $KEY" + fi +done < s3cleanup_${puid}.keys +if test "x$VERBOSE" = x1 ; then set -x; fi + +# get the uid's for all the subtrees to be deleted +UIDS=`cat ${abs_top_builddir}/s3cleanup_${puid}.uids | tr -d '\r' | tr '\n' ' '` +# Capture the keys matching any uid +unset MATCHKEYS +if test "x$VERBOSE" = x1 ; then set +x; fi +for key in $ALLKEYS ; do + for uid in $UIDS ; do + case "$key" in + "$S3TESTSUBTREE/testset_${uid}"*) + # capture the key' + MATCHKEYS="$MATCHKEYS $key" + ;; + *) if test "x$VERBOSE" = x1 ; then echo "Ignoring \"$key\""; fi ;; + esac + done +done +if test "x$VERBOSE" = x1 ; then set -x; fi + +# We can delete at most 1000 objects at a time, so divide into sets of size 500 +REM="$MATCHKEYS" +while test "x$REM" != x ; do + K500=`echo "$REM" | cut -d' ' -f 1-500` + REM=`echo "$REM" | cut -d' ' -f 501-` + unset DELLIST + MATCH=0 + FIRST=1 + DELLIST="{\"Objects\":[" + if test "x$VERBOSE" = x1 ; then set +x; fi + for key in $K500 ; do + if test $FIRST = 0 ; then DELLIST="${DELLIST},"; fi + DELLIST="${DELLIST} +{\"Key\":\"$key\"}" + FIRST=0 + MATCH=1 + done + if test "x$VERBOSE" = x1 ; then set -x; fi + DELLIST="${DELLIST}],\"Quiet\":false}" + if test "x$MATCH" = x1 ;then + 
rm -f s3cleanup_${puid}.json + echo "$DELLIST" > s3cleanup_${puid}.json + aws s3api delete-objects --bucket ${S3TESTBUCKET} --delete "file://s3cleanup_${puid}.json" + fi +done + +# Final cleanup +test_cleanup + diff --git a/s3gc.in b/s3gc.in new file mode 100755 index 0000000000..612e371d7a --- /dev/null +++ b/s3gc.in @@ -0,0 +1,120 @@ +#!/bin/bash + +# Uncomment to get verbose output +#VERBOSE=1 + +if test "x$VERBOSE" = x1 ; then set -x; fi + +# Constants passed in from configure.ac/CMakeLists +abs_top_srcdir='@abs_top_srcdir@' +abs_top_builddir='@abs_top_builddir@' + +# Additional configuration information +. ${abs_top_builddir}/test_common.sh + +delta="$1" + +# Sanity checks + +# 1. This requires that the AWS CLI (command line interface) is installed. +if ! which aws ; then + echo ">>>> The s3cleanup script requires the \"aws\" command (i.e. the AWS command line interface program)" + echo ">>>> Try installing \"awscli\" package with apt or equivalent." + exit 0 +fi + +# 2. Make sure S3TESTSUBTREE is defined +if test "x$S3TESTSUBTREE" = x ; then + echo ">>>> The s3cleanup script requires that S3TESTSUBTREE is defined." + exit 1 +fi + +# 3. Make sure delta is defined +if test "x$delta" = x ; then + echo ">>>> No delta argument provided" + echo ">>>> Usage: s3gc " + echo ">>>> where is number of days prior to today to begin cleanup" + exit 1 +fi + + +# This script takes a delta (in days) as an argument. +# It then removes from the Unidata S3 bucket those keys +# that are older than (current_date - delta). + +# Compute current_date - delta + +# current date +current=`date +%s` +# convert delta to seconds +deltasec=$((delta*24*60*60)) +# Compute cleanup point +lastdate=$((current-deltasec)) + +rm -f s3gc.json + +# Get complete set of keys in ${S3TESTSUBTREE} prefix +if ! 
aws s3api list-objects-v2 --bucket ${S3TESTBUCKET} --prefix "${S3TESTSUBTREE}" | grep -F '"Key":' >s3gc.keys ; then + echo "No keys found" + rm -f s3gc.json + exit 0 +fi +aws s3api list-objects-v2 --bucket ${S3TESTBUCKET} --prefix "${S3TESTSUBTREE}" | grep -F '"Key":' >s3gc.keys +while read -r line; do + KEY=`echo "$line" | sed -e 's|[^"]*"Key":[^"]*"\([^"]*\)".*|\1|'` + # Ignore keys that do not start with ${S3TESTSUBTREE} + PREFIX=`echo "$KEY" | sed -e 's|\([^/]*\)/.*|\1|'` + if test "x$PREFIX" = "x$S3TESTSUBTREE" ; then + ALLKEYS="$ALLKEYS $KEY" + fi +done < s3gc.keys + +# Look at each key and see if it is less than lastdate. +# If so, then record that key + +# Capture the keys with old uids to delete +unset MATCHKEYS +for key in $ALLKEYS ; do + case "$key" in + "$S3TESTSUBTREE/testset_"*) + # Capture the uid for this key + s3uid=`echo $key | sed -e "s|$S3TESTSUBTREE/testset_\([0-9][0-9]*\)/.*|\1|"` + # check that we got a uid + if test "x$s3uid" != x ; then + # Test age of the uid + if test $((s3uid < lastdate)) = 1; then + MATCHKEYS="${MATCHKEYS} $key" + fi + else + if test "x$VERBOSE" = x1 ; then echo "Excluding \"$key\""; fi + fi + ;; + *) if test "x$VERBOSE" = x1; then echo "Ignoring \"$key\""; fi ;; + esac +done + +# We can delete at most 1000 objects at a time, so divide into sets of size 500 +REM="$MATCHKEYS" +while test "x$REM" != x ; do + K500=`echo "$REM" | cut -d' ' -f 1-500` + REM=`echo "$REM" | cut -d' ' -f 501-` + unset DELLIST + MATCH=0 + FIRST=1 + DELLIST="{\"Objects\":[" + for key in $K500 ; do + if test $FIRST = 0 ; then DELLIST="${DELLIST},"; fi + DELLIST="${DELLIST} +{\"Key\":\"$key\"}" + FIRST=0 + MATCH=1 + done + DELLIST="${DELLIST}],\"Quiet\":false}" + rm -f s3gc.json + if test "x$MATCH" = x1 ;then + rm -f s3gc.json + echo "$DELLIST" > s3gc.json + aws s3api delete-objects --bucket ${S3TESTBUCKET} --delete "file://s3gc.json" + fi +done +rm -f s3gc.json diff --git a/test_common.in b/test_common.in index 3eee3bd7a5..8be771e19e 100644 --- 
a/test_common.in +++ b/test_common.in @@ -9,6 +9,8 @@ if test "x$TEST_COMMON_SH" = x ; then export TEST_COMMON_SH=1 +# Define various global constants + # Define location of execution TOPSRCDIR='@abs_top_srcdir@' TOPBUILDDIR='@abs_top_builddir@' @@ -38,13 +40,23 @@ FEATURE_S3_AWS=@HAS_S3_AWS@ FEATURE_S3_INTERNAL=@HAS_S3_INTERNAL@ FEATURE_S3=@HAS_S3@ FEATURE_NCZARR=@HAS_NCZARR@ -FEATURE_S3TESTS=@DO_S3_TESTING@ +FEATURE_S3TESTS=@ENABLE_S3_TESTING@ FEATURE_NCZARR_ZIP=@DO_NCZARR_ZIP_TESTS@ FEATURE_LARGE_TESTS=@DO_LARGE_TESTS@ # Thredds-test server is currently disabled #FEATURE_THREDDSTEST=1 +# This is the Unidata S3 test bucket +# All S3 tests should use this to store intermediate results. +S3TESTBUCKET=@S3TESTBUCKET@ + +# This is the s3 path within the Unidata bucket; +# All S3 tests should use this to store intermediate results. +S3TESTSUBTREE=@S3TESTSUBTREE@ + +TESTUID=@TESTUID@ + set -e # Figure out various locations in the src/build tree. @@ -122,8 +134,10 @@ if test "x$srcdir" = x ; then # we need to figure out our directory # pick off the last component as the relative name of this directory srcdir=`pwd` - current=`basename $srcdir` - srcdir="${top_srcdir}/$current" + if test "x$srcdir" != "x$top_srcdir" ; then + current=`basename $srcdir` + srcdir="${top_srcdir}/$current" + fi fi # We also assume we are executing in builddir @@ -190,23 +204,26 @@ if test yes = `${execdir}/../ncdump/ncfilteravail $1` ; then return 0 ; else ech # Make sure we are in builddir (not execdir) cd $builddir -# As a protection against parallel make inter-test dependencies and race conditions, -# Support the creation of an isolation directory in which created products are stored. + +# Parallel make can cause inter-test interference (mostly because of historical naming issues). +# As a protection against this, the isolate() function supports the creation of an +# isolation directory in which created products are stored. 
+# Cleanup can be accomplished by deleting the whole isolation directory. +# The name of the isolation directory is by convention "testdir_". +# The isolation dir is created within the ${builddir} directory. +# The is a generated 32 bit unsigned random integer to make the chance +# of collision very small. +# Process ID was not used because of the small but real chance of collision. isolate() { -local rnd -if test "x$ISOPATH" = x ; then - ISOPATH=${builddir} - ISODIR="$1" - if test "x$ISODIR" != x ; then - # Make sure the path is unique - rnd=`${execdir}/../libdispatch/ncrandom` - ISODIR="${ISODIR}_$rnd" - ISOPATH="${ISOPATH}/$ISODIR" - rm -fr $ISOPATH - mkdir $ISOPATH + if test "x$ISOPATH" = x ; then + ISOTESTSET="${builddir}/testset_" + if test "x$NOISOPATH" = x ; then ISOTESTSET="${ISOTESTSET}${TESTUID}"; fi + ISODIR="$1" + ISOPATH="${ISOTESTSET}/$ISODIR" + rm -fr $ISOPATH + mkdir -p $ISOPATH fi -fi } fi #TEST_COMMON_SH diff --git a/unit_test/CMakeLists.txt b/unit_test/CMakeLists.txt index e9cd48e082..4110f91495 100644 --- a/unit_test/CMakeLists.txt +++ b/unit_test/CMakeLists.txt @@ -32,11 +32,11 @@ ENDIF(ENABLE_HDF5) add_bin_test(unit_test test_pathcvt) IF(BUILD_UTILITIES) -IF(ENABLE_S3 AND WITH_S3_TESTING) -# SDK Test -build_bin_test(test_s3sdk ${XGETOPTSRC}) -add_sh_test(unit_test run_s3sdk) -ENDIF() + IF(ENABLE_S3 AND WITH_S3_TESTING) + # SDK Test + build_bin_test(test_s3sdk ${XGETOPTSRC}) + add_sh_test(unit_test run_s3sdk) + ENDIF() ENDIF() # Performance tests diff --git a/unit_test/run_s3sdk.sh b/unit_test/run_s3sdk.sh index a6f37ec85c..4589bcb5f6 100755 --- a/unit_test/run_s3sdk.sh +++ b/unit_test/run_s3sdk.sh @@ -7,13 +7,13 @@ set -e #CMD="valgrind --leak-check=full" -URL="https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data" +URL="https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET}" isolate "testdir_uts3sdk" # Create an isolation path for S3; build on the isolation directory S3ISODIR="$ISODIR" -S3ISOPATH="/netcdf-c" +S3ISOPATH="/${S3TESTSUBTREE}" 
S3ISOPATH="${S3ISOPATH}/$S3ISODIR" test_cleanup() { @@ -26,17 +26,42 @@ fi THISDIR=`pwd` cd $ISOPATH +echo -e "Running S3 AWSSDK Unit Tests." +echo -e "\to Checking ${URL} exists" ${CMD} ${execdir}/test_s3sdk -u "${URL}" exists + +echo -e "\to Checking write to ${URL}" ${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" write +echo "Status: $?" + +echo -e "\to Checking read from ${URL}" ${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" read +echo "Status: $?" + +echo -e "\to Checking size of ${URL}/test_s3sdk.txt" ${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" size +echo "Status: $?" + +echo -e "\to Checking list command for ${URL}" ${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}" list +echo "Status: $?" + +echo -e "\to Checking search command for ${URL}" ${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}" search +echo "Status: $?" + +echo -e "\to Checking delete command for ${URL}/test_s3sdk.txt" ${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}/test_s3sdk.txt" delete +echo "Status: $?" + if test "x$FEATURE_LARGE_TESTS" = xyes ; then -${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}" longlist + echo -e "\to Checking longlist command for ${URL}" + ${CMD} ${execdir}/test_s3sdk -u "${URL}" -k "${S3ISOPATH}" longlist + echo "Status: $?" 
fi +echo -e "Finished" + exit if test "x$GITHUB_ACTIONS" = xtrue; then # Cleanup on exit diff --git a/unit_test/test_s3sdk.c b/unit_test/test_s3sdk.c index 4237396187..93557a18f8 100644 --- a/unit_test/test_s3sdk.c +++ b/unit_test/test_s3sdk.c @@ -22,7 +22,7 @@ #endif #undef DEBUG - +//#define DEBUG 1 #define SELF_CLEAN /* Mnemonic(s) */ @@ -43,7 +43,7 @@ struct Options { /* Upload data */ static const char* uploaddata = "line1\nline2\nline3"; -//static const char* testurl = "https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data"; +//static const char* testurl = "https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET}"; /* Global values */ NCURI* purl = NULL; @@ -66,10 +66,7 @@ check(int code, const char* fcn, int line) { if(code == NC_NOERR) return code; fprintf(stderr,"***FAIL: (%d) %s @ %s:%d\n",code,nc_strerror(code),fcn,line); -#ifdef DEBUG abort(); -#endif - exit(1); } static enum Actions @@ -110,7 +107,7 @@ profilesetup(const char* url) fprintf(stderr,"URI parse fail: %s\n",url); goto done; } - CHECK(NC_s3urlprocess(purl, &s3info)); + CHECK(NC_s3urlprocess(purl, &s3info, NULL)); CHECK(NC_getactives3profile(purl, &activeprofile)); CHECK(NC_s3profilelookup(activeprofile, "aws_access_key_id", &accessid)); @@ -135,7 +132,7 @@ testbucketexists(void) int stat = NC_NOERR; int exists = 0; - seturl("https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data",NULL,!FORCE); + seturl("https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET}",NULL,!FORCE); CHECK(profilesetup(dumpoptions.url)); newurl = ncuribuild(purl,NULL,NULL,NCURIALL); @@ -159,7 +156,7 @@ testinfo(void) int stat = NC_NOERR; unsigned long long size = 0; - seturl("https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data","/object_store/dir1/nested1/file1.txt",!FORCE); + seturl("https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET}","/object_store/dir1/nested1/file1.txt",!FORCE); CHECK(profilesetup(dumpoptions.url)); newurl = ncuribuild(purl,NULL,NULL,NCURIALL); @@ -183,7 +180,7 @@ testread(void) unsigned long 
long size = 0; void* content = NULL; - seturl("https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data", "/netcdf-c/test_s3.txt",!FORCE); + seturl("https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET}", "/netcdf-c/test_s3.txt",!FORCE); CHECK(profilesetup(dumpoptions.url)); newurl = ncuribuild(purl,NULL,NULL,NCURIALL); @@ -212,7 +209,7 @@ testwrite(void) size64_t size = 0; void* content = NULL; - seturl("https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data", "/netcdf-c/test_s3.txt",!FORCE); + seturl("https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET}", "/netcdf-c/test_s3.txt",!FORCE); CHECK(profilesetup(dumpoptions.url)); newurl = ncuribuild(purl,NULL,NULL,NCURIALL); @@ -245,7 +242,7 @@ testgetkeys(void) size_t i,nkeys = 0; char** keys = NULL; - seturl("https://s3.us-east-1.amazonaws.com/unidata-zarr-test-data", "/object_store/dir1",!FORCE); + seturl("https://s3.us-east-1.amazonaws.com/${S3TESTBUCKET}", "/object_store/dir1",!FORCE); CHECK(profilesetup(dumpoptions.url)); newurl = ncuribuild(purl,NULL,NULL,NCURIALL); @@ -261,9 +258,9 @@ testgetkeys(void) printf("\n"); done: - cleanup(); for(i=0;i