Merge branch 'main' of https://github.com/Unidata/netcdf-c into silence-libdap2-warnings

Ward Fisher 2024-07-15 16:25:03 -06:00
commit 71609aa2cc
152 changed files with 4822 additions and 4613 deletions

View File

@ -2,6 +2,9 @@ name: NetCDF-C CMake CI - Windows
on: [pull_request, workflow_dispatch]
env:
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
@ -160,4 +163,4 @@ jobs:
run: |
cd build
PATH=~/tmp/bin:$PATH ctest . --rerun-failed --output-on-failure -VV
if: ${{ failure() }}
if: ${{ failure() }}

View File

@ -4,8 +4,11 @@
name: Run CDash Ubuntu/Linux netCDF Tests
on: workflow_dispatch
on: [workflow_dispatch]
env:
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true

View File

@ -300,7 +300,7 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --disable-xml2
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --disable-libxml2
if: ${{ success() }}
- name: Look at config.log if error

View File

@ -4,7 +4,10 @@
name: Run Ubuntu/Linux netCDF Tests
on: [ pull_request, workflow_dispatch]
on: [pull_request,workflow_dispatch]
env:
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
@ -171,7 +174,7 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-xml2
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-libxml2
if: ${{ success() }}
- name: Look at config.log if error
@ -266,7 +269,7 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-xml2 --disable-shared --enable-static
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-libxml2 --disable-shared --enable-static
if: ${{ success() }}
- name: Look at config.log if error

View File

@ -10,13 +10,14 @@ env:
SHELLOPTS: igncr
CHERE_INVOKING: 1
CYGWIN_NOWINPATH: 1
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
jobs:
build-and-test-autotools:
runs-on: windows-latest
defaults:
run:
shell: bash -eo pipefail -o igncr "{0}"
shell: C:/cygwin/bin/bash.exe -eo pipefail -o igncr "{0}"
name: Cygwin-based Autotools tests
@ -26,18 +27,20 @@ jobs:
steps:
- name: Fix line endings
shell: pwsh
run: git config --global core.autocrlf input
- uses: actions/checkout@v4
- uses: cygwin/cygwin-install-action@v2
- uses: cygwin/cygwin-install-action@v4
with:
platform: x86_64
install-dir: 'C:\cygwin'
packages: >-
git automake libtool autoconf2.5 make libhdf5-devel
libhdf4-devel zipinfo libxml2-devel perl zlib-devel
libzstd-devel libbz2-devel libaec-devel libzip-devel
libdeflate-devel gcc-core libcurl-devel libiconv-devel
libdeflate-devel gcc-core gcc-g++ libcurl-devel libiconv-devel
libssl-devel libcrypt-devel
- name: (Autotools) Run autoconf and friends
@ -79,3 +82,72 @@ jobs:
timeout-minutes: 30
run: |
make check -j8 SHELL=/bin/dash
build-and-test-cmake:
name: Cygwin-based CMake tests
runs-on: windows-latest
defaults:
run:
shell: C:/cygwin/bin/bash.exe -eo pipefail -o igncr "{0}"
steps:
- run: git config --global core.autocrlf input
shell: pwsh
- uses: actions/checkout@v4
- uses: cygwin/cygwin-install-action@v4
with:
platform: x86_64
install-dir: 'C:\cygwin'
packages: >-
git automake libtool autoconf2.5 make libhdf5-devel
libhdf4-devel zipinfo libxml2-devel perl zlib-devel
libzstd-devel libbz2-devel libaec-devel libzip-devel
libdeflate-devel gcc-core gcc-g++ libcurl-devel libiconv-devel
libssl-devel libcrypt-devel cmake ninja make m4 diffutils unzip
###
# Configure and build
###
- name: (CMake) Configure Build
env:
MAKE: "/usr/bin/make"
CXX: "/usr/bin/g++"
run: |
/usr/bin/cmake \
-G"Unix Makefiles" \
-B build \
-S . \
-DCMAKE_INSTALL_PREFIX=/tmp \
-DBUILD_SHARED_LIBS=ON \
-DNETCDF_ENABLE_NETCDF_4=ON \
-DNETCDF_BUILD_UTILITIES=ON \
-DNETCDF_ENABLE_TESTS=ON \
-DNETCDF_ENABLE_HDF5=ON \
-DNETCDF_ENABLE_NCZARR=TRUE \
-DNETCDF_ENABLE_PLUGINS=ON
if: ${{ success() }}
- name: (CMake) Look at CMakeCache.txt if error
run: cat build/CMakeCache.txt
if: ${{ failure() }}
- name: (CMake) Print Summary
run: cat build/libnetcdf.settings
- name: (CMake) Build All
env:
MAKE: "/usr/bin/make"
CXX: "/usr/bin/g++"
run: cmake --build build -j$(nproc)
if: ${{ success() }}
- name: (CMake) Run Tests
run: PATH=$PWD/build:$PATH ctest --test-dir build
if: ${{ success() }}
- name: (CMake) Verbose output of CTest failures
run: >-
PATH=$PWD/build:$PATH ctest --test-dir build --output-on-failure -j$(nproc) --rerun-failed -VV
if: ${{ failure() }}

View File

@ -6,6 +6,10 @@
name: Run MSYS2, MinGW64-based Tests (Not Visual Studio)
env:
CPPFLAGS: "-D_BSD_SOURCE"
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
on: [pull_request,workflow_dispatch]
concurrency:

View File

@ -17,9 +17,7 @@ project(netCDF
)
#Add custom CMake Module
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules/;${PROJECT_SOURCE_DIR}/cmake"
CACHE INTERNAL "Location of our custom CMake modules.")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules" "${PROJECT_SOURCE_DIR}/cmake")
set(PACKAGE "netCDF" CACHE STRING "")
@ -85,6 +83,7 @@ if(UNAME)
getuname(osname -s)
getuname(osrel -r)
getuname(cpu -m)
getuname(host -n)
set(TMP_BUILDNAME "${osname}-${osrel}-${cpu}")
endif()
@ -225,6 +224,12 @@ include(GenerateExportHeader)
# Compiler and Linker Configuration
################################
# Set in support of https://github.com/Unidata/netcdf-c/issues/2700
if(${CMAKE_C_COMPILER_ID} MATCHES "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fhonor-infinities")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fhonor-infinities")
endif()
option(NETCDF_FIND_SHARED_LIBS "Find dynamically-built versions of dependent libraries" ${BUILD_SHARED_LIBS})
##
@ -431,41 +436,82 @@ endif()
# Format Option checks
################################
# We need to now treat enable-netcdf4 and enable-hdf5 as separate,
# but for backward compatibility, we need to treat enable-netcdf4
# as equivalent to enable-hdf5.
# We detect this using these rules:
# 1. if NETCDF_ENABLE_HDF5 is off then disable hdf5
# 2. if NETCDF_ENABLE_NETCDF_4 is off then disable hdf5
# 3. else enable hdf5
option(NETCDF_ENABLE_NETCDF_4 "Use HDF5." ON)
option(NETCDF_ENABLE_HDF5 "Use HDF5." ON)
if(NOT NETCDF_ENABLE_HDF5 OR NOT NETCDF_ENABLE_NETCDF_4)
set(NETCDF_ENABLE_HDF5 OFF CACHE BOOL "Use HDF5" FORCE)
# As a long-term goal, and because it is now the case that
# -DNETCDF_ENABLE_NCZARR => USE_NETCDF4, make the external options
# -DNETCDF_ENABLE_NETCDF_4 and -DNETCDF_ENABLE_NETCDF4 obsolete
# in favor of -DNETCDF_ENABLE_HDF5.
# We will do the following for one more release cycle.
# 1. Make -DNETCDF_ENABLE_NETCDF_4 an alias for -DNETCDF_ENABLE_NETCDF4.
# 2. Make -DNETCDF_ENABLE_NETCDF4 an alias for -DNETCDF_ENABLE_HDF5.
# 3. Internally, convert most (but not all) uses of USE_NETCDF_4 and USE_NETCDF4 to USE_HDF5.
# Collect the values of -DNETCDF_ENABLE_NETCDF_4, -DNETCDF_ENABLE_NETCDF4, and -DNETCDF_ENABLE_HDF5.
# Figure out which options are defined and process options
if(DEFINED NETCDF_ENABLE_NETCDF_4)
set(UNDEF_NETCDF_4 OFF CACHE BOOL "")
option(NETCDF_ENABLE_NETCDF_4 "" ON)
else()
set(UNDEF_NETCDF_4 ON CACHE BOOL "")
endif()
if(DEFINED NETCDF_ENABLE_NETCDF4)
set(UNDEF_NETCDF4 OFF CACHE BOOL "")
option(NETCDF_ENABLE_NETCDF4 "" ON)
else()
set(UNDEF_NETCDF4 ON CACHE BOOL "")
endif()
if(DEFINED NETCDF_ENABLE_HDF5)
set(UNDEF_HDF5 OFF CACHE BOOL "")
option(NETCDF_ENABLE_HDF5 "" ON)
else()
set(UNDEF_HDF5 ON CACHE BOOL "")
endif()
option(NETCDF_ENABLE_HDF4 "Build netCDF-4 with HDF4 read capability (HDF4, HDF5 and Zlib required)." OFF)
if(NOT UNDEF_NETCDF_4)
message(WARNING "NETCDF_ENABLE_NETCDF_4 is deprecated; please use NETCDF_ENABLE_HDF5")
endif()
if(NOT UNDEF_NETCDF4)
message(WARNING "NETCDF_ENABLE_NETCDF4 is deprecated; please use NETCDF_ENABLE_HDF5")
endif()
# NETCDF_ENABLE_NETCDF_4 overrides NETCDF_ENABLE_NETCDF4 if latter not defined.
if((NOT "${UNDEF_NETCDF_4}") AND UNDEF_NETCDF4)
set(NETCDF_ENABLE_NETCDF4 ${NETCDF_ENABLE_NETCDF_4} CACHE BOOL "" FORCE)
endif()
# NETCDF_ENABLE_NETCDF4 overrides NETCDF_ENABLE_HDF5 if latter not defined.
if((NOT "${UNDEF_NETCDF4}") AND UNDEF_HDF5)
set(NETCDF_ENABLE_HDF5 "${NETCDF_ENABLE_NETCDF4}" CACHE BOOL "" FORCE)
endif()
# Otherwise, use NETCDF_ENABLE_HDF5 default
if(UNDEF_HDF5)
set(NETCDF_ENABLE_HDF5 ON CACHE BOOL "" FORCE)
endif()
# Turn off enable_netcdf4 because it will be used
# as a shorthand for ENABLE_HDF5|ENABLE_HDF4|ENABLE_NCZARR
set(NETCDF_ENABLE_NETCDF4 OFF CACHE BOOL "" FORCE)
option(NETCDF_ENABLE_DAP "Enable DAP2 and DAP4 Client." ON)
option(NETCDF_ENABLE_NCZARR "Enable NCZarr Client." ON)
option(NETCDF_ENABLE_PNETCDF "Build with parallel I/O for CDF-1, 2, and 5 files using PnetCDF." OFF)
set(NETCDF_ENABLE_CDF5 AUTO CACHE STRING "AUTO")
option(NETCDF_ENABLE_CDF5 "Enable CDF5 support" ON)
if(DEFINED NETCDF_ENABLE_NETCDF4)
message(FATAL_ERROR "NETCDF_ENABLE_NETCDF4 is deprecated. Please use NETCDF_ENABLE_NETCDF_4 instead.")
option(NETCDF_ENABLE_HDF4 "Enable HDF4 Read Support" OFF)
option(NETCDF_ENABLE_HDF4_FILE_TESTS "Enable HDF4 File Tests" ${NETCDF_ENABLE_HDF4})
if(NETCDF_ENABLE_HDF4)
set(USE_HDF4 ON)
endif()
# Netcdf-4 support (i.e. libsrc4) is required by more than just HDF5 (e.g. NCZarr)
# So, depending on which of the above formats are enabled, enable netcdf-4
if(NETCDF_ENABLE_HDF5 OR NETCDF_ENABLE_HDF4 OR NETCDF_ENABLE_NCZARR)
set(NETCDF_ENABLE_NETCDF_4 ON CACHE BOOL "Enable netCDF-4 API" FORCE)
set(NETCDF_ENABLE_NETCDF4 ON CACHE BOOL "Enable netCDF-4 API" FORCE)
endif()
# enable|disable all forms of network access
option(NETCDF_ENABLE_REMOTE_FUNCTIONALITY "Enable|disable all forms of remote data access (DAP, S3, etc)" ON)
message(">>> NETCDF_ENABLE_REMOTE_FUNCTIONALITY=${NETCDF_ENABLE_REMOTE_FUNCTIONALITY}")
if(NOT NETCDF_ENABLE_REMOTE_FUNCTIONALITY)
if(NOT NETCDF_ENABLE_REMOTE_FUNCTIONALITY AND NETCDF_ENABLE_DAP)
message(WARNING "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => NETCDF_ENABLE_DAP[4]=NO")
set(NETCDF_ENABLE_DAP OFF CACHE BOOL "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => NETCDF_ENABLE_DAP=NO" FORCE)
set(NETCDF_ENABLE_DAP2 OFF CACHE BOOL "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => NETCDF_ENABLE_DAP2=NO" FORCE)
@ -484,9 +530,9 @@ endif()
# Did the user specify a default minimum blocksize for posixio?
set(NCIO_MINBLOCKSIZE 256 CACHE STRING "Minimum I/O Blocksize for netCDF classic and 64-bit offset format files.")
if(NETCDF_ENABLE_NETCDF_4)
if(NETCDF_ENABLE_NETCDF4)
set(USE_NETCDF4 ON CACHE BOOL "")
set(NETCDF_ENABLE_NETCDF_4 ON CACHE BOOL "")
set(NETCDF_ENABLE_NETCDF4 ON CACHE BOOL "")
else()
set(USE_HDF4_FILE_TESTS OFF)
set(USE_HDF4 OFF)
@ -494,22 +540,15 @@ else()
set(NETCDF_ENABLE_HDF4 OFF)
endif()
# Option Logging, only valid for netcdf4.
# Option Logging, only valid for netcdf4 dispatchers.
option(NETCDF_ENABLE_LOGGING "Enable Logging." OFF)
if(NOT NETCDF_ENABLE_NETCDF_4)
set(NETCDF_ENABLE_LOGGING OFF)
endif()
if(NETCDF_ENABLE_LOGGING)
target_compile_definitions(netcdf PRIVATE LOGGING ENABLE_SET_LOG_LEVEL)
set(LOGGING ON)
set(ENABLE_SET_LOG_LEVEL ON)
endif()
option(NETCDF_ENABLE_SET_LOG_LEVEL_FUNC "Enable definition of nc_set_log_level()." ON)
if(NETCDF_ENABLE_NETCDF_4 AND NOT NETCDF_ENABLE_LOGGING AND NETCDF_ENABLE_SET_LOG_LEVEL_FUNC)
target_compile_definitions(netcdf PRIVATE -DENABLE_SET_LOG_LEVEL)
set(ENABLE_SET_LOG_LEVEL ON)
if(NOT NETCDF_ENABLE_NETCDF4)
set(NETCDF_ENABLE_LOGGING OFF)
endif()
set(LOGGING ${NETCDF_ENABLE_LOGGING})
set(NETCDF_ENABLE_SET_LOG_LEVEL ${NETCDF_ENABLE_LOGGING})
# Option to allow for strict null file padding.
# See https://github.com/Unidata/netcdf-c/issues/657 for more information
option(NETCDF_ENABLE_STRICT_NULL_BYTE_HEADER_PADDING "Enable strict null byte header padding." OFF)
@ -546,7 +585,7 @@ endif()
# Option to support byte-range reading of remote datasets
option(NETCDF_ENABLE_BYTERANGE "Enable byte-range access to remote datasets." ${NETCDF_ENABLE_DAP})
if(NOT NETCDF_ENABLE_REMOTE_FUNCTIONALITY)
if(NOT NETCDF_ENABLE_REMOTE_FUNCTIONALITY AND NETCDF_ENABLE_BYTERANGE)
message(WARNING "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => NETCDF_ENABLE_BYTERANGE=NO")
set(NETCDF_ENABLE_BYTERANGE OFF CACHE BOOL "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => NETCDF_ENABLE_BYTERANGE=NO" FORCE)
endif()
@ -562,6 +601,12 @@ set(NETCDF_ENABLE_EXTERNAL_SERVER_TESTS OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_DAP_LONG_TESTS OFF CACHE BOOL "" FORCE)
endif()
# Provide a global control for remotetest.
if ("$ENV{REMOTETESTDOWN}" STREQUAL "yes" AND NETCDF_ENABLE_DAP_REMOTE_TESTS)
message(WARNING "ENV(REMOTETESTDOWN) => NETCDF_ENABLE_DAP_REMOTE_TESTS == OFF")
set(NETCDF_ENABLE_DAP_REMOTE_TESTS OFF CACHE BOOL "" FORCE)
endif()
set(REMOTETESTSERVERS "remotetest.unidata.ucar.edu" CACHE STRING "test servers to use for remote test")
set(REMOTETESTSERVERS "remotetest.unidata.ucar.edu" CACHE STRING "test servers to use for remote test")
@ -634,7 +679,7 @@ else() # No option specified
endif()
# Try to enable NCZarr zip support
option(NETCDF_ENABLE_NCZARR_ZIP "Enable NCZarr ZIP support." OFF)
option(NETCDF_ENABLE_NCZARR_ZIP "Enable NCZarr ZIP support." ${NETCDF_ENABLE_NCZARR})
include(CMakeDependentOption)
@ -760,13 +805,17 @@ if(NETCDF_ENABLE_TESTS)
set(NC_CTEST_DROP_SITE "cdash.unidata.ucar.edu:443" CACHE STRING "Dashboard location for CTest-based testing purposes.")
set(NC_CTEST_DROP_LOC_PREFIX "" CACHE STRING "Prefix for Dashboard location on remote server when using CTest-based testing.")
set(SUBMIT_URL "https://cdash.unidata.ucar.edu:443")
find_program(HOSTNAME_CMD NAMES hostname)
if(NOT WIN32)
set(HOSTNAME_ARG "-s")
endif()
if(HOSTNAME_CMD)
execute_process(COMMAND ${HOSTNAME_CMD} "${HOSTNAME_ARG}" OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE)
set(NC_CTEST_SITE "${HOSTNAME}" CACHE STRING "Hostname of test machine.")
if("${host}" STREQUAL "")
find_program(HOSTNAME_CMD NAMES hostname)
if(NOT WIN32)
set(HOSTNAME_ARG "-s")
endif()
if(HOSTNAME_CMD)
execute_process(COMMAND ${HOSTNAME_CMD} "${HOSTNAME_ARG}" OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE)
set(NC_CTEST_SITE "${HOSTNAME}" CACHE STRING "Hostname of test machine.")
endif()
else()
set(NC_CTEST_SITE "${host}" CACHE STRING "Hostname of test machine.")
endif()
if(NC_CTEST_SITE)
@ -835,12 +884,6 @@ if(NETCDF_ENABLE_FSYNC)
set(USE_FSYNC ON)
endif()
# Temporary
OPTION (ENABLE_JNA "Enable jna bug fix code." OFF)
if(ENABLE_JNA)
set(JNA ON)
endif()
# Linux specific large file support flags.
# Modelled after check in CMakeLists.txt for hdf5.
option(NETCDF_ENABLE_LARGE_FILE_SUPPORT "Enable large file support." ON)
@ -958,7 +1001,7 @@ endif()
option(NETCDF_ENABLE_PARALLEL_TESTS "Enable Parallel IO Tests. Requires HDF5/NetCDF4 with parallel I/O Support." "${HDF5_PARALLEL}")
if(NETCDF_ENABLE_PARALLEL_TESTS AND USE_PARALLEL)
set(TEST_PARALLEL ON CACHE BOOL "")
if(USE_NETCDF4)
if(USE_HDF5)
set(TEST_PARALLEL4 ON CACHE BOOL "")
endif()
endif()
@ -983,7 +1026,6 @@ if(NOT BUILD_SHARED_LIBS)
endif()
option(NETCDF_ENABLE_NCZARR_FILTERS "Enable NCZarr filters" ${NETCDF_ENABLE_PLUGINS})
option(NETCDF_ENABLE_NCZARR_FILTERS_TESTING "Enable NCZarr filter testing." ${NETCDF_ENABLE_NCZARR_FILTERS})
# Constraints
if (NOT NETCDF_ENABLE_PLUGINS AND NETCDF_ENABLE_NCZARR_FILTERS)
@ -991,17 +1033,11 @@ if (NOT NETCDF_ENABLE_PLUGINS AND NETCDF_ENABLE_NCZARR_FILTERS)
set(NETCDF_ENABLE_NCZARR_FILTERS OFF CACHE BOOL "Enable NCZarr Filters." FORCE)
endif()
IF (NOT NETCDF_ENABLE_NCZARR)
message(WARNING "NETCDF_ENABLE_NCZARR==NO => NETCDF_ENABLE_NCZARR_FILTERS==NO AND NETCDF_ENABLE_NCZARR_FILTER_TESTING==NO")
IF (NOT NETCDF_ENABLE_NCZARR AND NETCDF_ENABLE_NCZARR_FILTERS)
message(WARNING "NETCDF_ENABLE_NCZARR==NO => NETCDF_ENABLE_NCZARR_FILTERS==NO")
set(NETCDF_ENABLE_NCZARR_FILTERS OFF CACHE BOOL "Disable NCZARR_FILTERS" FORCE)
endif()
IF (NOT NETCDF_ENABLE_NCZARR_FILTERS)
set(NETCDF_ENABLE_NCZARR_FILTER_TESTING OFF CACHE BOOL "Enable NCZarr Filter Testing" FORCE)
endif()
set(ENABLE_CLIENTSIDE_FILTERS OFF)
# Determine whether or not to generate documentation.
option(NETCDF_ENABLE_DOXYGEN "Enable generation of doxygen-based documentation." OFF)
if(NETCDF_ENABLE_DOXYGEN)
@ -1248,6 +1284,7 @@ CHECK_FUNCTION_EXISTS(strlcat HAVE_STRLCAT)
CHECK_FUNCTION_EXISTS(strlcpy HAVE_STRLCPY)
CHECK_FUNCTION_EXISTS(strdup HAVE_STRDUP)
CHECK_FUNCTION_EXISTS(strndup HAVE_STRNDUP)
CHECK_FUNCTION_EXISTS(strlen HAVE_STRLEN)
CHECK_FUNCTION_EXISTS(strtoll HAVE_STRTOLL)
CHECK_FUNCTION_EXISTS(strcasecmp HAVE_STRCASECMP)
CHECK_FUNCTION_EXISTS(strtoull HAVE_STRTOULL)
@ -1391,7 +1428,6 @@ endif(USE_HDF5)
if(USE_HDF4)
add_subdirectory(libhdf4)
add_subdirectory(hdf4_test)
endif(USE_HDF4)
if(NETCDF_ENABLE_DAP2)
@ -1430,6 +1466,8 @@ if(NETCDF_ENABLE_NCZARR)
DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/)
endif()
# Tests and files which depend on libnetcdf must be included
# *after* this line.
add_subdirectory(liblib)
if(NETCDF_ENABLE_PLUGINS)
@ -1450,6 +1488,9 @@ if(NETCDF_ENABLE_TESTS)
add_subdirectory(nctest)
endif()
add_subdirectory(nc_test)
if(USE_HDF4)
add_subdirectory(hdf4_test)
endif()
if(USE_HDF5)
include_directories(h5_test)
add_subdirectory(nc_test4)
@ -1471,6 +1512,7 @@ if(NETCDF_ENABLE_TESTS)
add_subdirectory(unit_test)
endif(NETCDF_ENABLE_UNIT_TESTS)
if(NETCDF_ENABLE_NCZARR)
include_directories(nczarr_test)
add_subdirectory(nczarr_test)
endif()
endif()
@ -1494,12 +1536,16 @@ add_subdirectory(docs)
# in the libdir.
##
if(MSVC)
file(GLOB COPY_FILES ${CMAKE_PREFIX_PATH}/lib/*.lib)
foreach(CPP ${CMAKE_PREFIX_PATH})
file(GLOB COPY_FILES ${CPP}/lib/*.lib)
endforeach()
install(FILES ${COPY_FILES}
DESTINATION ${CMAKE_INSTALL_LIBDIR}
COMPONENT dependencies)
file(GLOB COPY_FILES ${CMAKE_PREFIX_PATH}/bin/*.dll)
foreach(CPP ${CMAKE_PREFIX_PATH})
file(GLOB COPY_FILES ${CPP}/bin/*.dll)
endforeach()
string(REGEX REPLACE "msv[.*].dll" "" COPY_FILES "${COPY_FILES}")
install(FILES ${COPY_FILES}
DESTINATION ${CMAKE_INSTALL_BINDIR}
@ -1569,23 +1615,12 @@ if(NOT IS_DIRECTORY ${netCDF_BINARY_DIR}/tmp)
file(MAKE_DIRECTORY ${netCDF_BINARY_DIR}/tmp)
endif()
configure_file("${netCDF_SOURCE_DIR}/nc-config.cmake.in"
"${netCDF_BINARY_DIR}/tmp/nc-config" @ONLY
NEWLINE_STYLE LF)
file(COPY "${netCDF_BINARY_DIR}/tmp/nc-config"
DESTINATION ${netCDF_BINARY_DIR}/
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(FILES ${netCDF_BINARY_DIR}/netcdf.pc
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig
COMPONENT utilities)
install(PROGRAMS ${netCDF_BINARY_DIR}/nc-config
DESTINATION ${CMAKE_INSTALL_BINDIR}
COMPONENT utilities)
###
# End pkgconfig, nc-config file creation.
# End pkgconfig file creation.
###
##
@ -1607,8 +1642,6 @@ ENABLE_MAKEDIST(README.md COPYRIGHT RELEASE_NOTES.md INSTALL INSTALL.cmake test_
set(host_cpu "${cpu}")
set(host_vendor "${osname}")
set(host_os "${osrel}")
set(abs_top_builddir "${CMAKE_CURRENT_BINARY_DIR}")
set(abs_top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}")
string(RANDOM LENGTH 3 ALPHABET "0123456789" PLATFORMUID)
math(EXPR PLATFORMUID "${PLATFORMUID} + 1" OUTPUT_FORMAT DECIMAL)
@ -1623,8 +1656,9 @@ is_disabled(BUILD_SHARED_LIBS enable_static)
is_enabled(BUILD_SHARED_LIBS enable_shared)
is_enabled(NETCDF_ENABLE_V2_API HAS_NC2)
is_enabled(NETCDF_ENABLE_NETCDF_4 HAS_NC4)
is_enabled(NETCDF_ENABLE_NETCDF4 HAS_NC4)
is_enabled(NETCDF_ENABLE_HDF4 HAS_HDF4)
is_enabled(USE_HDF4 HAS_HDF4)
is_enabled(USE_HDF5 HAS_HDF5)
is_enabled(OFF HAS_BENCHMARKS)
is_enabled(STATUS_PNETCDF HAS_PNETCDF)
@ -1636,8 +1670,7 @@ is_enabled(NETCDF_ENABLE_DAP4 HAS_DAP4)
is_enabled(NETCDF_ENABLE_BYTERANGE HAS_BYTERANGE)
is_enabled(NETCDF_ENABLE_DISKLESS HAS_DISKLESS)
is_enabled(USE_MMAP HAS_MMAP)
is_enabled(JNA HAS_JNA)
is_enabled(NETCDF_ENABLE_ZERO_LENGTH_COORD_BOUND RELAX_COORD_BOUND)
is_enabled(ENABLE_ZERO_LENGTH_COORD_BOUND RELAX_COORD_BOUND)
is_enabled(USE_CDF5 HAS_CDF5)
is_enabled(NETCDF_ENABLE_ERANGE_FILL HAS_ERANGE_FILL)
is_enabled(HDF5_HAS_PAR_FILTERS HAS_PAR_FILTERS)
@ -1647,7 +1680,6 @@ is_enabled(NETCDF_ENABLE_S3_INTERNAL HAS_S3_INTERNAL)
is_enabled(HAS_HDF5_ROS3 HAS_HDF5_ROS3)
is_enabled(NETCDF_ENABLE_NCZARR HAS_NCZARR)
is_enabled(NETCDF_ENABLE_NCZARR_ZIP HAS_NCZARR_ZIP)
is_enabled(NETCDF_ENABLE_NCZARR_ZIP DO_NCZARR_ZIP_TESTS)
is_enabled(NETCDF_ENABLE_QUANTIZE HAS_QUANTIZE)
is_enabled(NETCDF_ENABLE_LOGGING HAS_LOGGING)
is_enabled(NETCDF_ENABLE_FILTER_TESTING DO_FILTER_TESTS)
@ -1696,6 +1728,8 @@ endif()
# Copy the CTest customization file into binary directory, as required.
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/CTestCustom.cmake")
message(STATUS "STD_FILTERS: ${STD_FILTERS}")
# Generate file from template.
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/libnetcdf.settings.in"
"${CMAKE_CURRENT_BINARY_DIR}/libnetcdf.settings"
@ -1727,6 +1761,20 @@ install(FILES "${netCDF_BINARY_DIR}/libnetcdf.settings"
# End libnetcdf.settings section.
#####
#####
# Create 'nc-config' file.
#####
configure_file("${netCDF_SOURCE_DIR}/nc-config.cmake.in"
"${netCDF_BINARY_DIR}/tmp/nc-config" @ONLY
NEWLINE_STYLE LF)
file(COPY "${netCDF_BINARY_DIR}/tmp/nc-config"
DESTINATION ${netCDF_BINARY_DIR}/
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
install(PROGRAMS ${netCDF_BINARY_DIR}/nc-config
DESTINATION ${CMAKE_INSTALL_BINDIR}
COMPONENT utilities)
#####
# Create 'netcdf_meta.h' include file.
#####
@ -1747,6 +1795,8 @@ configure_file(
set(EXTRA_DIST ${EXTRA_DIST} ${CMAKE_CURRENT_SOURCE_DIR}/test_common.in)
set(TOPSRCDIR "${CMAKE_CURRENT_SOURCE_DIR}")
set(TOPBUILDDIR "${CMAKE_CURRENT_BINARY_DIR}")
set(abs_top_builddir "${CMAKE_CURRENT_BINARY_DIR}")
set(abs_top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_common.in ${CMAKE_CURRENT_BINARY_DIR}/test_common.sh @ONLY NEWLINE_STYLE LF)
####
@ -1761,10 +1811,9 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/s3gc.in ${CMAKE_CURRENT_BINARY_DIR}/s
#####
# Build and copy nc_test4/findplugin.sh to various places
#####
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/nc_test4/findplugin.sh @ONLY NEWLINE_STYLE LF)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/nczarr_test/findplugin.sh @ONLY NEWLINE_STYLE LF)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/plugins/findplugin.sh @ONLY NEWLINE_STYLE LF)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/examples/C/findplugin.sh @ONLY NEWLINE_STYLE LF)
foreach(CC nc_test4 nczarr_test v3_nczarr_test plugins examples/C)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/plugins/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/${CC}/findplugin.sh @ONLY NEWLINE_STYLE LF)
endforeach()
if(NETCDF_ENABLE_BENCHMARKS)
if(NETCDF_ENABLE_PARALLEL4)

View File

@ -110,7 +110,7 @@ endif
# Build Cloud Storage if desired.
if NETCDF_ENABLE_NCZARR
ZARR_TEST_DIR = nczarr_test
ZARR_TEST_DIRS = nczarr_test
ZARR = libnczarr
endif
@ -129,7 +129,7 @@ if BUILD_TESTSETS
TESTDIRS = $(H5_TEST_DIR)
TESTDIRS += $(UNIT_TEST) $(V2_TEST) nc_test $(NC_TEST4)
TESTDIRS += $(BENCHMARKS_DIR) $(HDF4_TEST_DIR) $(NCDAP2TESTDIR) $(NCDAP4TESTDIR)
TESTDIRS += ${ZARR_TEST_DIR}
TESTDIRS += ${ZARR_TEST_DIRS}
endif
# This is the list of subdirs for which Makefiles will be constructed

View File

@ -7,13 +7,17 @@ This file contains a high-level description of this package's evolution. Release
## 4.9.3 - TBD
* Convert NCZarr V2 to store all netcdf-4 specific info as attributes. This improves interoperability with other Zarr implementations by no longer using non-standard keys. The price to be paid is that lazy attribute reading cannot be supported. See [Github #2836](https://github.com/Unidata/netcdf-c/issues/2936) for more information.
* Cleanup the option code for NETCDF_ENABLE_SET_LOG_LEVEL\[_FUNC\]. See [Github #2931](https://github.com/Unidata/netcdf-c/issues/2931) for more information.
* Fix duplicate definition when using aws-sdk-cpp. See [Github #2928](https://github.com/Unidata/netcdf-c/issues/2928) for more information.
* Cleanup various obsolete options and do some code refactoring. See [Github #2926](https://github.com/Unidata/netcdf-c/issues/2926) for more information.
* Convert the Zarr-related ENABLE_XXX options to NETCDF_ENABLE_XXX options (part of the cmake overhaul). See [Github #2923](https://github.com/Unidata/netcdf-c/issues/2923) for more information.
* Refactor macro `_FillValue` to `NC_FillValue` to avoid conflict with libc++ headers. See [Github #2858](https://github.com/Unidata/netcdf-c/issues/2858) for more information.
* Changed `cmake` build options to be prefixed with `NETCDF`, to bring things into line with best practices. This will permit a number of overall quality-of-life improvements to netCDF, in terms of allowing it to be more easily integrated with upstream projects via `FetchContent()`, `add_subdirectory()`, etc. Currently, the naming convention in use thus far will still work, but will result in warning messages about deprecation, and instructions on how to update your workflow. See [Github #2895](https://github.com/Unidata/netcdf-c/pull/2895) for more information.
* Fix some problems in handling S3 urls with missing regions. See [Github #2819](https://github.com/Unidata/netcdf-c/pull/2819).
* Incorporate a more modern look and feel to user documentation generated by Doxygen. See [Doxygen Awesome CSS](https://github.com/jothepro/doxygen-awesome-css) and [Github #2864](https://github.com/Unidata/netcdf-c/pull/2864) for more information.
* Added infrastructure to allow for `CMAKE_UNITY_BUILD`, (thanks \@jschueller). See [Github #2839](https://github.com/Unidata/netcdf-c/pull/2839) for more information.
* [cmake] Move dependency management out of the root-level `CMakeLists.txt` into two different files in the `cmake/` folder, `dependencies.cmake` and `netcdf_functions_macros.cmake`. See [Github #2838](https://github.com/Unidata/netcdf-c/pull/2838/) for more information.
* Obviate a number of irrelevant warnings. See [Github #2781](https://github.com/Unidata/netcdf-c/pull/2781).
* Improve the speed and data quantity for DAP4 queries. See [Github #2765](https://github.com/Unidata/netcdf-c/pull/2765).
* Remove the use of execinfo to programmatically dump the stack; it never worked. See [Github #2789](https://github.com/Unidata/netcdf-c/pull/2789).

View File

@ -14,8 +14,8 @@ find_package(MakeDist)
################################
# HDF4
################################
if(NETCDF_ENABLE_HDF4)
set(USE_HDF4 ON )
if(USE_HDF4)
set(NETCDF_USE_HDF4 ON )
# Check for include files, libraries.
find_path(MFHDF_H_INCLUDE_DIR mfhdf.h)
@ -65,11 +65,17 @@ if(NETCDF_ENABLE_HDF4)
if(NOT JPEG_LIB)
message(FATAL_ERROR "HDF4 Support enabled but cannot find libjpeg")
endif()
set(HDF4_LIBRARIES ${JPEG_LIB} ${HDF4_LIBRARIES} )
set(HDF4_LIBRARIES ${JPEG_LIB} ${HDF4_LIBRARIES} CACHE STRING "")
message(STATUS "Found JPEG libraries: ${JPEG_LIB}")
target_link_libraries(netcdf
PRIVATE
${HDF4_LIBRARIES}
)
# Option to enable HDF4 file tests.
option(NETCDF_ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata resources site to test with (requires curl)." ON)
#option(NETCDF_ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata resources site to test with (requires curl)." ON)
if(NETCDF_ENABLE_HDF4_FILE_TESTS)
find_program(PROG_CURL NAMES curl)
if(PROG_CURL)
@ -77,10 +83,11 @@ if(NETCDF_ENABLE_HDF4)
else()
message(STATUS "Unable to locate 'curl'. Disabling hdf4 file tests.")
set(USE_HDF4_FILE_TESTS OFF )
set(NETCDF_ENABLE_HDF4_FILE_TESTS OFF)
endif()
set(USE_HDF4_FILE_TESTS ${USE_HDF4_FILE_TESTS} )
endif()
endif()
endif(USE_HDF4)
################################
# HDF5
@ -127,8 +134,13 @@ if(USE_HDF5)
#####
# First, find the C and HL libraries.
#####
find_package(HDF5 ${HDF5_VERSION_REQUIRED} COMPONENTS C HL REQUIRED)
find_package(HDF5 COMPONENTS C HL REQUIRED)
message(STATUS "Found HDF5 version: ${HDF5_VERSION}")
if(${HDF5_VERSION} VERSION_LESS ${HDF5_VERSION_REQUIRED})
message(FATAL_ERROR "NetCDF requires HDF5 version ${HDF5_VERSION_REQUIRED} or later; found version ${HDF5_VERSION}.")
endif()
message(STATUS "Using HDF5 include dir: ${HDF5_INCLUDE_DIRS}")
target_link_libraries(netcdf
PRIVATE
@ -357,6 +369,7 @@ endif()
################################
# Zips
################################
MESSAGE(STATUS "Checking for filter libraries")
IF (NETCDF_ENABLE_FILTER_SZIP)
find_package(Szip)
elseif(NETCDF_ENABLE_NCZARR)
@ -373,31 +386,40 @@ IF (NETCDF_ENABLE_FILTER_ZSTD)
endif()
# Accumulate standard filters
set(STD_FILTERS "deflate") # Always have deflate*/
#set(STD_FILTERS "bz2")
set(FOUND_STD_FILTERS "")
if(ENABLE_ZLIB)
set(STD_FILTERS "deflate")
endif()
set_std_filter(Szip)
set(HAVE_SZ ${Szip_FOUND})
set(USE_SZIP ${HAVE_SZ})
set_std_filter(Blosc)
if(Zstd_FOUND)
set_std_filter(Zstd)
set(HAVE_ZSTD ON)
endif()
if(Bz2_FOUND)
set_std_filter(Bz2)
else()
# The reason we use a local version is to support a more complex test case
message("libbz2 not found using built-in version")
message("libbz2 not found using built-in version")
set(HAVE_LOCAL_BZ2 ON)
set(HAVE_BZ2 ON CACHE BOOL "")
set(STD_FILTERS "${STD_FILTERS} bz2")
endif()
set(STD_FILTERS "${STD_FILTERS}${FOUND_STD_FILTERS}")
IF (NETCDF_ENABLE_NCZARR_ZIP)
find_package(Zip REQUIRED)
target_include_directories(netcdf
PRIVATE
find_package(Zip)
if(Zip_FOUND)
target_include_directories(netcdf
PRIVATE
${Zip_INCLUDE_DIRS}
)
)
else()
message(STATUS "libzip development package not found, disabling NETCDF_ENABLE_NCZARR_ZIP")
set(NETCDF_ENABLE_NCZARR_ZIP OFF CACHE BOOL "Enable NCZARR_ZIP functionality." FORCE)
endif()
endif ()
################################

View File

@ -25,13 +25,13 @@ message(STATUS "Checking for Deprecated Options")
list(APPEND opts BUILD_UTILITIES ENABLE_BENCHMARKS ENABLE_BYTERANGE ENABLE_CDF5 ENABLE_CONVERSION_WARNINGS)
list(APPEND opts ENABLE_DAP ENABLE_DAP2 ENABLE_DAP4 ENABLE_DISKLESS ENABLE_DOXYGEN ENABLE_ERANGE_FILL)
list(APPEND opts ENABLE_EXAMPLES ENABLE_EXAMPLES_TESTS ENABLE_EXTREME_NUMBERS ENABLE_FFIO ENABLE_FILTER_BLOSC)
list(APPEND opts ENABLEFILTER_BZ2 ENABLE_FILTER_SZIP ENABLE_FILTER_TESTING ENABLE_FILTER_ZSTD ENABLE_FSYNC)
list(APPEND opts ENABLE_FILTER_BZ2 ENABLE_FILTER_SZIP ENABLE_FILTER_TESTING ENABLE_FILTER_ZSTD ENABLE_FSYNC)
list(APPEND opts ENABLE_HDF4 ENABLE_HDF5 ENABLE_LARGE_FILE_SUPPORT ENABLE_LARGE_FILE_TESTS ENABLE_LIBXML2)
list(APPEND opts ENABLE_LOGGING ENABLE_METADATA_PERF_TESTS ENABLE_MMAP ENABLE_NCZARR ENABLE_NCZARR_FILTERS)
list(APPEND opts ENABLE_NCZARR_S3 ENABLE_NCZARR_ZIP ENABLE_NETCDF_4 ENABLE_PARALLEL4 ENABLE_PARALLEL_TESTS)
list(APPEND opts ENABLE_PLUGINS ENABLE_PNETCDF ENABLE_QUANTIZE ENABLE_REMOTE_FUNCTIONALITY ENABLE_S3 ENABLE_S3_AWS)
list(APPEND opts ENABLE_S3_INTERNAL ENABLE_STDIO ENABLE_STRICT_NULL_BYTE_HEADER_PADDING ENABLE_TESTS ENABLE_UNIT_TESTS)
list(APPEND opts FIND_SHARED_LIBS LIB_NAME)
list(APPEND opts FIND_SHARED_LIBS LIB_NAME ENABLE_HDF4_FILE_TESTS)
foreach(opt ${opts})
#MESSAGE(STATUS "Option: ${opt}")

View File

@ -2,26 +2,25 @@
# Macros
################################
macro(set_std_filter filter)
function(set_std_filter filter)
# Upper case the filter name
string(TOUPPER "${filter}" upfilter)
string(TOLOWER "${filter}" downfilter)
if(ENABLE_FILTER_${upfilter})
if(NETCDF_ENABLE_FILTER_${upfilter})
# Define a test flag for filter
if(${filter}_FOUND)
include_directories(${${filter}_INCLUDE_DIRS})
set(ENABLE_${upfilter} TRUE)
set(HAVE_${upfilter} ON)
set(STD_FILTERS "${STD_FILTERS} ${downfilter}")
message(">>> Standard Filter: ${downfilter}")
set(NETCDF_ENABLE_${upfilter} TRUE CACHE BOOL "Enable ${upfilter}")
set(HAVE_${upfilter} ON CACHE BOOL "Have ${upfilter}")
set(FOUND_STD_FILTERS "${FOUND_STD_FILTERS} ${downfilter}" PARENT_SCOPE)
else()
set(ENABLE_${upfilter} FALSE)
set(HAVE_${upfilter} OFF)
set(NETCDF_ENABLE_${upfilter} FALSE CACHE BOOL "Enable ${upfilter}" FORCE)
set(HAVE_${upfilter} OFF CACHE BOOL "Have ${upfilter}" FORCE)
endif()
else()
set(HAVE_${upfilter} OFF)
set(HAVE_${upfilter} OFF CACHE BOOL "Have ${upfilter}" FORCE)
endif()
endmacro(set_std_filter)
endfunction(set_std_filter)
macro(getuname name flag)
execute_process(COMMAND "${UNAME}" "${flag}" OUTPUT_VARIABLE "${name}" OUTPUT_STRIP_TRAILING_WHITESPACE)
@ -148,6 +147,7 @@ macro(build_bin_test F)
add_executable(${F} "${CMAKE_CURRENT_BINARY_DIR}/${F}.c" ${ARGN})
endif()
target_link_libraries(${F} netcdf ${ALL_TLL_LIBS})
if(MSVC)
set_target_properties(${F}
PROPERTIES
@ -176,6 +176,8 @@ endmacro()
# Binary tests which are used by a script looking for a specific name.
macro(build_bin_test_no_prefix F)
build_bin_test(${F})
if(WIN32)
#SET_PROPERTY(TEST ${F} PROPERTY FOLDER "tests/")
set_target_properties(${F} PROPERTIES

View File

@ -130,8 +130,8 @@ are set when opening a binary file on Windows. */
/* if true, enable CDF5 Support */
#cmakedefine NETCDF_ENABLE_CDF5 1
/* if true, enable client side filters */
#cmakedefine ENABLE_CLIENT_FILTERS 1
/* if true, enable filter testing */
#cmakedefine NETCDF_ENABLE_FILTER_TESTING 1
/* if true, enable filter testing */
#cmakedefine NETCDF_ENABLE_FILTER_TESTING 1
@ -468,9 +468,6 @@ with zip */
/* if true, HDF5 is at least version 1.10.5 and supports UTF8 paths */
#cmakedefine HDF5_UTF8_PATHS 1
/* if true, include JNA bug fix */
#cmakedefine JNA 1
/* do large file tests */
#cmakedefine LARGE_FILE_TESTS 1
@ -478,7 +475,8 @@ with zip */
#cmakedefine LOGGING 1
/* If true, define nc_set_log_level. */
#cmakedefine ENABLE_SET_LOG_LEVEL 1
#cmakedefine NETCDF_ENABLE_LOGGING 1
#cmakedefine NETCDF_ENABLE_SET_LOG_LEVEL 1
/* min blocksize for posixio. */
#cmakedefine NCIO_MINBLOCKSIZE ${NCIO_MINBLOCKSIZE}
@ -652,6 +650,9 @@ with zip */
/* if true, enable nczarr blosc support */
#cmakedefine NETCDF_ENABLE_BLOSC 1
/* if true enable tests that access external servers */
#cmakedefine NETCDF_ENABLE_EXTERNAL_SERVER_TESTS 1
/* Version number of package */
#cmakedefine VERSION "${netCDF_VERSION}"

View File

@ -132,30 +132,48 @@ AC_DEFINE_UNQUOTED([WINVERBUILD], [$WINVERBUILD], [windows version build])
AC_MSG_NOTICE([checking supported formats])
# An explicit disable of netcdf-4 | netcdf4 is treated as if it was disable-hdf5
AC_MSG_CHECKING([whether we should build with netcdf4 (alias for HDF5)])
AC_ARG_ENABLE([netcdf4], [AS_HELP_STRING([--disable-netcdf4],
[(Deprecated) Synonym for --enable-hdf5)])])
test "x$enable_netcdf4" = xno || enable_netcdf4=yes
AC_MSG_RESULT([$enable_netcdf4 (Deprecated) Please use --disable-hdf5)])
AC_MSG_CHECKING([whether we should build with netcdf-4 (alias for HDF5)])
AC_ARG_ENABLE([netcdf-4], [AS_HELP_STRING([--disable-netcdf-4],
[(synonym for --disable-netcdf4)])])
test "x$enable_netcdf_4" = xno || enable_netcdf_4=yes
AC_MSG_RESULT([$enable_netcdf_4])
# Propagate the alias
if test "x$enable_netcdf_4" = xno ; then enable_netcdf4=no; fi
if test "x$enable_netcdf4" = xno ; then enable_netcdf_4=no; fi
# As a long term goal, and because it is now the case that --enable-nczarr
# => USE_NETCDF4, make the external options --enable-netcdf-4 and
# --enable-netcdf4 obsolete in favor of --enable-hdf5
# We will do the following for one more release cycle.
# 1. Make --enable-netcdf-4 be an alias for --enable-netcdf4.
# 2. Make --enable-netcdf4 an alias for --enable-hdf5.
# 3. Internally, convert most uses of USE_NETCDF_4 and USE_NETCDF4 to USE_HDF5
# Does the user want to use HDF5?
# Collect the values of --enable-netcdf-4, --enable-netcdf4, and --enable-hdf5.
# Also determine which have been explicitly set on the command line.
AC_ARG_ENABLE([netcdf-4], [AS_HELP_STRING([--enable-netcdf-4],
[(Deprecated) Synonym for --enable-hdf5; default yes])])
AC_ARG_ENABLE([netcdf4], [AS_HELP_STRING([--enable-netcdf4],
[(Deprecated) Synonym for --enable-hdf5; default yes])])
AC_ARG_ENABLE([hdf5], [AS_HELP_STRING([--enable-hdf5],[default yes])])
# Complain about the use of --enable-netcdf-4/--enable-netcdf4
if test "x$enable_netcdf_4" != x ; then
AC_MSG_WARN([--enable-netcdf-4 is deprecated; please use --enable-hdf5])
fi
if test "x$enable_netcdf4" != x ; then
AC_MSG_WARN([--enable-netcdf4 is deprecated; please use --enable-hdf5])
fi
# --enable-netcdf-4 overrides --enable-netcdf4 if latter not defined
if test "x$enable_netcdf_4" != x && test "x$enable_netcdf4" == x ; then
enable_netcdf4="$enable_netcdf_4"
fi
# --enable-netcdf4 overrides --enable-hdf5 if latter not defined
if test "x$enable_netcdf4" != x && test "x$enable_hdf5" == x ; then
enable_hdf5="$enable_netcdf4"
fi
# Otherwise, use --enable-hdf5
AC_MSG_CHECKING([whether we should build with HDF5])
AC_ARG_ENABLE([hdf5], [AS_HELP_STRING([--disable-hdf5],
[do not build with HDF5])])
test "x$enable_hdf5" = xno || enable_hdf5=yes
if test "x$enable_netcdf4" = xno ; then enable_hdf5=no ; fi
# disable-netcdf4 is synonym for disable-hdf5
AC_MSG_RESULT([$enable_hdf5])
# Turn off enable_netcdf4 because it will be used
# as a shorthand for enable_hdf5|enable_hdf4|enable_nczarr
enable_netcdf4=no
# Check whether we want to enable CDF5 support.
AC_MSG_CHECKING([whether CDF5 support should be disabled])
AC_ARG_ENABLE([cdf5],
@ -217,12 +235,8 @@ AC_MSG_RESULT($enable_nczarr)
# HDF5 | HDF4 | NCZarr => netcdf-4
if test "x$enable_hdf5" = xyes || test "x$enable_hdf4" = xyes || test "x$enable_nczarr" = xyes ; then
enable_netcdf_4=yes
enable_netcdf4=yes
fi
AC_MSG_CHECKING([whether netcdf-4 should be forcibly enabled])
AC_MSG_RESULT([$enable_netcdf_4])
# Synonym
enable_netcdf4=${enable_netcdf_4}
AC_MSG_NOTICE([checking user options])
@ -280,8 +294,8 @@ AC_SUBST([DOXYGEN_SERVER_BASED_SEARCH], ["NO"])
AC_ARG_ENABLE([doxygen-pdf-output],
[AS_HELP_STRING([--enable-doxygen-pdf-output],
[Build netCDF library documentation in PDF format. Experimental.])])
AM_CONDITIONAL([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [test "x$enable_doxygen_pdf_output" = xyes])
AC_SUBST([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [$enable_doxygen_pdf_output])
AM_CONDITIONAL([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [test "x$enable_doxygen_pdf_output" = xyes])
AC_ARG_ENABLE([dot],
[AS_HELP_STRING([--enable-dot],
@ -316,24 +330,6 @@ if test "x$enable_fsync" = xyes ; then
AC_DEFINE([USE_FSYNC], [1], [if true, include experimental fsync code])
fi
# Temporary until JNA bug is fixed (which is probably never).
# The problem being solved is this:
# > On Windows using the microsoft runtime, it is an error
# > for one library to free memory allocated by a different library.
# This is probably only an issue when using the netcdf-c library
# via JNA under Java.
AC_MSG_CHECKING([if jna bug workaround is enabled])
AC_ARG_ENABLE([jna],
[AS_HELP_STRING([--enable-jna],
[enable jna bug workaround])],
[],
[enable_jna=no])
test "x$enable_jna" = xno || enable_jna=yes
AC_MSG_RESULT($enable_jna)
if test "x$enable_jna" = xyes ; then
AC_DEFINE([JNA], [1], [if true, include jna bug workaround code])
fi
# Does the user want to turn off unit tests (useful for test coverage
# analysis).
AC_MSG_CHECKING([if unit tests should be enabled])
@ -454,17 +450,14 @@ AC_ARG_ENABLE([logging],
Ignored if netCDF-4 is not enabled.])])
test "x$enable_logging" = xyes || enable_logging=no
AC_MSG_RESULT([$enable_logging])
# Does the user want to turn off nc_set_log_level() function? (It will
# always be defined if --enable-logging is used.)
AC_MSG_CHECKING([whether nc_set_log_level() function is included (will do nothing unless enable-logging is also used)])
AC_ARG_ENABLE([set_log_level_func], [AS_HELP_STRING([--disable-set-log-level-func],
[disable the nc_set_log_level function])])
test "x$enable_set_log_level_func" = xno -a "x$enable_logging" = xno || enable_set_log_level_func=yes
if test "x$enable_set_log_level_func" = xyes -a "x$enable_netcdf_4" = xyes; then
AC_DEFINE([ENABLE_SET_LOG_LEVEL], 1, [If true, define nc_set_log_level.])
if test "x$enable_logging" = xyes; then
enable_set_log_level_func=yes
enable_set_log_level=yes
AC_DEFINE([NETCDF_ENABLE_SET_LOG_LEVEL], 1, [If true, enable nc_set_log_level function.])
else
enable_set_log_level_func=no
enable_set_log_level=no
fi
AC_MSG_RESULT($enable_set_log_level_func)
# CURLOPT_USERNAME is not defined until curl version 7.19.1
@ -633,6 +626,12 @@ if test "x$enable_dap" = "xno" ; then
fi
AC_MSG_RESULT($enable_dap_remote_tests)
# Provide a global control for remotetest.
if test "xREMOTETESTDOWN" = xyes ; then
AC_MSG_WARN("ENV(REMOTETESTDOWN) => netcdf_enable_dap_remote_tests == no")
enable_dap_remote_tests=no
fi
AC_MSG_CHECKING([whether use of external (non-unidata) servers should be enabled])
AC_ARG_ENABLE([external-server-tests],
[AS_HELP_STRING([--enable-external-server-tests (default off)],
@ -703,14 +702,6 @@ if test "x$enable_dap_remote_tests" = "xno" || test "x$enable_external_server_te
enable_dap_long_tests=no
fi
# Control zarr storage
if test "x$enable_nczarr" = xyes ; then
if test "x$enable_netcdf_4" = xno ; then
AC_MSG_WARN([netCDF-4 disabled, so you must not enable nczarr])
enable_nczarr=no
fi
fi
if test "x$enable_nczarr" = xyes; then
AC_DEFINE([NETCDF_ENABLE_NCZARR], [1], [if true, build NCZarr Client])
AC_SUBST(NETCDF_ENABLE_NCZARR)
@ -1325,7 +1316,7 @@ AC_CHECK_TYPES([struct timespec])
# disable dap4 if hdf5 is disabled
if test "x$enable_hdf5" = "xno" ; then
AC_MSG_WARN([netcdf-4 not enabled; disabling DAP4])
AC_MSG_WARN([hdf5 not enabled; disabling DAP4])
enable_dap4=no
fi
@ -1585,7 +1576,7 @@ fi
AC_CHECK_LIB([m], [floor], [],
[AC_MSG_ERROR([Can't find or link to the math library.])])
if test "x$enable_netcdf_4" = xyes; then
if test "x$enable_netcdf4" = xyes; then
AC_DEFINE([USE_NETCDF4], [1], [if true, build netCDF-4])
fi
@ -1841,7 +1832,7 @@ fi
AC_SUBST(M4FLAGS)
# No logging for netcdf-3.
if test "x$enable_netcdf_4" = xno; then
if test "x$enable_netcdf4" = xno; then
enable_logging=no
fi
if test "x$enable_logging" = xyes; then
@ -1875,25 +1866,15 @@ AC_MSG_WARN([--disable-plugins => --disable-filter-testing])
enable_filter_testing=no
fi
if test "x$enable_filter_testing" = xno; then
AC_MSG_WARN([--disable-filter-testing => --disable-nczarr-filter-testing])
enable_nczarr_filter_testing=no
fi
if test "x$enable_nczarr" = xno; then
AC_MSG_WARN([--disable-nczarr => --disable-nczarr-filters])
enable_nczarr_filters=no
enable_nczarr_filter_testing=no
fi
if test "x$enable_nczarr_filters" = xyes; then
AC_DEFINE([NETCDF_ENABLE_NCZARR_FILTERS], [1], [if true, enable NCZarr filters])
fi
# Client side filter registration is permanently disabled
enable_clientside_filters=no
AM_CONDITIONAL(ENABLE_CLIENTSIDE_FILTERS, [test x$enable_clientside_filters = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_FILTER_TESTING, [test x$enable_filter_testing = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR_FILTERS, [test x$enable_nczarr_filters = xyes])
@ -1915,20 +1896,21 @@ AM_CONDITIONAL(NETCDF_ENABLE_DAP_LONG_TESTS, [test "x$enable_dap_long_tests" = x
AM_CONDITIONAL(USE_PNETCDF_DIR, [test ! "x$PNETCDFDIR" = x])
AM_CONDITIONAL(USE_LOGGING, [test "x$enable_logging" = xyes])
AM_CONDITIONAL(CROSS_COMPILING, [test "x$cross_compiling" = xyes])
AM_CONDITIONAL(USE_NETCDF4, [test x$enable_netcdf_4 = xyes])
AM_CONDITIONAL(USE_NETCDF4, [test x$enable_netcdf4 = xyes])
AM_CONDITIONAL(USE_HDF5, [test x$enable_hdf5 = xyes])
AM_CONDITIONAL(USE_HDF4, [test x$enable_hdf4 = xyes])
AM_CONDITIONAL(USE_HDF4_FILE_TESTS, [test x$enable_hdf4_file_tests = xyes])
AM_CONDITIONAL(USE_RENAMEV3, [test x$enable_netcdf_4 = xyes -o x$enable_dap = xyes])
AM_CONDITIONAL(USE_RENAMEV3, [test x$enable_netcdf4 = xyes -o x$enable_dap = xyes])
AM_CONDITIONAL(USE_PNETCDF, [test x$enable_pnetcdf = xyes])
AM_CONDITIONAL(USE_DISPATCH, [test x$enable_dispatch = xyes])
AM_CONDITIONAL(BUILD_MMAP, [test x$enable_mmap = xyes])
AM_CONDITIONAL(BUILD_DOCS, [test x$enable_doxygen = xyes])
AM_CONDITIONAL(SHOW_DOXYGEN_TAG_LIST, [test x$enable_doxygen_tasks = xyes])
AM_CONDITIONAL(ENABLE_METADATA_PERF, [test x$enable_metadata_perf = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_METADATA_PERF, [test x$enable_metadata_perf = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_BYTERANGE, [test "x$enable_byterange" = xyes])
AM_CONDITIONAL(RELAX_COORD_BOUND, [test "xyes" = xyes])
AM_CONDITIONAL(HAS_PAR_FILTERS, [test x$hdf5_supports_par_filters = xyes ])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR_FILTERS, [test x$enable_nczarr_filters = xyes])
# We need to simplify the set of S3 and Zarr flag combinations
AM_CONDITIONAL(NETCDF_ENABLE_S3, [test "x$enable_s3" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_AWS, [test "x$enable_s3_aws" = xyes])
@ -1946,7 +1928,7 @@ AM_CONDITIONAL(HAVE_ZSTD, [test "x$have_zstd" = xyes])
# If the machine doesn't have a long long, and we want netCDF-4, then
# we've got problems!
if test "x$enable_netcdf_4" = xyes; then
if test "x$enable_netcdf4" = xyes; then
AC_TYPE_LONG_LONG_INT
AC_TYPE_UNSIGNED_LONG_LONG_INT
dnl if test ! "x$ac_cv_type_long_long_int" = xyes -o ! "x$ac_cv_type_unsigned_long_long_int" = xyes; then
@ -1963,7 +1945,7 @@ fi
# meteor strike.
AC_MSG_CHECKING([what to call the output of the ftpbin target])
BINFILE_NAME=binary-netcdf-$PACKAGE_VERSION
test "x$enable_netcdf_4" = xno && BINFILE_NAME=${BINFILE_NAME}_nc3
test "x$enable_netcdf4" = xno && BINFILE_NAME=${BINFILE_NAME}_nc3
BINFILE_NAME=${BINFILE_NAME}.tar
AC_SUBST(BINFILE_NAME)
AC_MSG_RESULT([$BINFILE_NAME $FC $CXX])
@ -2024,7 +2006,6 @@ AC_SUBST(HAS_DAP,[$enable_dap])
AC_SUBST(HAS_DAP2,[$enable_dap])
AC_SUBST(HAS_DAP4,[$enable_dap4])
AC_SUBST(HAS_NC2,[$nc_build_v2])
AC_SUBST(HAS_NC4,[$enable_netcdf_4])
AC_SUBST(HAS_CDF5,[$enable_cdf5])
AC_SUBST(HAS_HDF4,[$enable_hdf4])
AC_SUBST(HAS_BENCHMARKS,[$enable_benchmarks])
@ -2035,7 +2016,6 @@ AC_SUBST(HAS_PARALLEL,[$enable_parallel])
AC_SUBST(HAS_PARALLEL4,[$enable_parallel4])
AC_SUBST(HAS_DISKLESS,[yes])
AC_SUBST(HAS_MMAP,[$enable_mmap])
AC_SUBST(HAS_JNA,[$enable_jna])
AC_SUBST(HAS_ERANGE_FILL,[$enable_erange_fill])
AC_SUBST(HAS_BYTERANGE,[$enable_byterange])
AC_SUBST(RELAX_COORD_BOUND,[yes])
@ -2047,7 +2027,6 @@ AC_SUBST(HAS_HDF5_ROS3,[$has_hdf5_ros3])
AC_SUBST(HAS_NCZARR,[$enable_nczarr])
AC_SUBST(NETCDF_ENABLE_S3_TESTING,[$with_s3_testing])
AC_SUBST(HAS_NCZARR_ZIP,[$enable_nczarr_zip])
AC_SUBST(DO_NCZARR_ZIP_TESTS,[$enable_nczarr_zip])
AC_SUBST(HAS_QUANTIZE,[$enable_quantize])
AC_SUBST(HAS_LOGGING,[$enable_logging])
AC_SUBST(DO_FILTER_TESTS,[$enable_filter_testing])
@ -2190,7 +2169,6 @@ AC_DEFUN([AX_SET_META],[
#####
AC_SUBST([NC_VERSION]) NC_VERSION=$VERSION
AX_SET_META([NC_HAS_NC2],[$nc_build_v2],[yes])
AX_SET_META([NC_HAS_NC4],[$enable_netcdf_4],[yes])
AX_SET_META([NC_HAS_HDF4],[$enable_hdf4],[yes])
AX_SET_META([NC_HAS_BENCHMARKS],[$enable_benchmarks],[yes])
AX_SET_META([NC_HAS_HDF5],[$enable_hdf5],[yes])
@ -2198,7 +2176,6 @@ AX_SET_META([NC_HAS_DAP2],[$enable_dap],[yes])
AX_SET_META([NC_HAS_DAP4],[$enable_dap4],[yes])
AX_SET_META([NC_HAS_DISKLESS],[yes],[yes])
AX_SET_META([NC_HAS_MMAP],[$enable_mmap],[yes])
AX_SET_META([NC_HAS_JNA],[$enable_jna],[yes])
AX_SET_META([NC_HAS_PNETCDF],[$enable_pnetcdf],[yes])
AX_SET_META([NC_HAS_PARALLEL],[$enable_parallel],[yes])
AX_SET_META([NC_HAS_PARALLEL4],[$enable_parallel4],[yes])
@ -2243,10 +2220,9 @@ AC_MSG_NOTICE([generating header files and makefiles])
AC_CONFIG_FILES(test_common.sh:test_common.in)
AC_CONFIG_FILES(s3cleanup.sh:s3cleanup.in, [chmod ugo+x s3cleanup.sh])
AC_CONFIG_FILES(s3gc.sh:s3gc.in, [chmod ugo+x s3gc.sh])
AC_CONFIG_FILES(nc_test4/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x nc_test4/findplugin.sh])
AC_CONFIG_FILES(nczarr_test/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x nczarr_test/findplugin.sh])
AC_CONFIG_FILES(plugins/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x plugins/findplugin.sh])
AC_CONFIG_FILES(examples/C/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x examples/C/findplugin.sh])
for FP in plugins nc_test4 nczarr_test examples/C ; do
AC_CONFIG_FILES(${FP}/findplugin.sh:plugins/findplugin.in, [chmod ugo+x ${FP}/findplugin.sh])
done
AC_CONFIG_FILES(ncdap_test/findtestserver.c:ncdap_test/findtestserver.c.in, [chmod ugo+x ncdap_test/findtestserver.c])
AC_CONFIG_FILES([nc_test/run_pnetcdf_tests.sh:nc_test/run_pnetcdf_tests.sh.in],[chmod ugo+x nc_test/run_pnetcdf_tests.sh])
AC_CONFIG_FILES(dap4_test/findtestserver4.c:ncdap_test/findtestserver.c.in)

View File

@ -93,8 +93,8 @@ clean-local: clean-local-check
.PHONY: clean-local-check
clean-local-check:
-rm -rf results results_*
-rm -f .dodsrc .daprc .ncrc .netrc
rm -rf results results_*
rm -f .dodsrc .daprc .ncrc .netrc
# The shell file maketests.sh is used to build the testdata
# for dap4 testing. It creates and fills the directories

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
data:

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
}

View File

@ -5,12 +5,12 @@
xmlns="http://xml.opendap.org/ns/DAP/4.0#"
xmlns:dap="http://xml.opendap.org/ns/DAP/4.0#">
<Dimensions>
<Dimension name="_AnonymousDim2" size="2"/>
<Dimension name="_Anonymous_Dim_2" size="2"/>
</Dimensions>
<Types>
<Structure name="v1">
<Seq name="f1" type="/v1_f1_t">
<Dim name="/_AnonymousDim2"/>
<Dim name="/_Anonymous_Dim_2"/>
</Sequence>
</Structure>
<Structure name="v1_f1_base">

View File

@ -1,9 +1,9 @@
netcdf amsre_20060131v5 {
dimensions:
_AnonymousDim3 = 3 ;
_AnonymousDim6 = 6 ;
_Anonymous_Dim_3 = 3 ;
_Anonymous_Dim_6 = 6 ;
variables:
byte time_a(_AnonymousDim3, _AnonymousDim6) ;
byte time_a(_Anonymous_Dim_3, _Anonymous_Dim_6) ;
string time_a:Equator_Crossing_Time = "1:30 PM" ;
data:

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
data:

View File

@ -2,13 +2,13 @@ netcdf test_atomic_array {
types:
opaque(16) opaque16_t ;
dimensions:
_AnonymousDim1 = 1 ;
_AnonymousDim2 = 2 ;
_Anonymous_Dim_1 = 1 ;
_Anonymous_Dim_2 = 2 ;
variables:
ubyte vu8(_AnonymousDim1, _AnonymousDim2) ;
double vd(_AnonymousDim1) ;
string vs(_AnonymousDim1, _AnonymousDim1) ;
opaque16_t vo(_AnonymousDim1, _AnonymousDim1) ;
ubyte vu8(_Anonymous_Dim_1, _Anonymous_Dim_2) ;
double vd(_Anonymous_Dim_1) ;
string vs(_Anonymous_Dim_1, _Anonymous_Dim_1) ;
opaque16_t vo(_Anonymous_Dim_1, _Anonymous_Dim_1) ;
// global attributes:
string :_dap4.ce = "/vu8[1][0:2:2];/vd[1];/vs[1][0];/vo[0][1]" ;

View File

@ -1,8 +1,8 @@
netcdf test_atomic_array {
dimensions:
_AnonymousDim3 = 3 ;
_Anonymous_Dim_3 = 3 ;
variables:
short v16(_AnonymousDim3) ;
short v16(_Anonymous_Dim_3) ;
// global attributes:
string :_dap4.ce = "/v16[0:1,3]" ;

View File

@ -1,8 +1,8 @@
netcdf test_atomic_array {
dimensions:
_AnonymousDim3 = 3 ;
_Anonymous_Dim_3 = 3 ;
variables:
short v16(_AnonymousDim3) ;
short v16(_Anonymous_Dim_3) ;
// global attributes:
string :_dap4.ce = "/v16[3,0:1]" ;

View File

@ -5,9 +5,9 @@ types:
Altocumulus = 7, Cirrostratus = 8, Cirrocumulus = 9, Cirrus = 10,
Missing = 127} ;
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
cloud_class_t primary_cloud(_AnonymousDim2) ;
cloud_class_t primary_cloud(_Anonymous_Dim_2) ;
cloud_class_t primary_cloud:_FillValue = Missing ;
// global attributes:

View File

@ -1,8 +1,8 @@
netcdf test_one_vararray {
dimensions:
_AnonymousDim1 = 1 ;
_Anonymous_Dim_1 = 1 ;
variables:
int t(_AnonymousDim1) ;
int t(_Anonymous_Dim_1) ;
// global attributes:
string :_dap4.ce = "/t[1]" ;

View File

@ -1,8 +1,8 @@
netcdf test_one_vararray {
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
int t(_AnonymousDim2) ;
int t(_Anonymous_Dim_2) ;
// global attributes:
string :_dap4.ce = "/t[0:1]" ;

View File

@ -2,10 +2,10 @@ netcdf test_opaque_array {
types:
opaque(16) opaque16_t ;
dimensions:
_AnonymousDim1 = 1 ;
_AnonymousDim2 = 2 ;
_Anonymous_Dim_1 = 1 ;
_Anonymous_Dim_2 = 2 ;
variables:
opaque16_t vo2(_AnonymousDim1, _AnonymousDim2) ;
opaque16_t vo2(_Anonymous_Dim_1, _Anonymous_Dim_2) ;
// global attributes:
string :_dap4.ce = "/vo2[1][0:1]" ;

View File

@ -5,9 +5,9 @@ types:
int y ;
}; // s_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
s_t s(_AnonymousDim2, _AnonymousDim2) ;
s_t s(_Anonymous_Dim_2, _Anonymous_Dim_2) ;
// global attributes:
string :_dap4.ce = "/s[0:2:3][0:1]" ;

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
data:

View File

@ -1,9 +1,9 @@
netcdf \2004050300_eta_211 {
dimensions:
record = UNLIMITED ; // (1 currently)
_AnonymousDim1 = 1 ;
_AnonymousDim4 = 4 ;
_AnonymousDim15 = 15 ;
_Anonymous_Dim_1 = 1 ;
_Anonymous_Dim_4 = 4 ;
_Anonymous_Dim_15 = 15 ;
variables:
double reftime(record) ;
string reftime:units = "hours since 1992-1-1" ;
@ -11,7 +11,7 @@ variables:
double valtime(record) ;
string valtime:units = "hours since 1992-1-1" ;
string valtime:long_name = "valid time" ;
float Z_sfc(_AnonymousDim1, _AnonymousDim4, _AnonymousDim15) ;
float Z_sfc(_Anonymous_Dim_1, _Anonymous_Dim_4, _Anonymous_Dim_15) ;
string Z_sfc:navigation = "nav" ;
float Z_sfc:_FillValue = -9999.006f ;
string Z_sfc:units = "gp m" ;

View File

@ -368,30 +368,30 @@ The details for writing such a filter are defined in the HDF5 documentation[1,2]
The HDF5 loader searches for plugins in a number of directories.
This search is contingent on the presence or absence of the environment
variable named ***HDF5_PLUGIN_PATH***.
variable named ***HDF5\_PLUGIN\_PATH***.
As with all other "...PATH" variables, it is a sequence of absolute
directories separated by a separator character. For *nix* operating systems,
this separator is the colon (':') character. For Windows and Mingw, the
separator is the semi-colon (';') character. So for example:
* Linux: export HDF5_PLUGIN_PATH=/usr/lib:/usr/local/lib
* Windows: export HDF5_PLUGIN_PATH=c:\\ProgramData\\hdf5\\plugin;c:\\tools\\lib
* Linux: `export HDF5_PLUGIN_PATH=/usr/lib:/usr/local/lib`
* Windows: `export HDF5_PLUGIN_PATH=c:\\ProgramData\\hdf5\\plugin;c:\\tools\\lib`
If HDF5_PLUGIN_PATH is defined, then the loader will search each directory
If HDF5\_PLUGIN\_PATH is defined, then the loader will search each directory
in the path from left to right looking for shared libraries with specific
exported symbols representing the entry points into the library.
If HDF5_PLUGIN_PATH is not defined, the loader defaults to using
If HDF5\_PLUGIN\_PATH is not defined, the loader defaults to using
these default directories:
* Linux: /usr/local/hdf5/lib/plugin
* Windows: %ALLUSERSPROFILE%\\hdf5\\lib\\plugin
* Linux: `/usr/local/hdf5/lib/plugin`
* Windows: `%ALLUSERSPROFILE%\\hdf5\\lib\\plugin`
It should be noted that there is a difference between the search order
for HDF5 versus NCZarr. The HDF5 loader will search only the directories
specified in HDF5_PLUGIN_PATH. In NCZarr, the loader
searches HDF5_PLUGIN_PATH and as a last resort,
specified in HDF5\_PLUGIN\_PATH. In NCZarr, the loader
searches HDF5\_PLUGIN\_PATH and as a last resort,
it also searches the default directory.
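
To make the search semantics above concrete, here is a small standalone C sketch. It is not part of the netcdf-c or HDF5 libraries; it simply walks HDF5\_PLUGIN\_PATH from left to right and falls back to the platform default directory, using the separator characters and default locations quoted above.

```
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch only: print the directories that would be searched,
   in order, following the rules described above. */
int main(void)
{
#ifdef _WIN32
    const char *seps = ";";   /* Windows/Mingw separator */
    const char *dflt = "%ALLUSERSPROFILE%\\hdf5\\lib\\plugin";
#else
    const char *seps = ":";   /* *nix separator */
    const char *dflt = "/usr/local/hdf5/lib/plugin";
#endif
    const char *path = getenv("HDF5_PLUGIN_PATH");
    if (path == NULL || *path == '\0') {
        /* No HDF5_PLUGIN_PATH: only the default directory applies. */
        printf("search: %s\n", dflt);
        return 0;
    }
    char *copy = strdup(path);  /* strtok modifies its input */
    for (char *dir = strtok(copy, seps); dir != NULL; dir = strtok(NULL, seps))
        printf("search: %s\n", dir);  /* left-to-right order */
    free(copy);
    return 0;
}
```
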
### Plugin Library Naming {#filters_Pluginlib}
@ -637,17 +637,17 @@ to point to that directory or you may be able to copy the shared libraries out o
As of NetCDF version 4.8.2, the netcdf-c library supports
bit-grooming filters.
````
Bit-grooming is a lossy compression algorithm that removes the
bloat due to false-precision, those bits and bytes beyond the
meaningful precision of the data. Bit Grooming is statistically
unbiased, applies to all floating point numbers, and is easy to
use. Bit-Grooming reduces data storage requirements by
25-80%. Unlike its best-known competitor Linear Packing, Bit
Grooming imposes no software overhead on users, and guarantees
its precision throughout the whole floating point range
[https://doi.org/10.5194/gmd-9-3199-2016].
````
Bit-grooming is a lossy compression algorithm that removes the
bloat due to false-precision, those bits and bytes beyond the
meaningful precision of the data. Bit Grooming is statistically
unbiased, applies to all floating point numbers, and is easy to
use. Bit-Grooming reduces data storage requirements by
25-80%. Unlike its best-known competitor Linear Packing, Bit
Grooming imposes no software overhead on users, and guarantees
its precision throughout the whole floating point range
[https://doi.org/10.5194/gmd-9-3199-2016].
The generic term "quantize" is used to refer collectively to the various
precision-trimming algorithms. The key thing to note about quantization is that
it occurs at the point of writing of data only. Since its output is
@ -656,18 +656,20 @@ Because of this, quantization is not part of the standard filter
mechanism and has a separate API.
The API for bit-groom is currently as follows.
````
```
int nc_def_var_quantize(int ncid, int varid, int quantize_mode, int nsd);
int nc_inq_var_quantize(int ncid, int varid, int *quantize_modep, int *nsdp);
````
```
The *quantize_mode* argument specifies the particular algorithm.
Currently, three are supported: NC_QUANTIZE_BITGROOM, NC_QUANTIZE_GRANULARBR,
and NC_QUANTIZE_BITROUND. In addition quantization can be disabled using
the value NC_NOQUANTIZE.
Currently, three are supported: NC\_QUANTIZE\_BITGROOM, NC\_QUANTIZE\_GRANULARBR,
and NC\_QUANTIZE\_BITROUND. In addition quantization can be disabled using
the value NC\_NOQUANTIZE.
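As a concrete illustration, the following minimal sketch requests BitGroom quantization with 3 significant digits on a float variable and then queries the setting back; the file, dimension, and variable names are hypothetical.

````
#include <netcdf.h>

int main(void)
{
    int ncid, dimid, varid, stat;
    int mode, nsd;

    /* Hypothetical netCDF-4 file with one float variable. */
    if ((stat = nc_create("quantized.nc", NC_NETCDF4 | NC_CLOBBER, &ncid))) return stat;
    if ((stat = nc_def_dim(ncid, "x", 100, &dimid))) return stat;
    if ((stat = nc_def_var(ncid, "t", NC_FLOAT, 1, &dimid, &varid))) return stat;

    /* Keep about 3 significant decimal digits using the BitGroom algorithm;
       quantization is applied only when data are written. */
    if ((stat = nc_def_var_quantize(ncid, varid, NC_QUANTIZE_BITGROOM, 3))) return stat;

    /* Read the setting back. */
    if ((stat = nc_inq_var_quantize(ncid, varid, &mode, &nsd))) return stat;

    return nc_close(ncid);
}
````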
The input to ncgen or the output from ncdump supports special attributes
to indicate if quantization was applied to a given variable.
These attributes have the following form.
````
_QuantizeBitGroomNumberOfSignificantDigits = <NSD>
or
@ -830,15 +832,16 @@ If you do not want to use Automake or Cmake, the following has been known to wor
## References {#filters_References}
1. https://support.hdfgroup.org/HDF5/doc/Advanced/DynamicallyLoadedFilters/HDF5DynamicallyLoadedFilters.pdf
2. https://support.hdfgroup.org/HDF5/doc/TechNotes/TechNote-HDF5-CompressionTroubleshooting.pdf
3. https://portal.hdfgroup.org/display/support/Registered+Filter+Plugins
4. https://support.hdfgroup.org/services/contributions.html#filters
5. https://support.hdfgroup.org/HDF5/doc/RM/RM\_H5.html
6. https://confluence.hdfgroup.org/display/HDF5/Filters
7. https://numcodecs.readthedocs.io/en/stable/
8. https://github.com/ccr/ccr
9. https://escholarship.org/uc/item/7xd1739k
1. [https://support.hdfgroup.org/HDF5/doc/Advanced/DynamicallyLoadedFilters/HDF5DynamicallyLoadedFilters.pdf]()
2. [https://support.hdfgroup.org/HDF5/doc/TechNotes/TechNote-HDF5-CompressionTroubleshooting.pdf]()
3. [https://portal.hdfgroup.org/display/support/Registered+Filter+Plugins]()
4. [https://support.hdfgroup.org/services/contributions.html#filters]()
5. [https://support.hdfgroup.org/HDF5/doc/RM/RM\_H5.html]()
6. [https://confluence.hdfgroup.org/display/HDF5/Filters]()
7. [https://numcodecs.readthedocs.io/en/stable/]()
8. [https://github.com/ccr/ccr]()
9. [https://escholarship.org/uc/item/7xd1739k]()
## Appendix A. HDF5 Parameter Encode/Decode {#filters_appendixa}
@ -945,6 +948,7 @@ Examples of the use of these functions can be seen in the test program *nc\_test
Some of the above functions use a C struct defined in *netcdf\_filter.h*.
The definition of that struct is as follows.
````
typedef struct NC_H5_Filterspec {
unsigned int filterid; /* ID for arbitrary filter. */
@ -1189,12 +1193,14 @@ WARNING: the installer still needs to make sure that the actual filter/compressi
The target location into which libraries in the "plugins" directory are installed is specified
using a special *./configure* option
````
--with-plugin-dir=<directorypath>
or
--with-plugin-dir
````
or its corresponding *cmake* option.
````
-DPLUGIN_INSTALL_DIR=<directorypath>
or
@ -1202,6 +1208,7 @@ or
````
This option defaults to the value "yes", which means that filters are
installed by default. This can be disabled by one of the following options.
````
--without-plugin-dir (automake)
or
@ -1212,13 +1219,15 @@ or
If the option is specified with no argument (automake) or with the value "YES" (CMake),
then it defaults (in order) to the following directories:
1. If the HDF5_PLUGIN_PATH environment variable is defined, then last directory in the list of directories in the path is used.
2. (a) "/usr/local/hdf5/lib/plugin” for linux/unix operating systems (including Cygwin)<br>
(b) “%ALLUSERSPROFILE%\\hdf5\\lib\\plugin” for Windows and MinGW
1. If the HDF5\_PLUGIN\_PATH environment variable is defined, then the last directory in the list of directories in the path is used.
2. (a) `/usr/local/hdf5/lib/plugin` for linux/unix operating systems (including Cygwin)<br>
(b) `%ALLUSERSPROFILE%\\hdf5\\lib\\plugin` for Windows and MinGW
If NCZarr is enabled, then in addition to wrappers for the standard filters,
additional libraries will be installed to support NCZarr access to filters.
Currently, this list includes the following:
* shuffle &mdash; shuffle filter
* fletcher32 &mdash; fletcher32 checksum
* deflate &mdash; deflate compression
@ -1234,7 +1243,7 @@ provided by the *lib__nczh5filters.so* shared library. Note also that
if you disable HDF5 support, but leave NCZarr support enabled,
then all of the above filters should continue to work.
### HDF5_PLUGIN_PATH
### HDF5\_PLUGIN\_PATH
At the moment, NetCDF uses the existing HDF5 environment variable
*HDF5\_PLUGIN\_PATH* to locate the directories in which filter wrapper
@ -1247,17 +1256,17 @@ separated by a specific separator character. For Windows, the
separator character is a semicolon (';') and for Unix, it is a
colon (':').
So, if HDF5_PLUGIN_PATH is defined at build time, and
So, if HDF5\_PLUGIN\_PATH is defined at build time, and
*--with-plugin-dir* is specified with no argument then the last
directory in the path will be the one into which filter wrappers are
installed. Otherwise the default directories are used.
The important thing to note is that at run-time, there are several cases to consider:
1. HDF5_PLUGIN_PATH is defined and is the same value as it was at build time -- no action needed
2. HDF5_PLUGIN_PATH is defined and is has a different value from build time -- the user is responsible for ensuring that the run-time path includes the same directory used at build time, otherwise this case will fail.
3. HDF5_PLUGIN_DIR is not defined at either run-time or build-time -- no action needed
4. HDF5_PLUGIN_DIR is not defined at run-time but was defined at build-time -- this will probably fail
1. HDF5\_PLUGIN\_PATH is defined and is the same value as it was at build time -- no action needed
2. HDF5\_PLUGIN\_PATH is defined and has a different value from build time -- the user is responsible for ensuring that the run-time path includes the same directory used at build time, otherwise this case will fail.
3. HDF5\_PLUGIN\_PATH is not defined at either run-time or build-time -- no action needed
4. HDF5\_PLUGIN\_PATH is not defined at run-time but was defined at build-time -- this will probably fail
## Appendix I. A Warning on Backward Compatibility {#filters_appendixi}
@ -1273,7 +1282,7 @@ inconvenience.
A user may encounter an incompatibility if any of the following appears in user code.
* The function *\_nc\_inq\_var\_filter* was returning the error value NC\_ENOFILTER if a variable had no associated filters.
* The function *nc\_inq\_var\_filter* was returning the error value NC\_ENOFILTER if a variable had no associated filters.
It has been reverted to the previous case where it returns NC\_NOERR and the returned filter id was set to zero if the variable had no filters.
* The function *nc\_inq\_var\_filterids* was renamed to *nc\_inq\_var\_filter\_ids*.
* Some auxiliary functions for parsing textual filter specifications have been moved to the file *netcdf\_aux.h*. See [Appendix A](#filters_appendixa).

View File

@ -8,13 +8,15 @@ The NetCDF NCZarr Implementation
# NCZarr Introduction {#nczarr_introduction}
Beginning with netCDF version 4.8.0, the Unidata NetCDF group has extended the netcdf-c library to provide access to cloud storage (e.g. Amazon S3 <a href="#ref_aws">[1]</a> ).
Beginning with netCDF version 4.8.0, the Unidata NetCDF group has extended the netcdf-c library to support data stored using the Zarr data model and storage format [4,6]. As part of this support, netCDF adds support for accessing data stored using cloud storage (e.g. Amazon S3 <a href="#ref_aws">[1]</a> ).
The goal of this project is to provide maximum interoperability between the netCDF Enhanced (netcdf-4) data model and the Zarr version 2 <a href="#ref_zarrv2">[4]</a> data model. This is embodied in the netcdf-c library so that it is possible to use the netcdf API to read and write Zarr formatted datasets.
The goal of this project, then, is to provide maximum interoperability between the netCDF Enhanced (netcdf-4) data model and the Zarr version 2 <a href="#ref_zarr">[4]</a><!-- or Version 3 <a href="#ref_zarrv3">[13]</a>--> data model. This is embodied in the netcdf-c library so that it is possible to use the netcdf API to read and write Zarr formatted datasets.
In order to better support the netcdf-4 data model, the netcdf-c library implements a limited set of extensions to the Zarr data model.
In order to better support the netcdf-4 data model, the netcdf-c library implements a limited set of extensions to the *Zarr* data model.
This extended model is referred to as *NCZarr*.
An important goal is that those extensions not interfere with reading of those extended datasets by other Zarr specification conforming implementations. This means that one can write a dataset using the NCZarr extensions and expect that dataset to be readable by other Zarr implementations.
Additionally, another goal is to ensure interoperability between *NCZarr*
formatted files and standard (aka pure) *Zarr* formatted files.
This means that (1) an *NCZarr* file can be read by any other *Zarr* library (and especially the Zarr-python library), and (2) a standard *Zarr* file can be read by netCDF. Of course, there are limitations in that other *Zarr* libraries will not use the extra *NCZarr* meta-data, and netCDF will have to "fake" meta-data not provided by a pure *Zarr* file.
As a secondary -- but equally important -- goal, it must be possible to use
the NCZarr library to read and write datasets that are pure Zarr,
@ -29,14 +31,12 @@ Notes on terminology in this document.
# The NCZarr Data Model {#nczarr_data_model}
NCZarr uses a data model <a href="#ref_nczarr">[4]</a> that, by design, extends the Zarr Version 2 Specification <a href="#ref_zarrv2">[6]</a> to add support for the NetCDF-4 data model.
NCZarr uses a data model that, by design, extends the Zarr Version 2 Specification <!--or Version 3 Specification-->.
__Note Carefully__: a legal _NCZarr_ dataset is also a legal _Zarr_ dataset under a specific assumption. This assumption is that within Zarr meta-data objects, like ''.zarray'', unrecognized dictionary keys are ignored.
If this assumption is true of an implementation, then the _NCZarr_ dataset is a legal _Zarr_ dataset and should be readable by that _Zarr_ implementation.
The inverse is true also. A legal _Zarr_ dataset is also a legal _NCZarr_
dataset, where "legal" means it conforms to the Zarr version 2 specification.
__Note Carefully__: a legal _NCZarr_ dataset is expected to also be a legal _Zarr_ dataset.
The inverse is true also. A legal _Zarr_ dataset is expected to also be a legal _NCZarr_ dataset, where "legal" means it conforms to the Zarr specification(s).
In addition, certain non-Zarr features are allowed and used.
Specifically the XArray ''\_ARRAY\_DIMENSIONS'' attribute is one such.
Specifically the XArray [7] ''\_ARRAY\_DIMENSIONS'' attribute is one such.
There are two other, secondary assumptions:
@ -45,9 +45,10 @@ There are two other, secondary assumption:
filters](./md_filters.html "filters") for details.
Briefly, the data model supported by NCZarr is netcdf-4 minus
the user-defined types. However, a restricted form of String type
is supported (see Appendix E).
As with netcdf-4 chunking is supported. Filters and compression
the user-defined types and full String type support.
However, a restricted form of String type
is supported (see Appendix D).
As with netcdf-4, chunking is supported. Filters and compression
are also [supported](./md_filters.html "filters").
Specifically, the model supports the following.
@ -74,8 +75,8 @@ When specified, they are treated as chunked where the file consists of only one
This means that testing for contiguous or compact is not possible; the _nc_inq_var_chunking_ function will always return NC_CHUNKED and the chunksizes will be the same as the dimension sizes of the variable's dimensions.
Additionally, it should be noted that NCZarr supports scalar variables,
but Zarr does not; Zarr only supports dimensioned variables.
In order to support interoperability, NCZarr does the following.
but Zarr Version 2 does not; Zarr V2 only supports dimensioned variables.
In order to support interoperability, NCZarr V2 does the following.
1. A scalar variable is recorded in the Zarr metadata as if it has a shape of **[1]**.
2. A note is stored in the NCZarr metadata that this is actually a netCDF scalar variable.
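For example, the following sketch (all names hypothetical) defines a netCDF scalar variable; when written through NCZarr, its Zarr metadata would record a shape of **[1]** together with the NCZarr note that marks it as a true scalar.

````
#include <netcdf.h>

int main(void)
{
    int ncid, varid, stat;
    double val = 273.15;

    /* Hypothetical NCZarr output; "file" storage keeps the dataset as a directory tree. */
    if ((stat = nc_create("file:///tmp/scalar.zarr#mode=nczarr,file",
                          NC_NETCDF4 | NC_CLOBBER, &ncid))) return stat;

    /* ndims == 0 means a scalar variable in the netCDF sense. */
    if ((stat = nc_def_var(ncid, "t0", NC_DOUBLE, 0, NULL, &varid))) return stat;
    if ((stat = nc_put_var_double(ncid, varid, &val))) return stat;
    return nc_close(ncid);
}
````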
@ -108,55 +109,62 @@ using URLs.
There are, however, some details that are important.
- Protocol: this should be _https_ or _s3_,or _file_.
The _s3_ scheme is equivalent to "https" plus setting "mode=nczarr,s3" (see below). Specifying "file" is mostly used for testing, but is used to support directory tree or zipfile format storage.
The _s3_ scheme is equivalent to "https" plus setting "mode=s3".
Specifying "file" is mostly used for testing, but also for directory tree or zipfile format storage.
## Client Parameters
The fragment part of a URL is used to specify information that is interpreted to specify what data format is to be used, as well as additional controls for that data format.
For NCZarr support, the following _key=value_ pairs are allowed.
- mode=nczarr|zarr|noxarray|file|zip|s3
For reading, _key=value_ pairs are provided for specifying the storage format.
- mode=nczarr|zarr
Typically one will specify two mode flags: one to indicate what format
to use and one to specify the way the dataset is to be stored.
For example, a common one is "mode=zarr,file"
Additional pairs are provided to specify the Zarr version.
- mode=v2<!--|v3-->
Additional pairs are provided to specify the storage medium: Amazon S3 vs File tree vs Zip file.
- mode=file|zip|s3
Note that when reading, an attempt will be made to infer the
format, Zarr version, and storage medium by probing the
file. If inference fails, an error is reported. In this case,
the client may need to supply specific mode flags explicitly
rather than relying on inference.
Typically one will specify three mode flags: one to indicate what format
to use and one to specify the way the dataset is to be stored<!--, and one to specify the Zarr format version-->.
For example, a common one is "mode=zarr,file<!--,v2-->"
<!--If not specified, the version will be the default specified when
the netcdf-c library was built.-->
Obviously, when creating a file, inferring the type of file to create
is not possible so the mode flags must be set specifically.
This means that both the storage medium and the exact storage
format must be specified.
Using _mode=nczarr_ causes the URL to be interpreted as a
reference to a dataset that is stored in NCZarr format.
The _zarr_ mode tells the library to
use NCZarr, but to restrict its operation to operate on pure
Zarr Version 2 datasets.
The _zarr_ mode tells the library to use NCZarr, but to restrict its operation to operate on pure Zarr.
<!--The _v2_ mode specifies Version 2 and _v3_mode specifies Version 3.
If the version is not specified, it will default to the value specified when the netcdf-c library was built.-->
The modes _s3_, _file_, and _zip_ tell the library what storage
The modes _s3_, _file_, and _zip_ tell the library what storage medium
driver to use.
* The _s3_ driver is the default and indicates using Amazon S3 or some equivalent.
* The _file_ format stores data in a directory tree.
* The _zip_ format stores data in a local zip file.
* The _s3_ driver stores data using Amazon S3 or some equivalent.
* The _file_ driver stores data in a directory tree.
* The _zip_ driver stores data in a local zip file.
Note that It should be the case that zipping a _file_
As an aside, it should be the case that zipping a _file_
format directory tree will produce a file readable by the
_zip_ storage format, and vice-versa.
By default, the XArray convention is supported and used for
both NCZarr files and pure Zarr files. This
means that every variable in the root group whose named dimensions
By default, the XArray convention is supported for Zarr Version 2
and used for both NCZarr files and pure Zarr files.
<!--It is not needed for Version 3 and is ignored.-->
This means that every variable in the root group whose named dimensions
are also in the root group will have an attribute called
*\_ARRAY\_DIMENSIONS* that stores those dimension names.
The _noxarray_ mode tells the library to disable the XArray support.
The netcdf-c library is capable of inferring additional mode flags based on the flags it finds. Currently we have the following inferences.
- _zarr_ => _nczarr_
So for example: ````...#mode=zarr,zip```` is equivalent to this.
````...#mode=nczarr,zarr,zip
````
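Putting the mode flags together, a minimal C sketch (path hypothetical) that opens an existing pure-Zarr dataset stored as a directory tree might look like this.

````
#include <netcdf.h>

int main(void)
{
    int ncid, stat;

    /* "zarr" restricts NCZarr to pure Zarr; "file" selects the directory-tree driver.
       The path is hypothetical. */
    if ((stat = nc_open("file:///tmp/example.zarr#mode=zarr,file", NC_NOWRITE, &ncid)))
        return stat;
    return nc_close(ncid);
}
````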
<!--
- log=&lt;output-stream&gt;: this control turns on logging output,
which is useful for debugging and testing.
If just _log_ is used
then it is equivalent to _log=stderr_.
-->
# NCZarr Map Implementation {#nczarr_mapimpl}
Internally, the nczarr implementation has a map abstraction that allows different storage formats to be used.
@ -192,7 +200,7 @@ be a prefix of any other key.
There several other concepts of note.
1. __Dataset__ - a dataset is the complete tree contained by the key defining
the root of the dataset.
the root of the dataset. The term __File__ will often be used as a synonym.
Technically, the root of the tree is the key \<dataset\>/.zgroup, where .zgroup can be considered the _superblock_ of the dataset.
2. __Object__ - equivalent of the S3 object; Each object has a unique key
and "contains" data in the form of an arbitrary sequence of 8-bit bytes.
@ -277,14 +285,15 @@ As with other URLS (e.g. DAP), these kind of URLS can be passed as the path argu
# NCZarr versus Pure Zarr. {#nczarr_purezarr}
The NCZARR format extends the pure Zarr format by adding extra keys such as ''\_NCZARR\_ARRAY'' inside the ''.zarray'' object.
It is possible to suppress the use of these extensions so that the netcdf library can read and write a pure zarr formatted file.
This is controlled by using ''mode=zarr'', which is an alias for the
''mode=nczarr,zarr'' combination.
The primary effects of using pure zarr are described in the [Translation Section](@ref nczarr_translation).
There are some constraints on the reading of Zarr datasets using the NCZarr implementation.
The NCZARR format extends the pure Zarr format by adding extra attributes such as ''\_nczarr\_array'' inside the ''.zattr'' object.
It is possible to suppress the use of these extensions so that the netcdf library can write a pure zarr formatted file. This is usually unnecessary,
since these attributes should be readable by any other Zarr implementation.
However, these extra attributes might be seen as clutter, so it is possible
to suppress them when writing by using *mode=zarr*.
Reading of pure Zarr files created using other implementations is a necessary
compatibility feature of NCZarr.
This requirement imposes some constraints on the reading of Zarr datasets using the NCZarr implementation.
1. Zarr allows some primitive types not recognized by NCZarr.
Over time, the set of unrecognized types is expected to diminish.
Examples of currently unsupported types are as follows:
@ -333,13 +342,14 @@ The reason for this is that the bucket name forms the initial segment in the key
## Data Model
The NCZarr storage format is almost identical to that of the the standard Zarr version 2 format.
The NCZarr storage format is almost identical to that of the the standard Zarr format.
The data model differs as follows.
1. Zarr only supports anonymous dimensions -- NCZarr supports only shared (named) dimensions.
2. Zarr attributes are untyped -- or perhaps more correctly characterized as of type string.
3. Zarr does not explicitly support unlimited dimensions -- NCZarr does support them.
## Storage Format
## Storage Medium
Consider both NCZarr and Zarr, and assume S3 notions of bucket and object.
In both systems, Groups and Variables (Array in Zarr) map to S3 objects.
@ -347,8 +357,7 @@ Containment is modeled using the fact that the dataset's key is a prefix of the
So for example, if variable _v1_ is contained in top level group g1 -- _/g1_ -- then the key for _v1_ is _/g1/v1_.
Additional meta-data information is stored in special objects whose name start with ".z".
In Zarr, the following special objects exist.
In Zarr Version 2, the following special objects exist.
1. Information about a group is kept in a special object named _.zgroup_;
so for example the object _/g1/.zgroup_.
2. Information about an array is kept as a special object named _.zarray_;
@ -359,45 +368,46 @@ so for example the objects _/g1/.zattr_ and _/g1/v1/.zattr_.
The first three contain meta-data objects in the form of a string representing a JSON-formatted dictionary.
The NCZarr format uses the same objects as Zarr, but inserts NCZarr
specific key-value pairs in them to hold NCZarr specific information
The value of each of these keys is a JSON dictionary containing a variety
specific attributes in the *.zattr* object to hold NCZarr specific information.
The value of each of these attributes is a JSON dictionary containing a variety
of NCZarr specific information.
These keys are as follows:
These NCZarr-specific attributes are as follows:
_\_nczarr_superblock\__ -- this is in the top level group -- key _/.zarr_.
_\_nczarr_superblock\__ -- this is in the top level group's *.zattr* object.
It is in effect the "superblock" for the dataset and contains
any netcdf specific dataset level information.
It is also used to verify that a given key is the root of a dataset.
Currently it contains the following key(s):
* "version" -- the NCZarr version defining the format of the dataset.
Currently it contains keys that are ignored and exist only to ensure that
older netcdf library versions do not crash.
* "version" -- the NCZarr version defining the format of the dataset (deprecated).
_\_nczarr_group\__ -- this key appears in every _.zgroup_ object.
_\_nczarr_group\__ -- this key appears in every group's _.zattr_ object.
It contains any netcdf specific group information.
Specifically it contains the following keys:
* "dims" -- the name and size of shared dimensions defined in this group, as well an optional flag indictating if the dimension is UNLIMITED.
* "vars" -- the name of variables defined in this group.
* "dimensions" -- the name and size of shared dimensions defined in this group, as well an optional flag indictating if the dimension is UNLIMITED.
* "arrays" -- the name of variables defined in this group.
* "groups" -- the name of sub-groups defined in this group.
These lists allow walking the NCZarr dataset without having to use the potentially costly search operation.
_\_nczarr_array\__ -- this key appears in every _.zarray_ object.
_\_nczarr_array\__ -- this key appears in the *.zattr* object associated
with a _.zarray_ object.
It contains netcdf specific array information.
Specifically it contains the following keys:
* dimrefs -- the names of the shared dimensions referenced by the variable.
* storage -- indicates if the variable is chunked vs contiguous in the netcdf sense.
* dimension_references -- the fully qualified names of the shared dimensions referenced by the variable.
* storage -- indicates if the variable is chunked vs contiguous in the netcdf sense. Also signals if a variable is scalar.
_\_nczarr_attr\__ -- this key appears in every _.zattr_ object.
This means that technically, it is attribute, but one for which access
is normally surpressed .
_\_nczarr_attr\__ -- this attribute appears in every _.zattr_ object.
Specifically it contains the following keys:
* types -- the types of all of the other attributes in the _.zattr_ object.
* types -- the types of all attributes in the _.zattr_ object.
## Translation {#nczarr_translation}
With some constraints, it is possible for an nczarr library to read the pure Zarr format and for a zarr library to read the nczarr format.
The latter case, zarr reading nczarr is possible if the zarr library is willing to ignore keys whose name it does not recognize; specifically anything beginning with _\_nczarr\__.
With some loss of netcdf-4 information, it is possible for an nczarr library to read the pure Zarr format and for other zarr libraries to read the nczarr format.
The former case, nczarr reading zarr is also possible if the nczarr can simulate or infer the contents of the missing _\_nczarr\_xxx_ objects.
The latter case, zarr reading nczarr, is trivial because all of the nczarr metadata is stored as ordinary string-valued (but JSON-syntax) attributes.
The former case, nczarr reading zarr, is possible assuming the nczarr code can simulate or infer the contents of the missing _\_nczarr\_xxx_ attributes.
As a rule this can be done as follows.
1. _\_nczarr_group\__ -- The list of contained variables and sub-groups can be computed using the search API to list the keys "contained" in the key for a group.
The search looks for occurrences of _.zgroup_, _.zattr_, _.zarray_ to infer the keys for the contained groups, attribute sets, and arrays (variables).
@ -405,9 +415,8 @@ Constructing the set of "shared dimensions" is carried out
by walking all the variables in the whole dataset and collecting
the set of unique integer shapes for the variables.
For each such dimension length, a top level dimension is created
named ".zdim_<len>" where len is the integer length.
2. _\_nczarr_array\__ -- The dimrefs are inferred by using the shape
in _.zarray_ and creating references to the simulated shared dimension.
named "_Anonymous_Dimension_<len>" where len is the integer length.
2. _\_nczarr_array\__ -- The dimension referencess are inferred by using the shape in _.zarray_ and creating references to the simulated shared dimensions.
netcdf specific information.
3. _\_nczarr_attr\__ -- The type of each attribute is inferred by trying to parse the first attribute value string.
@ -417,13 +426,15 @@ In order to accomodate existing implementations, certain mode tags are provided
## XArray
The Xarray [XArray Zarr Encoding Specification](http://xarray.pydata.org/en/latest/internals.html#zarr-encoding-specification) Zarr implementation uses its own mechanism for specifying shared dimensions.
The Xarray [7] Zarr implementation uses its own mechanism for specifying shared dimensions.
It uses a special attribute named ''_ARRAY_DIMENSIONS''.
The value of this attribute is a list of dimension names (strings).
An example might be ````["time", "lon", "lat"]````.
It is essentially equivalent to the ````_nczarr_array "dimrefs" list````, except that the latter uses fully qualified names so the referenced dimensions can be anywhere in the dataset.
It is almost equivalent to the ````_nczarr_array "dimension_references" list````, except that the latter uses fully qualified names so the referenced dimensions can be anywhere in the dataset. The Xarray dimension list differs from the netcdf-4 shared dimensions in two ways.
1. Specifying Xarray in a non-root group has no meaning in the current Xarray specification.
2. A given name can be associated with different lengths, even within a single array. This is considered an error in NCZarr.
As of _netcdf-c_ version 4.8.2, The Xarray ''_ARRAY_DIMENSIONS'' attribute is supported for both NCZarr and pure Zarr.
The Xarray ''_ARRAY_DIMENSIONS'' attribute is supported for both NCZarr and pure Zarr.
If possible, this attribute will be read/written by default,
but can be suppressed if the mode value "noxarray" is specified.
If detected, then these dimension names are used to define shared dimensions.
@ -431,6 +442,8 @@ The following conditions will cause ''_ARRAY_DIMENSIONS'' to not be written.
* The variable is not in the root group,
* Any dimension referenced by the variable is not in the root group.
Note that this attribute is not needed for Zarr Version 3, and is ignored.
# Examples {#nczarr_examples}
Here are a couple of examples using the _ncgen_ and _ncdump_ utilities.
@ -453,34 +466,17 @@ Here are a couple of examples using the _ncgen_ and _ncdump_ utilities.
```
5. Create an nczarr file using the s3 protocol with a specific profile
```
ncgen -4 -lb -o 's3://datasetbucket/rootkey\#mode=nczarr,awsprofile=unidata' dataset.cdl
ncgen -4 -lb -o "s3://datasetbucket/rootkey\#mode=nczarr&awsprofile=unidata" dataset.cdl
```
Note that the URL is internally translated to this
```
'https://s2.&lt;region&gt.amazonaws.com/datasetbucket/rootkey#mode=nczarr,awsprofile=unidata' dataset.cdl
```
# References {#nczarr_bib}
<a name="ref_aws">[1]</a> [Amazon Simple Storage Service Documentation](https://docs.aws.amazon.com/s3/index.html)<br>
<a name="ref_awssdk">[2]</a> [Amazon Simple Storage Service Library](https://github.com/aws/aws-sdk-cpp)<br>
<a name="ref_libzip">[3]</a> [The LibZip Library](https://libzip.org/)<br>
<a name="ref_nczarr">[4]</a> [NetCDF ZARR Data Model Specification](https://www.unidata.ucar.edu/blogs/developer/en/entry/netcdf-zarr-data-model-specification)<br>
<a name="ref_python">[5]</a> [Python Documentation: 8.3.
collections — High-performance dataset datatypes](https://docs.python.org/2/library/collections.html)<br>
<a name="ref_zarrv2">[6]</a> [Zarr Version 2 Specification](https://zarr.readthedocs.io/en/stable/spec/v2.html)<br>
<a name="ref_xarray">[7]</a> [XArray Zarr Encoding Specification](http://xarray.pydata.org/en/latest/internals.html#zarr-encoding-specification)<br>
<a name="dynamic_filter_loading">[8]</a> [Dynamic Filter Loading](https://support.hdfgroup.org/HDF5/doc/Advanced/DynamicallyLoadedFilters/HDF5DynamicallyLoadedFilters.pdf)<br>
<a name="official_hdf5_filters">[9]</a> [Officially Registered Custom HDF5 Filters](https://portal.hdfgroup.org/display/support/Registered+Filter+Plugins)<br>
<a name="blosc-c-impl">[10]</a> [C-Blosc Compressor Implementation](https://github.com/Blosc/c-blosc)<br>
<a name="ref_awssdk_conda">[11]</a> [Conda-forge packages / aws-sdk-cpp](https://anaconda.org/conda-forge/aws-sdk-cpp)<br>
<a name="ref_gdal">[12]</a> [GDAL Zarr](https://gdal.org/drivers/raster/zarr.html)<br>
````
"https://s2.&lt;region&gt.amazonaws.com/datasetbucket/rootkey\#mode=nczarr&awsprofile=unidata"
````
# Appendix A. Building NCZarr Support {#nczarr_build}
Currently the following build cases are known to work.
Note that this does not include S3 support.
A separate tabulation of S3 support is in the document cloud.md.
A separate tabulation of S3 support is in the document _cloud.md_.
<table>
<tr><td><u>Operating System</u><td><u>Build System</u><td><u>NCZarr</u>
@ -551,24 +547,9 @@ Some of the relevant limits are as follows:
Note that the limit is defined in terms of bytes and not (Unicode) characters.
This affects the depth to which groups can be nested because the key encodes the full path name of a group.
# Appendix C. NCZarr Version 1 Meta-Data Representation. {#nczarr_version1}
# Appendix C. JSON Attribute Convention. {#nczarr_json}
In NCZarr Version 1, the NCZarr specific metadata was represented using new objects rather than as keys in existing Zarr objects.
Due to conflicts with the Zarr specification, that format is deprecated in favor of the one described above.
However the netcdf-c NCZarr support can still read the version 1 format.
The version 1 format defines three specific objects: _.nczgroup_, _.nczarray_,_.nczattr_.
These are stored in parallel with the corresponding Zarr objects. So if there is a key of the form "/x/y/.zarray", then there is also a key "/x/y/.nczarray".
The content of these objects is the same as the contents of the corresponding keys. So the value of the ''_NCZARR_ARRAY'' key is the same as the content of the ''.nczarray'' object. The list of connections is as follows:
* ''.nczarr'' <=> ''_nczarr_superblock_''
* ''.nczgroup <=> ''_nczarr_group_''
* ''.nczarray <=> ''_nczarr_array_''
* ''.nczattr <=> ''_nczarr_attr_''
# Appendix D. JSON Attribute Convention. {#nczarr_json}
The Zarr V2 specification is somewhat vague on what is a legal
The Zarr V2 <!--(and V3)--> specification is somewhat vague on what is a legal
value for an attribute. The examples all show one of two cases:
1. A simple JSON scalar atomic value (e.g. int, float, char, etc.), or
2. A JSON array of such values.
@ -581,7 +562,7 @@ complex JSON expression. An example is the GDAL Driver
convention <a href='#ref_gdal'>[12]</a>, where the value is a complex
JSON dictionary.
In order for NCZarr to be as consistent as possible with Zarr Version 2,
In order for NCZarr to be as consistent as possible with Zarr,
it is desirable to support this convention for attribute values.
This means that there must be some way to handle an attribute
whose value is not either of the two cases above. That is, its value
@ -611,12 +592,12 @@ There are mutiple cases to consider.
3. The netcdf attribute **is** of type NC_CHAR and its value &ndash; taken as a single sequence of characters &ndash;
**is** parseable as a legal JSON expression.
* Parse to produce a JSON expression and write that expression.
* Use "|U1" as the dtype and store in the NCZarr metadata.
* Use "|J0" as the dtype and store in the NCZarr metadata.
4. The netcdf attribute **is** of type NC_CHAR and its value &ndash; taken as a single sequence of characters &ndash;
**is not** parseable as a legal JSON expression.
* Convert to a JSON string and write that expression.
* Use "|U1" as the dtype and store in the NCZarr metadata.
* Use ">S1" as the dtype and store in the NCZarr metadata.
## Reading an attribute:
@ -640,10 +621,7 @@ and then store it as the equivalent netcdf vector.
* If the dtype is not defined, then infer the dtype based on the first JSON value in the array,
and then store it as the equivalent netcdf vector.
3. The JSON expression is an array some of whose values are dictionaries or (sub-)arrays.
* Un-parse the expression to an equivalent sequence of characters, and then store it as of type NC_CHAR.
3. The JSON expression is a dictionary.
3. The attribute is any other JSON structure.
* Un-parse the expression to an equivalent sequence of characters, and then store it as of type NC_CHAR.
## Notes
@ -654,7 +632,7 @@ actions "read-write-read" is equivalent to a single "read" and "write-read-write
The "almost" caveat is necessary because (1) whitespace may be added or lost during the sequence of operations,
and (2) numeric precision may change.
# Appendix E. Support for string types
# Appendix D. Support for string types
Zarr supports a string type, but it is restricted to
fixed size strings. NCZarr also supports such strings,
@ -702,6 +680,182 @@ the above types should always appear as strings,
and the type that signals NC_CHAR (in NCZarr)
would be handled by Zarr as a string of length 1.
<!--
# Appendix E. Zarr Version 3: NCZarr Version 3 Meta-Data Representation. {#nczarr_version3}
For Zarr version 3, the added NCZarr specific metadata is stored
as attributes pretty much the same as for Version 2.
Specifically, the following Netcdf-4 meta-data information needs to be captured by NCZarr:
1. Shared dimensions: name and size.
2. Unlimited dimensions: which dimensions are unlimited.
3. Attribute types.
4. Netcdf types not included in Zarr: currently "char" and "string".
5. Zarr types not included in Netcdf: currently only "complex(32|64)"
This extra netcdf-4 meta-data is stored as attributes so as to not interfere with existing implementations.
## Supported Types
Zarr version 3 supports the following "atomic" types:
bool, int8, uint8, int16, uint16, int32, uint32, int64, uint64, float32, float64.
It also defines two structured types: complex64 and complex128.
NCZarr supports all of the atomic types.
Specialized support is provided for the following
Netcdf types: char, string.
The Zarr types bool and complex64 are not yet supported, but will be added shortly.
The type complex128 is not supported at all.
The Zarr type "bool" will appear in the netcdf types as
the enum type "_bool" whose netcdf declaration is as follows:
````
ubyte enum _bool_t {FALSE=0, TRUE=1};
````
The type complex64 will be supported by defining this compound type:
````
compound _Complex64_t { float64 i; float64 j;}
````
Strings present a problem because there is a proposal
to add variable length strings to the Zarr version 3 specification;
fixed-length strings would not be supported at all.
But strings are important in Netcdf, so a forward compatible
representation is provided where the type is string
and its maximum size is specified.
For arrays, the Netcdf types "char" and "string" are stored
in the Zarr file as of type "uint8" and "r<8*n>", respectively
where _n_ is the maximum length of the string in bytes (not characters).
The fact that they represent "char" and "string" is encoded in the "_nczarr_array" attribute (see below).
## NCZarr Superblock
The *_nczarr_superblock* attribute is used as a useful marker to signal that a file is in fact NCZarr as opposed to Zarr.
This attribute is stored in the *zarr.info* attributes in the root group of the Zarr file.
The relevant attribute has the following format:
````
"_nczarr_superblock": {
"version": "3.0.0",
format": 3
}
````
## Group Annotations
The optional *_nczarr_group* attribute is stored in the attributes of a Zarr group within
the *zarr.json* object in that group.
The relevant attribute has the following format:
````
"_nczarr_group": {
\"dimensions\": [{name: <dimname>, size: <integer>, unlimited: 1|0},...],
\"arrays\": ["<name>",...],
\"subgroups\": ["<name>",...]
}
````
Its purpose is two-fold:
1. record the objects immediately within that group
2. define netcdf-4 dimension objects within that group.
## Array Annotations
In order to support Netcdf concepts in Zarr, it may be necessary
to annotate a Zarr array with extra information.
The optional *_nczarr_array* attribute is stored in the attributes of a Zarr array within
the *zarr.json* object in that array.
The relevant attribute has the following format:
````
"_nczarr_array": {
\"dimension_references\": [\"/g1/g2/d1\", \"/d2\",...],
\"type_alias\": "<string indicating special type aliasing>" // optional
}
````
The *dimension_references* key is an expansion of the "dimensions" key
found in the *zarr.json* object for an array.
The problem with "dimensions" is that it specifies a simple name for each
dimension, whereas netcdf-4 requires that the array references dimension objects
that may appear in groups anywhere in the file. These references are encoded
as FQNs "pointing" to a specific dimension declaration (see *_nczarr_group* attribute
defined previously).
FQN is an acronym for "Fully Qualified Name".
It is a series of names separated by the "/" character, much
like a file system path.
It identifies the group in which the dimension is ostensibly "defined" in the Netcdf sense.
For example ````/d1```` defines a dimension "d1" defined in the root group.
Similarly ````/g1/g2/d2```` defines a dimension "d2" defined in the
group g2, which in turn is a subgroup of group g1, which is a subgroup
of the root group.
The *type_alias* key is used to annotate the type of an array
to allow discovery of netcdf-4 specific types.
Specifically, there are three current cases:
| dtype | type_alias |
| ----- | ---------- |
| uint8 | char |
| rn | string |
| uint8 | json |
If, for example, an array's dtype is specified as *uint8*, then it may be that
it is actually of unsigned 8-bit integer type. But it may actually be of some
netcdf-4 type that is encoded as *uint8* in order to be recognized by other -- pure zarr--
implementations. So, for example, if the netcdf-4 type is *char*, then the array's
dtype is *uint8*, but its type alias is *char*.
## Attribute Type Annotation
In Zarr version 3, group and array attributes are stored inside
the corresponding _zarr.info_ object under the dictionary key "attributes".
Note that this decision is still under discussion and it may be changed
to store attributes in an object separate from _zarr.info_.
Regardless of where the attributes are stored, and in order to
support netcdf-4 typed attributes, the per-attribute information
is stored as a special attribute called _\_nczarr_attrs\__ defined to hold
NCZarr specific attribute information. Currently, it only holds
the attribute typing information.
It can appear in any *zarr.json* object: group or array.
Its form is this:
````
"_nczarr_attrs": {
"attribute_types": [
{"name": "attr1", "configuration": {"type": "<dtype>"}},
...
]
}
````
There is one entry for every attribute (including itself) giving the type
of that attribute.
It should be noted that Zarr allows the value of an attribute to be an arbitrary
JSON-encoded structure. In order to support this in netcdf-4, if such a structure
is encountered as an attribute value, then it is typed as *json* (see the previously
described table).
## Codec Specification
The Zarr version 3 representation of codecs is slightly different
than that used by Zarr version 2.
In version 2, the codec is represented by this JSON template.
````
{"id": "<codec name>" "<param>": "<value>", "<param>": "<value>", ...}
````
In version 3, the codec is represented by this JSON template.
````
{"name": "<codec name>" "configuration": {"<param>": "<value>", "<param>": "<value>", ...}}
````
-->
# References {#nczarr_bib}
<a name="ref_aws">[1]</a> [Amazon Simple Storage Service Documentation](https://docs.aws.amazon.com/s3/index.html)<br>
<a name="ref_awssdk">[2]</a> [Amazon Simple Storage Service Library](https://github.com/aws/aws-sdk-cpp)<br>
<a name="ref_libzip">[3]</a> [The LibZip Library](https://libzip.org/)<br>
<a name="ref_nczarr">[4]</a> [NetCDF ZARR Data Model Specification](https://www.unidata.ucar.edu/blogs/developer/en/entry/netcdf-zarr-data-model-specification)<br>
<a name="ref_python">[5]</a> [Python Documentation: 8.3.
collections — High-performance dataset datatypes](https://docs.python.org/2/library/collections.html)<br>
<a name="ref_zarrv2">[6]</a> [Zarr Version 2 Specification](https://zarr.readthedocs.io/en/stable/spec/v2.html)<br>
<a name="ref_xarray">[7]</a> [XArray Zarr Encoding Specification](http://xarray.pydata.org/en/latest/internals.html#zarr-encoding-specification)<br>
<a name="dynamic_filter_loading">[8]</a> [Dynamic Filter Loading](https://support.hdfgroup.org/HDF5/doc/Advanced/DynamicallyLoadedFilters/HDF5DynamicallyLoadedFilters.pdf)<br>
<a name="official_hdf5_filters">[9]</a> [Officially Registered Custom HDF5 Filters](https://portal.hdfgroup.org/display/support/Registered+Filter+Plugins)<br>
<a name="blosc-c-impl">[10]</a> [C-Blosc Compressor Implementation](https://github.com/Blosc/c-blosc)<br>
<a name="ref_awssdk_conda">[11]</a> [Conda-forge packages / aws-sdk-cpp](https://anaconda.org/conda-forge/aws-sdk-cpp)<br>
<a name="ref_gdal">[12]</a> [GDAL Zarr](https://gdal.org/drivers/raster/zarr.html)<br>
<!--
<a name="ref_nczarrv3">[13]</a> [NetCDF ZARR Data Model Specification Version 3](https://zarr-specs.readthedocs.io/en/latest/specs.html)
-->
# Change Log {#nczarr_changelog}
[Note: minor text changes are not included.]
@ -710,6 +864,12 @@ intended to be a detailed chronology. Rather, it provides highlights
that will be of interest to NCZarr users. In order to see exact changes,
it is necessary to use the 'git diff' command.
## 03/31/2024
1. Document the change to V2 to using attributes to hold NCZarr metadata.
## 01/31/2024
1. Add description of support for Zarr version 3 as an appendix.
## 3/10/2023
1. Move most of the S3 text to the cloud.md document.
@ -729,4 +889,4 @@ include arbitrary JSON expressions; see Appendix D for more details.
__Author__: Dennis Heimbigner<br>
__Email__: dmh at ucar dot edu<br>
__Initial Version__: 4/10/2020<br>
__Last Revised__: 3/8/2023
__Last Revised__: 4/02/2024

View File

@ -6,18 +6,20 @@
# See netcdf-c/COPYRIGHT file for more info.
# Copy some test files from current source dir to out-of-tree build dir.
FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh ${CMAKE_CURRENT_SOURCE_DIR}/*.hdf4)
FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
IF(WIN32)
FILE(COPY ${COPY_FILES} DESTINATION ${RUNTIME_OUTPUT_DIRECTORY}/)
ENDIF()
file(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh ${CMAKE_CURRENT_SOURCE_DIR}/*.hdf4)
file(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
if(WIN32)
file(COPY ${COPY_FILES} DESTINATION ${RUNTIME_OUTPUT_DIRECTORY}/)
endif()
IF(USE_HDF4_FILE_TESTS AND NOT WIN32)
if(USE_HDF4_FILE_TESTS AND NOT WIN32)
build_bin_test_no_prefix(tst_interops2)
target_link_libraries(tst_interops2 netcdf ${ALL_TLL_LIBS})
build_bin_test_no_prefix(tst_interops3)
add_bin_test(hdf4_test tst_chunk_hdf4)
add_bin_test(hdf4_test tst_h4_lendian)
add_bin_test(hdf4_test tst_hdf4_extra)
add_sh_test(hdf4_test run_get_hdf4_files)
add_sh_test(hdf4_test run_formatx_hdf4)
ENDIF()
endif()

View File

@ -7,7 +7,10 @@
# Ed Hartnett, Dennis Heimbigner, Ward Fisher
include_HEADERS = netcdf.h netcdf_meta.h netcdf_mem.h netcdf_aux.h \
netcdf_filter.h netcdf_filter_build.h netcdf_filter_hdf5_build.h netcdf_dispatch.h netcdf_json.h
netcdf_filter.h netcdf_filter_build.h netcdf_filter_hdf5_build.h netcdf_dispatch.h
# Built source
include_HEADERS += netcdf_json.h
if BUILD_PARALLEL
include_HEADERS += netcdf_par.h
@ -32,6 +35,8 @@ endif
EXTRA_DIST = CMakeLists.txt XGetopt.h netcdf_meta.h.in netcdf_dispatch.h.in
BUILT_SOURCES = netcdf_json.h
# netcdf_json.h is constructed as a header-only file for use by
# nczarr code wrappers in the plugin directory. It is
# constructed by joining libdispatch/ncjson.c with
@ -40,7 +45,9 @@ EXTRA_DIST = CMakeLists.txt XGetopt.h netcdf_meta.h.in netcdf_dispatch.h.in
# static inside netcdf_json.h. This is an ugly hack to avoid
# having to reference libnetcdf in the nczarr code wrappers.
# Give the recipe for rebuilding netcdf_json.h
makepluginjson::
sed -e 's/NCJSON_H/NETCDF_JSON_H/' -e '/ncjson.h/d' <${srcdir}/ncjson.h > netcdf_json.h
# Give the recipe for building netcdf_json.h
netcdf_json.h: ${top_srcdir}/libdispatch/ncjson.c ${top_srcdir}/include/ncjson.h
sed -e 's/NCJSON_H/NETCDF_JSON_H/' -e '/ncjson.h/d' -e '/#endif[^!]*!NETCDF_JSON_H!/d' <${srcdir}/ncjson.h > netcdf_json.h
echo '#ifdef NETCDF_JSON_H' >> netcdf_json.h
sed -e '/ncjson.h/d' < ${srcdir}/../libdispatch/ncjson.c >> netcdf_json.h
echo '#endif /*NETCDF_JSON_H*/' >> netcdf_json.h

View File

@ -220,4 +220,9 @@ EXTERNL hid_t nc4_H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id,
int hdf5set_format_compatibility(hid_t fapl_id);
/* HDF5 initialization/finalization */
extern int nc4_hdf5_initialized;
extern void nc4_hdf5_initialize(void);
extern void nc4_hdf5_finalize(void);
#endif /* _HDF5INTERNAL_ */

View File

@ -137,4 +137,12 @@ EXTERNL int NC_copy_data_all(NC* nc, nc_type xtypeid, const void* memory, size_t
#define USED2INFO(nc) ((1<<(nc->dispatch->model)) & (1<<NC_FORMATX_DAP2))
#define USED4INFO(nc) ((1<<(nc->dispatch->model)) & (1<<NC_FORMATX_DAP4))
/* In DAP4 and Zarr (and maybe other places in the future)
we may have dimensions with a size, but no name.
In this case we need to create a name based on the size.
As a rule, the dimension name is NCDIMANON_<n> where n is the size
and NCDIMANON is a prefix defined here.
*/
#define NCDIMANON "_Anonymous_Dim"
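/* Example: an unnamed dimension of size 15 is exposed as "_Anonymous_Dim_15". */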
#endif /* _NC_H_ */

View File

@ -15,7 +15,6 @@
#else
# include <stdint.h>
#endif /* HAVE_STDINT_H */
#include <sys/types.h> /* off_t */
#include "netcdf.h"
#ifdef USE_PARALLEL
#include "netcdf_par.h"

View File

@ -13,7 +13,6 @@
#include "netcdf.h"
#include "config.h"
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>
@ -55,9 +54,6 @@ typedef enum {NCNAT, NCVAR, NCDIM, NCATT, NCTYP, NCFLD, NCGRP, NCFIL} NC_SORT;
/** One mega-byte. */
#define MEGABYTE 1048576
/** The HDF5 ID for the szip filter. */
#define HDF5_FILTER_SZIP 4
#define X_SCHAR_MIN (-128) /**< Minimum signed char value. */
#define X_SCHAR_MAX 127 /**< Maximum signed char value. */
#define X_UCHAR_MAX 255U /**< Maximum unsigned char value. */
@ -84,9 +80,6 @@ typedef enum {NCNAT, NCVAR, NCDIM, NCATT, NCTYP, NCFLD, NCGRP, NCFIL} NC_SORT;
#define X_DOUBLE_MAX 1.7976931348623157e+308 /**< Maximum double value. */
#define X_DOUBLE_MIN (-X_DOUBLE_MAX) /**< Minimum double value. */
/** This is the number of netCDF atomic types. */
#define NUM_ATOMIC_TYPES (NC_MAX_ATOMIC_TYPE + 1)
/** Number of parameters needed for ZLIB filter. */
#define CD_NELEMS_ZLIB 1
@ -97,17 +90,16 @@ typedef enum {NCNAT, NCVAR, NCDIM, NCATT, NCTYP, NCFLD, NCGRP, NCFIL} NC_SORT;
#define NC4_DATA_SET(nc,data) ((nc)->dispatchdata = (void *)(data))
/* Reserved attribute flags: must be powers of 2. */
/** Hidden attributes; immutable and unreadable thru API. */
#define HIDDENATTRFLAG 1
/** Readonly attributes; readable, but immutable thru the API. */
#define READONLYFLAG 2
/** Subset of readonly flags; readable by name only thru the API. */
#define NAMEONLYFLAG 4
/** Per-variable attribute, as opposed to global */
#define VARFLAG 16
/** Hidden attributes; immutable and unreadable thru API. */
# define HIDDENATTRFLAG 1
/** Readonly attributes; readable, but immutable thru the API. */
# define READONLYFLAG 2
/** Subset of readonly flags; readable by name only thru the API. */
# define NAMEONLYFLAG 4
/** Mark reserved attributes that are constructed on the fly when accessed */
# define VIRTUALFLAG 8
/** Per-variable attribute, as opposed to global */
# define VARFLAG 16
/** Boolean type, to make the code easier to read. */
typedef enum {NC_FALSE = 0, NC_TRUE = 1} nc_bool_t;
@ -238,7 +230,7 @@ typedef struct NC_TYPE_INFO
size_t size; /**< Size of the type in memory, in bytes */
nc_bool_t committed; /**< True when datatype is committed in the file */
nc_type nc_type_class; /**< NC_VLEN, NC_COMPOUND, NC_OPAQUE, NC_ENUM, NC_INT, NC_FLOAT, or NC_STRING. */
void *format_type_info; /**< HDF5-specific type info. */
void *format_type_info; /**< dispatcher-specific type info. */
int varsized; /**< <! 1 if this type is (recursively) variable sized; 0 if fixed size */
/** Information for each type or class */
@ -274,7 +266,7 @@ typedef struct NC_GRP_INFO
} NC_GRP_INFO_T;
/* These constants apply to the flags field in the
* HDF5_FILE_INFO_T defined below. */
* NC_FILE_INFO_T defined below. */
#define NC_INDEF 0x01 /**< in define mode, cleared by ncendef */
/** This is the metadata we need to keep track of for each
@ -322,31 +314,6 @@ typedef struct NC_FILE_INFO
} mem;
} NC_FILE_INFO_T;
/* Collect global state info in one place */
typedef struct NCglobalstate {
int initialized;
char* tempdir; /* track a usable temp dir */
char* home; /* track $HOME */
char* cwd; /* track getcwd */
struct NCRCinfo* rcinfo; /* Currently only one rc file per session */
struct GlobalZarr { /* Zarr specific parameters */
char dimension_separator;
} zarr;
struct GlobalAWS { /* AWS S3 specific parameters/defaults */
char* default_region;
char* config_file;
char* profile;
char* access_key_id;
char* secret_access_key;
} aws;
struct Alignment { /* H5Pset_alignment parameters */
int defined; /* 1 => threshold and alignment explicitly set */
int threshold;
int alignment;
} alignment;
struct ChunkCache chunkcache;
} NCglobalstate;
/** Variable Length Datatype struct in memory. Must be identical to
* HDF5 hvl_t. (This is only used for VL sequences, not VL strings,
* which are stored in char *'s) */
@ -356,18 +323,14 @@ typedef struct
void *p; /**< Pointer to VL data */
} nc_hvl_t;
/* Misc functions */
extern int NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size);
extern int NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep);
/* These functions convert between netcdf and HDF5 types. */
/* These functions convert between different netcdf types. */
extern int nc4_get_typelen_mem(NC_FILE_INFO_T *h5, nc_type xtype, size_t *len);
extern int nc4_convert_type(const void *src, void *dest, const nc_type src_type,
const nc_type dest_type, const size_t len, int *range_error,
const void *fill_value, int strict_nc3, int quantize_mode,
int nsd);
/* These functions do HDF5 things. */
/* These functions do netcdf-4 things. */
extern int nc4_reopen_dataset(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var);
extern int nc4_read_atts(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var);
@ -467,26 +430,69 @@ extern int nc4_close_netcdf4_file(NC_FILE_INFO_T *h5, int abort, NC_memio *memio
extern int nc4_find_default_chunksizes2(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var);
extern int nc4_check_chunksizes(NC_GRP_INFO_T* grp, NC_VAR_INFO_T* var, const size_t* chunksizes);
/* HDF5 initialization/finalization */
extern int nc4_hdf5_initialized;
extern void nc4_hdf5_initialize(void);
extern void nc4_hdf5_finalize(void);
/* This is only included if --enable-logging is used for configure; it
prints info about the metadata to stderr. */
#ifdef LOGGING
extern int log_metadata_nc(NC_FILE_INFO_T *h5);
#endif
/**************************************************/
/* Atomic types constants and functions */
/** This is the number of netCDF atomic types (as opposed to max) . */
#define NUM_ATOMIC_TYPES (NC_MAX_ATOMIC_TYPE + 1)
/** @internal Names of atomic types. */
extern const char* nc4_atomic_name[NUM_ATOMIC_TYPES];
/* Misc functions */
extern int NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size);
extern int NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep);
extern int NC4_inq_atomic_typeid(int ncid, const char *name, nc_type *typeidp);
extern int NC4_get_atomic_typeclass(nc_type xtype, int *type_class);
/**************************************************/
/* Type alignment related functions */
extern int nc_set_alignment(int threshold, int alignment);
extern int nc_get_alignment(int* thresholdp, int* alignmentp);
/**************************************************/
/* Begin to collect global state info in one place (more to do) */
typedef struct NCglobalstate {
int initialized;
char* tempdir; /* track a usable temp dir */
char* home; /* track $HOME */
char* cwd; /* track getcwd */
struct NCRCinfo* rcinfo; /* Currently only one rc file per session */
struct GlobalZarr { /* Zarr specific parameters */
char dimension_separator;
int default_zarrformat;
} zarr;
struct GlobalAWS { /* AWS S3 specific parameters/defaults */
char* default_region;
char* config_file;
char* profile;
char* access_key_id;
char* secret_access_key;
} aws;
struct Alignment { /* H5Pset_alignment parameters */
int defined; /* 1 => threshold and alignment explicitly set */
int threshold;
int alignment;
} alignment;
struct ChunkCache chunkcache;
} NCglobalstate;
extern struct NCglobalstate* NC_getglobalstate(void);
extern void NC_freeglobalstate(void);
/**************************************************/
/* Binary searcher for reserved attributes */
extern const NC_reservedatt* NC_findreserved(const char* name);
/* Global State Management */
extern NCglobalstate* NC_getglobalstate(void);
extern void NC_freeglobalstate(void);
/* reserved attribute initializer */
extern void NC_initialize_reserved(void);
/* Generic reserved Attributes */
#define NC_ATT_REFERENCE_LIST "REFERENCE_LIST"
@ -500,9 +506,11 @@ extern void NC_freeglobalstate(void);
#define NC_ATT_NC3_STRICT_NAME "_nc3_strict"
#define NC_XARRAY_DIMS "_ARRAY_DIMENSIONS"
#define NC_ATT_CODECS "_Codecs"
/* Must match values in libnczarr/zinternal.h */
#define NC_NCZARR_SUPERBLOCK "_nczarr_superblock"
#define NC_NCZARR_GROUP "_nczarr_group"
#define NC_NCZARR_ARRAY "_nczarr_array"
#define NC_NCZARR_ATTR "_nczarr_attr"
#define NC_NCZARR_ATTR_UC "_NCZARR_ATTR"
#define NC_NCZARR_MAXSTRLEN_ATTR "_nczarr_maxstrlen"
#define NC_NCZARR_DEFAULT_MAXSTRLEN_ATTR "_nczarr_default_maxstrlen"
#endif /* _NC4INTERNAL_ */

View File

@ -52,12 +52,10 @@ void nc_log(int severity, const char *fmt, ...);
#define BAIL_QUIET BAIL
#ifdef USE_NETCDF4
#ifndef ENABLE_SET_LOG_LEVEL
#ifndef NETCDF_ENABLE_SET_LOG_LEVEL
/* Define away any calls to nc_set_log_level(), if its not enabled. */
#define nc_set_log_level(e)
#endif /* ENABLE_SET_LOG_LEVEL */
#endif
#endif /* NETCDF_ENABLE_SET_LOG_LEVEL */
#endif /* LOGGING */

View File

@ -31,7 +31,7 @@ and do the command:
/* Override for plugins */
#ifdef NETCDF_JSON_H
#define OPTEXPORT static
#else
#else /*NETCDF_JSON_H*/
#define OPTEXPORT MSC_EXTRA
#endif /*NETCDF_JSON_H*/
@ -57,7 +57,7 @@ typedef struct NCjson {
int sort; /* of this object */
char* string; /* sort != DICT|ARRAY */
struct NCjlist {
int len;
size_t len;
struct NCjson** contents;
} list; /* sort == DICT|ARRAY */
} NCjson;
@ -96,7 +96,7 @@ OPTEXPORT int NCJnewstring(int sort, const char* value, NCjson** jsonp);
OPTEXPORT int NCJnewstringn(int sort, size_t len, const char* value, NCjson** jsonp);
/* Get dict key value by name */
OPTEXPORT int NCJdictget(const NCjson* dict, const char* key, NCjson** valuep);
OPTEXPORT int NCJdictget(const NCjson* dict, const char* key, const NCjson** valuep);
/* Convert one json sort to value of another type; don't use union so we can know when to reclaim sval */
OPTEXPORT int NCJcvt(const NCjson* value, int outsort, struct NCJconst* output);
@ -108,7 +108,14 @@ OPTEXPORT int NCJaddstring(NCjson* json, int sort, const char* s);
OPTEXPORT int NCJappend(NCjson* object, NCjson* value);
/* Insert key-value pair into a dict object. key will be copied */
OPTEXPORT int NCJinsert(NCjson* object, char* key, NCjson* value);
OPTEXPORT int NCJinsert(NCjson* object, const char* key, NCjson* value);
/* Insert key-value pair as strings into a dict object.
key and value will be copied */
OPTEXPORT int NCJinsertstring(NCjson* object, const char* key, const char* value);
/* Insert key-value pair where value is an int */
OPTEXPORT int NCJinsertint(NCjson* object, const char* key, long long ivalue);
/* Unparser to convert NCjson object to text in buffer */
OPTEXPORT int NCJunparse(const NCjson* json, unsigned flags, char** textp);
@ -121,7 +128,7 @@ OPTEXPORT int NCJclone(const NCjson* json, NCjson** clonep);
OPTEXPORT void NCJdump(const NCjson* json, unsigned flags, FILE*);
/* convert NCjson* object to output string */
OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif
#endif /*NETCDF_JSON_H*/
#if defined(__cplusplus)
}
@ -131,8 +138,10 @@ OPTEXPORT const char* NCJtotext(const NCjson* json);
#define NCJsort(x) ((x)->sort)
#define NCJstring(x) ((x)->string)
#define NCJlength(x) ((x)==NULL ? 0 : (x)->list.len)
#define NCJdictlength(x) ((x)==NULL ? 0 : (x)->list.len/2)
#define NCJcontents(x) ((x)->list.contents)
#define NCJith(x,i) ((x)->list.contents[i])
#define NCJdictith(x,i) ((x)->list.contents[2*i])
/* Setters */
#define NCJsetsort(x,s) (x)->sort=(s)
@ -147,3 +156,4 @@ OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif /*NCJSON_H*/
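As a usage illustration (editorial addition, not part of this commit), the revised NCjson helpers declared above might be combined as follows. The codec key/value names are invented, error checking is abbreviated, and NCJ_OK, NCJ_DICT, and NCJreclaim are assumed to be provided by ncjson.h as declared in this header.

#include <stdio.h>
#include <stdlib.h>
#include "ncjson.h"

static void
ncjson_usage_sketch(void)
{
    NCjson* codec = NULL;
    const NCjson* jid = NULL;
    char* text = NULL;
    size_t i;

    NCJnew(NCJ_DICT,&codec);                 /* build an empty dict {} */
    NCJinsertstring(codec,"id","zlib");      /* key and value are copied */
    NCJinsertint(codec,"level",5);           /* stored as the string "5" */

    /* Lookup now takes a const NCjson** out parameter */
    if(NCJdictget(codec,"id",&jid) == NCJ_OK && jid != NULL)
        printf("id=%s\n",NCJstring(jid));

    /* Iterate keys with the new dict macros */
    for(i=0;i<NCJdictlength(codec);i++)
        printf("key[%zu]=%s\n",i,NCJstring(NCJdictith(codec,i)));

    NCJunparse(codec,0,&text);               /* serialize; caller frees text */
    printf("%s\n",text);
    free(text);
    NCJreclaim(codec);
}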

View File

@ -10,7 +10,7 @@
#include <stdarg.h>
#include "ncexternl.h"
#define NCCATCH
#undef NCCATCH
#define NCENVLOGGING "NCLOGGING"
#define NCENVTRACING "NCTRACING"

View File

@ -15,7 +15,7 @@
/* Track the server type, if known */
typedef enum NCS3SVC {NCS3UNK=0, /* unknown */
NCS3=1, /* s3.amazon.aws */
NCS3GS=0 /* storage.googleapis.com */
NCS3GS=2 /* storage.googleapis.com */
} NCS3SVC;
typedef struct NCS3INFO {
@ -45,6 +45,7 @@ struct NCglobalstate;
extern "C" {
#endif
/* API for ncs3sdk_XXX.[c|cpp] */
EXTERNL int NC_s3sdkinitialize(void);
EXTERNL int NC_s3sdkfinalize(void);
EXTERNL void* NC_s3sdkcreateclient(NCS3INFO* context);
@ -60,8 +61,7 @@ EXTERNL int NC_s3sdksearch(void* s3client0, const char* bucket, const char* pref
EXTERNL int NC_s3sdkdeletekey(void* client0, const char* bucket, const char* pathkey, char** errmsgp);
/* From ds3util.c */
EXTERNL int NC_s3sdkinitialize(void);
EXTERNL int NC_s3sdkfinalize(void);
EXTERNL void NC_s3sdkenvironment(void);
EXTERNL int NC_getdefaults3region(NCURI* uri, const char** regionp);
EXTERNL int NC_s3urlprocess(NCURI* url, NCS3INFO* s3, NCURI** newurlp);

View File

@ -119,6 +119,7 @@ extern "C" {
0x0002
All upper 16 bits are unused except
0x20000
0x40000
*/
/* Lower 16 bits */

View File

@ -31,7 +31,7 @@ and do the command:
/* Override for plugins */
#ifdef NETCDF_JSON_H
#define OPTEXPORT static
#else
#else /*NETCDF_JSON_H*/
#define OPTEXPORT MSC_EXTRA
#endif /*NETCDF_JSON_H*/
@ -57,7 +57,7 @@ typedef struct NCjson {
int sort; /* of this object */
char* string; /* sort != DICT|ARRAY */
struct NCjlist {
int len;
size_t len;
struct NCjson** contents;
} list; /* sort == DICT|ARRAY */
} NCjson;
@ -96,7 +96,7 @@ OPTEXPORT int NCJnewstring(int sort, const char* value, NCjson** jsonp);
OPTEXPORT int NCJnewstringn(int sort, size_t len, const char* value, NCjson** jsonp);
/* Get dict key value by name */
OPTEXPORT int NCJdictget(const NCjson* dict, const char* key, NCjson** valuep);
OPTEXPORT int NCJdictget(const NCjson* dict, const char* key, const NCjson** valuep);
/* Convert one json sort to value of another type; don't use union so we can know when to reclaim sval */
OPTEXPORT int NCJcvt(const NCjson* value, int outsort, struct NCJconst* output);
@ -108,7 +108,14 @@ OPTEXPORT int NCJaddstring(NCjson* json, int sort, const char* s);
OPTEXPORT int NCJappend(NCjson* object, NCjson* value);
/* Insert key-value pair into a dict object. key will be copied */
OPTEXPORT int NCJinsert(NCjson* object, char* key, NCjson* value);
OPTEXPORT int NCJinsert(NCjson* object, const char* key, NCjson* value);
/* Insert key-value pair as strings into a dict object.
key and value will be copied */
OPTEXPORT int NCJinsertstring(NCjson* object, const char* key, const char* value);
/* Insert key-value pair where value is an int */
OPTEXPORT int NCJinsertint(NCjson* object, const char* key, long long ivalue);
/* Unparser to convert NCjson object to text in buffer */
OPTEXPORT int NCJunparse(const NCjson* json, unsigned flags, char** textp);
@ -121,7 +128,7 @@ OPTEXPORT int NCJclone(const NCjson* json, NCjson** clonep);
OPTEXPORT void NCJdump(const NCjson* json, unsigned flags, FILE*);
/* convert NCjson* object to output string */
OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif
#endif /*NETCDF_JSON_H*/
#if defined(__cplusplus)
}
@ -131,8 +138,10 @@ OPTEXPORT const char* NCJtotext(const NCjson* json);
#define NCJsort(x) ((x)->sort)
#define NCJstring(x) ((x)->string)
#define NCJlength(x) ((x)==NULL ? 0 : (x)->list.len)
#define NCJdictlength(x) ((x)==NULL ? 0 : (x)->list.len/2)
#define NCJcontents(x) ((x)->list.contents)
#define NCJith(x,i) ((x)->list.contents[i])
#define NCJdictith(x,i) ((x)->list.contents[2*i])
/* Setters */
#define NCJsetsort(x,s) (x)->sort=(s)
@ -147,6 +156,8 @@ OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif /*NETCDF_JSON_H*/
#ifdef NETCDF_JSON_H
/* Copyright 2018, UCAR/Unidata.
See the COPYRIGHT file for more information.
*/
@ -219,7 +230,7 @@ typedef struct NCJparser {
} NCJparser;
typedef struct NCJbuf {
int len; /* |text|; does not include nul terminator */
size_t len; /* |text|; does not include nul terminator */
char* text; /* NULL || nul terminated */
} NCJbuf;
@ -257,7 +268,7 @@ static int NCJyytext(NCJparser*, char* start, size_t pdlen);
static void NCJreclaimArray(struct NCjlist*);
static void NCJreclaimDict(struct NCjlist*);
static int NCJunescape(NCJparser* parser);
static int unescape1(int c);
static char unescape1(char c);
static int listappend(struct NCjlist* list, NCjson* element);
static int NCJcloneArray(const NCjson* array, NCjson** clonep);
@ -265,7 +276,7 @@ static int NCJcloneDict(const NCjson* dict, NCjson** clonep);
static int NCJunparseR(const NCjson* json, NCJbuf* buf, unsigned flags);
static int bytesappendquoted(NCJbuf* buf, const char* s);
static int bytesappend(NCJbuf* buf, const char* s);
static int bytesappendc(NCJbuf* bufp, const char c);
static int bytesappendc(NCJbuf* bufp, char c);
/* Hide everything for plugins */
#ifdef NETCDF_JSON_H
@ -276,7 +287,9 @@ static int NCJnewstring(int sort, const char* value, NCjson** jsonp);
static int NCJnewstringn(int sort, size_t len, const char* value, NCjson** jsonp);
static int NCJclone(const NCjson* json, NCjson** clonep);
static int NCJaddstring(NCjson* json, int sort, const char* s);
static int NCJinsert(NCjson* object, char* key, NCjson* jvalue);
static int NCJinsert(NCjson* object, const char* key, NCjson* jvalue);
static int NCJinsertstring(NCjson* object, const char* key, const char* value);
static int NCJinsertint(NCjson* object, const char* key, long long ivalue);
static int NCJappend(NCjson* object, NCjson* value);
static int NCJunparse(const NCjson* json, unsigned flags, char** textp);
#else /*!NETCDF_JSON_H*/
@ -516,13 +529,12 @@ done:
static int
NCJlex(NCJparser* parser)
{
int c;
int token = NCJ_UNDEF;
char* start;
size_t count;
while(token == 0) { /* avoid need to goto when retrying */
c = *parser->pos;
char c = *parser->pos;
if(c == '\0') {
token = NCJ_EOF;
} else if(c <= ' ' || c == '\177') {/* ignore whitespace */
@ -541,7 +553,7 @@ NCJlex(NCJparser* parser)
}
/* Pushback c */
parser->pos--;
count = ((parser->pos) - start);
count = (size_t)((parser->pos) - start);
if(NCJyytext(parser,start,count)) goto done;
/* Discriminate the word string to get the proper sort */
if(testbool(parser->yytext) == NCJ_OK)
@ -568,7 +580,7 @@ NCJlex(NCJparser* parser)
token = NCJ_UNDEF;
goto done;
}
count = ((parser->pos) - start) - 1; /* -1 for trailing quote */
count = (size_t)((parser->pos) - start) - 1; /* -1 for trailing quote */
if(NCJyytext(parser,start,count)==NCJ_ERR) goto done;
if(NCJunescape(parser)==NCJ_ERR) goto done;
token = NCJ_STRING;
@ -763,7 +775,7 @@ done:
}
OPTSTATIC int
NCJdictget(const NCjson* dict, const char* key, NCjson** valuep)
NCJdictget(const NCjson* dict, const char* key, const NCjson** valuep)
{
int i,stat = NCJ_OK;
@ -789,7 +801,7 @@ NCJunescape(NCJparser* parser)
{
char* p = parser->yytext;
char* q = p;
int c;
char c;
for(;(c=*p++);) {
if(c == NCJ_ESCAPE) {
c = *p++;
@ -799,9 +811,9 @@ NCJunescape(NCJparser* parser)
case 'n': c = '\n'; break;
case 'r': c = '\r'; break;
case 't': c = '\t'; break;
case NCJ_QUOTE: c = c; break;
case NCJ_ESCAPE: c = c; break;
default: c = c; break;/* technically not Json conformant */
case NCJ_QUOTE: break;
case NCJ_ESCAPE: break;
default: break;/* technically not Json conformant */
}
}
*q++ = c;
@ -811,8 +823,8 @@ NCJunescape(NCJparser* parser)
}
/* Unescape a single character */
static int
unescape1(int c)
static char
unescape1(char c)
{
switch (c) {
case 'b': c = '\b'; break;
@ -820,7 +832,7 @@ unescape1(int c)
case 'n': c = '\n'; break;
case 'r': c = '\r'; break;
case 't': c = '\t'; break;
default: c = c; break;/* technically not Json conformant */
default: break;/* technically not Json conformant */
}
return c;
}
@ -1049,7 +1061,7 @@ done:
/* Insert key-value pair into a dict object. key will be strdup'd */
OPTSTATIC int
NCJinsert(NCjson* object, char* key, NCjson* jvalue)
NCJinsert(NCjson* object, const char* key, NCjson* jvalue)
{
int stat = NCJ_OK;
NCjson* jkey = NULL;
@ -1062,6 +1074,36 @@ done:
return NCJTHROW(stat);
}
/* Insert key-value pair as strings into a dict object.
key and value will be strdup'd */
OPTSTATIC int
NCJinsertstring(NCjson* object, const char* key, const char* value)
{
int stat = NCJ_OK;
NCjson* jvalue = NULL;
    if(value == NULL)
	{if((stat = NCJnew(NCJ_NULL,&jvalue))==NCJ_ERR) goto done;}
    else
	{if((stat = NCJnewstring(NCJ_STRING,value,&jvalue))==NCJ_ERR) goto done;}
    if((stat = NCJinsert(object,key,jvalue))==NCJ_ERR) goto done;
done:
return NCJTHROW(stat);
}
/* Insert key-value pair with value being an integer */
OPTSTATIC int
NCJinsertint(NCjson* object, const char* key, long long ivalue)
{
int stat = NCJ_OK;
NCjson* jvalue = NULL;
char digits[128];
snprintf(digits,sizeof(digits),"%lld",ivalue);
    if((stat = NCJnewstring(NCJ_STRING,digits,&jvalue))==NCJ_ERR) goto done;
    if((stat = NCJinsert(object,key,jvalue))==NCJ_ERR) goto done;
done:
return NCJTHROW(stat);
}
/* Append value to an array or dict object. */
OPTSTATIC int
NCJappend(NCjson* object, NCjson* value)
@ -1155,7 +1197,7 @@ static int
escape(const char* text, NCJbuf* buf)
{
const char* p = text;
int c;
char c;
for(;(c=*p++);) {
char replace = 0;
switch (c) {
@ -1265,3 +1307,4 @@ netcdf_supresswarnings(void)
ignore = (void*)NCJtotext;
ignore = ignore;
}
#endif /*NETCDF_JSON_H*/

View File

@ -40,7 +40,6 @@
#define NC_VERSION "@NC_VERSION@"
#define NC_HAS_NC2 @NC_HAS_NC2@ /*!< API version 2 support. */
#define NC_HAS_NC4 @NC_HAS_NC4@ /*!< API version 4 support. */
#define NC_HAS_HDF4 @NC_HAS_HDF4@ /*!< HDF4 support. */
#define NC_HAS_HDF5 @NC_HAS_HDF5@ /*!< HDF5 support. */
#define NC_HAS_SZIP @NC_HAS_SZIP@ /*!< szip support */
@ -50,7 +49,6 @@
#define NC_HAS_BYTERANGE @NC_HAS_BYTERANGE@ /*!< Byterange support. */
#define NC_HAS_DISKLESS @NC_HAS_DISKLESS@ /*!< diskless support. */
#define NC_HAS_MMAP @NC_HAS_MMAP@ /*!< mmap support. */
#define NC_HAS_JNA @NC_HAS_JNA@ /*!< jna support. */
#define NC_HAS_PNETCDF @NC_HAS_PNETCDF@ /*!< PnetCDF support. */
#define NC_HAS_PARALLEL4 @NC_HAS_PARALLEL4@ /*!< parallel IO support via HDF5 */
#define NC_HAS_PARALLEL @NC_HAS_PARALLEL@ /*!< parallel IO support via HDF5 and/or PnetCDF. */

File diff suppressed because it is too large

View File

@ -1,8 +1,9 @@
/* A Bison parser, made by GNU Bison 3.0.4. */
/* A Bison parser, made by GNU Bison 3.8.2. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2021 Free Software Foundation,
Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -15,7 +16,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
along with this program. If not, see <https://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
@ -30,6 +31,10 @@
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* DO NOT RELY ON FEATURES THAT ARE NOT DOCUMENTED in the manual,
especially those whose name start with YY_ or yy_. They are
private implementation details that can be changed or removed. */
#ifndef YY_DCE_DCE_TAB_H_INCLUDED
# define YY_DCE_DCE_TAB_H_INCLUDED
/* Debug traces. */
@ -40,15 +45,20 @@
extern int dcedebug;
#endif
/* Token type. */
/* Token kinds. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
enum yytokentype
{
SCAN_WORD = 258,
SCAN_STRINGCONST = 259,
SCAN_NUMBERCONST = 260
YYEMPTY = -2,
YYEOF = 0, /* "end of file" */
YYerror = 256, /* error */
YYUNDEF = 257, /* "invalid token" */
SCAN_WORD = 258, /* SCAN_WORD */
SCAN_STRINGCONST = 259, /* SCAN_STRINGCONST */
SCAN_NUMBERCONST = 260 /* SCAN_NUMBERCONST */
};
typedef enum yytokentype yytoken_kind_t;
#endif
/* Value type. */
@ -60,6 +70,8 @@ typedef int YYSTYPE;
int dceparse (DCEparsestate* parsestate);
#endif /* !YY_DCE_DCE_TAB_H_INCLUDED */

View File

@ -3,6 +3,7 @@
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
*********************************************************************/
#include "config.h"
#include "ncdispatch.h"
#include "ncd4dispatch.h"
#include "d4includes.h"

View File

@ -1363,7 +1363,7 @@ makeAnonDim(NCD4parser* parser, const char* sizestr)
ret = parseLL(sizestr,&size);
if(ret) return NULL;
snprintf(name,NC_MAX_NAME,"/_AnonymousDim%lld",size);
snprintf(name,NC_MAX_NAME,"/%s_%lld",NCDIMANON,size);
/* See if it exists already */
dim = lookupFQN(parser,name,NCD4_DIM);
if(dim == NULL) {/* create it */

View File

@ -56,6 +56,8 @@ static int computefieldinfo(struct NCAUX_CMPD* cmpd);
static int filterspec_cvt(const char* txt, size_t* nparamsp, unsigned int* params);
EXTERNL int nc_dump_data(int ncid, nc_type xtype, void* memory, size_t count, char** bufp);
/**************************************************/
/*
This code is a variant of the H5detect.c code from HDF5.
@ -922,6 +924,7 @@ ncaux_inq_any_type(int ncid, nc_type typeid, char *name, size_t *sizep, nc_type
return NC_inq_any_type(ncid, typeid, name, sizep, basetypep, nfieldsp, classp);
}
#ifdef USE_NETCDF4
/**
@param ncid - only needed for a compound type
@param xtype - type for which alignment is requested
@ -932,6 +935,7 @@ ncaux_type_alignment(int xtype, int ncid, size_t* alignp)
/* Defer to the internal version */
return NC_type_alignment(ncid, xtype, alignp);
}
#endif
/**
Dump the output tree of data from a call
@ -948,6 +952,6 @@ This function is just a wrapper around nc_dump__data.
EXTERNL int
ncaux_dump_data(int ncid, int xtype, void* memory, size_t count, char** bufp)
{
EXTERNL int nc_dump_data(int ncid, nc_type xtype, void* memory, size_t count, char** bufp);
return nc_dump_data(ncid, xtype, memory, count, bufp);
}

View File

@ -12,11 +12,17 @@
#include "nc_logging.h"
#include "nclist.h"
static int NC_find_equal_type(int ncid1, nc_type xtype1, int ncid2, nc_type *xtype2);
#ifdef USE_NETCDF4
static int searchgroup(int ncid1, int tid1, int grp, int* tid2);
static int searchgrouptree(int ncid1, int tid1, int grp, int* tid2);
#endif /*USE_NETCDF4*/
#ifdef USE_NETCDF4
/**
* @internal Compare two netcdf types for equality. Must have the
* ncids as well, to find user-defined types.
@ -203,6 +209,8 @@ done:
return ret;
}
#endif /* USE_NETCDF4 */
/**
* @internal Given a type in one file, find its equal (if any) in
* another file. It sounds so simple, but it's a real pain!
@ -232,15 +240,15 @@ NC_find_equal_type(int ncid1, nc_type xtype1, int ncid2, nc_type *xtype2)
return NC_NOERR;
}
#ifdef USE_NETCDF4
/* Recursively search group ncid2 and its children
to find a type that is equal (using compare_type)
to xtype1. */
ret = NC_rec_find_nc_type(ncid1, xtype1 , ncid2, xtype2);
#endif /* USE_NETCDF4 */
return ret;
}
#endif /* USE_NETCDF4 */
/**
* This will copy a variable that is an array of primitive type and
* its attributes from one file to another, assuming dimensions in the
@ -726,4 +734,5 @@ done:
return ret;
}
#endif
#endif /* USE_NETCDF4 */

View File

@ -34,8 +34,6 @@ See LICENSE.txt for license information.
#define MAXPATH 1024
/* Define vectors of zeros and ones for use with various nc_get_varX functions */
/* Note, this form of initialization fails under Cygwin */
size_t NC_coord_zero[NC_MAX_VAR_DIMS] = {0};
@ -143,3 +141,335 @@ NCDISPATCH_finalize(void)
NC_freeglobalstate(); /* should be one of the last things done */
return status;
}
/**************************************************/
/* Global State constants and state */
/* The singleton global state object */
static NCglobalstate* nc_globalstate = NULL;
/* Forward */
static int NC_createglobalstate(void);
/** \defgroup global_state Global state functions. */
/** \{
\ingroup global_state
*/
/* NCglobal state management */
static int
NC_createglobalstate(void)
{
int stat = NC_NOERR;
const char* tmp = NULL;
    if(nc_globalstate == NULL) {
	nc_globalstate = calloc(1,sizeof(NCglobalstate));
	if(nc_globalstate == NULL) {stat = NC_ENOMEM; goto done;}
    }
/* Initialize struct pointers */
if((nc_globalstate->rcinfo = calloc(1,sizeof(struct NCRCinfo)))==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->entries = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->s3profiles = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
/* Get environment variables */
if(getenv(NCRCENVIGNORE) != NULL)
nc_globalstate->rcinfo->ignore = 1;
tmp = getenv(NCRCENVRC);
if(tmp != NULL && strlen(tmp) > 0)
nc_globalstate->rcinfo->rcfile = strdup(tmp);
/* Initialize chunk cache defaults */
nc_globalstate->chunkcache.size = DEFAULT_CHUNK_CACHE_SIZE; /**< Default chunk cache size. */
nc_globalstate->chunkcache.nelems = DEFAULT_CHUNKS_IN_CACHE; /**< Default chunk cache number of elements. */
nc_globalstate->chunkcache.preemption = DEFAULT_CHUNK_CACHE_PREEMPTION; /**< Default chunk cache preemption. */
done:
return stat;
}
/* Get global state */
NCglobalstate*
NC_getglobalstate(void)
{
if(nc_globalstate == NULL)
NC_createglobalstate();
return nc_globalstate;
}
void
NC_freeglobalstate(void)
{
if(nc_globalstate != NULL) {
nullfree(nc_globalstate->tempdir);
nullfree(nc_globalstate->home);
nullfree(nc_globalstate->cwd);
nullfree(nc_globalstate->aws.default_region);
nullfree(nc_globalstate->aws.config_file);
nullfree(nc_globalstate->aws.profile);
nullfree(nc_globalstate->aws.access_key_id);
nullfree(nc_globalstate->aws.secret_access_key);
if(nc_globalstate->rcinfo) {
NC_rcclear(nc_globalstate->rcinfo);
free(nc_globalstate->rcinfo);
}
free(nc_globalstate);
nc_globalstate = NULL;
}
}
/** \} */
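For orientation only (editorial addition, not in the diff), a caller would typically reach this state through NC_getglobalstate(), which creates the singleton lazily on first use. The printf calls and <stdio.h> usage here are illustrative assumptions.

#include <stdio.h>

static void
globalstate_usage_sketch(void)
{
    NCglobalstate* gs = NC_getglobalstate(); /* allocated on first use */
    if(gs == NULL) return;                   /* only possible if allocation failed */
    if(gs->aws.default_region != NULL)
        printf("default S3 region: %s\n",gs->aws.default_region);
    printf("chunk cache: size=%zu nelems=%zu preemption=%g\n",
           (size_t)gs->chunkcache.size,
           (size_t)gs->chunkcache.nelems,
           (double)gs->chunkcache.preemption);
}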
/**************************************************/
/** \defgroup atomic_types Atomic Type functions */
/** \{
\ingroup atomic_types
*/
/* The sizes of types may vary from platform to platform, but within
* netCDF files, type sizes are fixed. */
#define NC_CHAR_LEN sizeof(char) /**< @internal Size of char. */
#define NC_STRING_LEN sizeof(char *) /**< @internal Size of char *. */
#define NC_BYTE_LEN 1 /**< @internal Size of byte. */
#define NC_SHORT_LEN 2 /**< @internal Size of short. */
#define NC_INT_LEN 4 /**< @internal Size of int. */
#define NC_FLOAT_LEN 4 /**< @internal Size of float. */
#define NC_DOUBLE_LEN 8 /**< @internal Size of double. */
#define NC_INT64_LEN 8 /**< @internal Size of int64. */
/** @internal Names of atomic types. */
const char* nc4_atomic_name[NUM_ATOMIC_TYPES] = {"none", "byte", "char",
"short", "int", "float",
"double", "ubyte",
"ushort", "uint",
"int64", "uint64", "string"};
static const size_t nc4_atomic_size[NUM_ATOMIC_TYPES] = {0, NC_BYTE_LEN, NC_CHAR_LEN, NC_SHORT_LEN,
NC_INT_LEN, NC_FLOAT_LEN, NC_DOUBLE_LEN,
NC_BYTE_LEN, NC_SHORT_LEN, NC_INT_LEN, NC_INT64_LEN,
NC_INT64_LEN, NC_STRING_LEN};
/**
 * @internal Get the name and size of an atomic type. For strings, the
 * size of a char pointer is returned.
*
* @param typeid1 Type ID.
* @param name Gets the name of the type.
* @param size Gets the size of one element of the type in bytes.
*
* @return ::NC_NOERR No error.
* @return ::NC_EBADID Bad ncid.
* @return ::NC_EBADTYPE Type not found.
* @author Dennis Heimbigner
*/
int
NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size)
{
if (typeid1 >= NUM_ATOMIC_TYPES)
return NC_EBADTYPE;
if (name)
strcpy(name, nc4_atomic_name[typeid1]);
if (size)
*size = nc4_atomic_size[typeid1];
return NC_NOERR;
}
/**
* @internal Get the id and size of an atomic type by name.
*
* @param name [in] the name of the type.
* @param idp [out] the type index of the type.
* @param sizep [out] the size of one element of the type in bytes.
*
* @return ::NC_NOERR No error.
* @return ::NC_EBADID Bad ncid.
* @return ::NC_EBADTYPE Type not found.
* @author Dennis Heimbigner
*/
int
NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep)
{
int i;
if (name == NULL || strlen(name) == 0)
return NC_EBADTYPE;
for(i=0;i<NUM_ATOMIC_TYPES;i++) {
if(strcasecmp(name,nc4_atomic_name[i])==0) {
if(idp) *idp = i;
if(sizep) *sizep = nc4_atomic_size[i];
return NC_NOERR;
}
}
return NC_EBADTYPE;
}
/**
* @internal Get the id of an atomic type from the name.
*
* @param ncid File and group ID.
* @param name Name of type
* @param typeidp Pointer that will get the type ID.
*
* @return ::NC_NOERR No error.
* @return ::NC_EBADTYPE Type not found.
* @author Ed Hartnett
*/
int
NC4_inq_atomic_typeid(int ncid, const char *name, nc_type *typeidp)
{
int i;
NC_UNUSED(ncid);
/* Handle atomic types. */
for (i = 0; i < NUM_ATOMIC_TYPES; i++) {
if (!strcmp(name, nc4_atomic_name[i]))
{
if (typeidp)
*typeidp = i;
return NC_NOERR;
}
}
return NC_EBADTYPE;
}
/**
* @internal Get the class of a type
*
* @param xtype NetCDF type ID.
* @param type_class Pointer that gets class of type, NC_INT,
* NC_FLOAT, NC_CHAR, or NC_STRING, NC_ENUM, NC_VLEN, NC_COMPOUND, or
* NC_OPAQUE.
*
* @return ::NC_NOERR No error.
* @author Ed Hartnett, Dennis Heimbigner
*/
int
NC4_get_atomic_typeclass(nc_type xtype, int *type_class)
{
assert(type_class);
switch (xtype) {
case NC_BYTE:
case NC_UBYTE:
case NC_SHORT:
case NC_USHORT:
case NC_INT:
case NC_UINT:
case NC_INT64:
case NC_UINT64:
/* NC_INT is class used for all integral types */
*type_class = NC_INT;
break;
case NC_FLOAT:
case NC_DOUBLE:
/* NC_FLOAT is class used for all floating-point types */
*type_class = NC_FLOAT;
break;
case NC_CHAR:
*type_class = NC_CHAR;
break;
case NC_STRING:
*type_class = NC_STRING;
break;
default:
return NC_EBADTYPE;
}
return NC_NOERR;
}
/** \} */
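A small sketch (editorial, not part of the commit) of how these internal helpers fit together; NC_MAX_NAME, NC_NAT, and the NC_* type ids come from netcdf.h, and the values noted in the comments follow the name/size tables above.

static void
atomic_type_usage_sketch(void)
{
    nc_type id = NC_NAT;
    size_t size = 0;
    char name[NC_MAX_NAME+1];
    int tclass = 0;

    /* Name -> id/size (case-insensitive match against nc4_atomic_name) */
    if(NC4_lookup_atomic_type("int64",&id,&size) == NC_NOERR) {
        /* id == NC_INT64, size == 8 */
    }
    /* Id -> name/size */
    if(NC4_inq_atomic_type(NC_DOUBLE,name,&size) == NC_NOERR) {
        /* name == "double", size == 8 */
    }
    /* Id -> class */
    if(NC4_get_atomic_typeclass(NC_UINT,&tclass) == NC_NOERR) {
        /* tclass == NC_INT: all integral types map to NC_INT */
    }
}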
/**************************************************/
/** \defgroup alignment Alignment functions. */
/** \{
\ingroup alignment
*/
/**
Provide a function to store global data alignment
information.
Repeated calls to nc_set_alignment will overwrite any existing values.
If defined, then for every file created or opened after the call to
nc_set_alignment, and for every new variable added to the file, the
most recently set threshold and alignment values will be applied
to that variable.
The nc_set_alignment function causes new data written to a
netCDF-4 file to be aligned on disk to a specified block
size. To be effective, alignment should be the system disk block
size, or a multiple of it. This setting is effective with MPI
I/O and other parallel systems.
This is a trade-off of write speed versus file size. Alignment
leaves holes between file objects. The default of no alignment
writes file objects contiguously, without holes. Alignment has
no impact on file readability.
Alignment settings apply only indirectly, through the file open
functions. Call nc_set_alignment first, then nc_create or
nc_open for one or more files. Current alignment settings are
locked in when each file is opened, then forgotten when the same
file is closed. For illustration, it is possible to write
different files at the same time with different alignments, by
interleaving nc_set_alignment and nc_open calls.
Alignment applies to all newly written low-level file objects at
or above the threshold size, including chunks of variables,
attributes, and internal infrastructure. Alignment is not locked
in to a data variable. It can change between data chunks of the
same variable, based on a file's history.
Refer to H5Pset_alignment in HDF5 documentation for more
specific details, interactions, and additional rules.
@param threshold The minimum size to which alignment is applied.
@param alignment The alignment value.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_set_alignment(int threshold, int alignment)
{
NCglobalstate* gs = NC_getglobalstate();
gs->alignment.threshold = threshold;
gs->alignment.alignment = alignment;
gs->alignment.defined = 1;
return NC_NOERR;
}
/**
Provide a function to retrieve the global data alignment
information.
The nc_get_alignment function returns the last values set by
nc_set_alignment. If nc_set_alignment has not been called, then
it returns the value 0 for both threshold and alignment.
@param thresholdp Return the current minimum size to which alignment is applied or zero.
@param alignmentp Return the current alignment value or zero.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_get_alignment(int* thresholdp, int* alignmentp)
{
NCglobalstate* gs = NC_getglobalstate();
if(thresholdp) *thresholdp = gs->alignment.threshold;
if(alignmentp) *alignmentp = gs->alignment.alignment;
return NC_NOERR;
}
/** \} */
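To make the calling pattern described above concrete, here is an illustrative, non-normative sequence; the file name and the 512-byte threshold / 4096-byte alignment values are arbitrary examples.

static int
alignment_usage_sketch(void)
{
    int stat, ncid;
    int threshold = 0, alignment = 0;

    /* Align every object of 512 bytes or more to 4 KiB boundaries */
    if((stat = nc_set_alignment(512,4096))) return stat;

    /* Settings are captured at create/open time for each file */
    if((stat = nc_create("aligned.nc",NC_NETCDF4|NC_CLOBBER,&ncid))) return stat;
    /* ... define dimensions and variables as usual ... */
    if((stat = nc_close(ncid))) return stat;

    /* Read back whatever was last set (0,0 if never set) */
    if((stat = nc_get_alignment(&threshold,&alignment))) return stat;
    return NC_NOERR;
}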

View File

@ -218,98 +218,6 @@ done:
return stat;
}
/**************************************************/
/* Support direct user defined filters */
#ifdef ENABLE_CLIENTSIDE_FILTERS
/** Register filer client.
* @note Use void* to avoid having to include hdf.h
*
* @param id Filter ID
* @param info Pointer that gets info.
*
* @return NC_NOERR if the filter is available
* @return NC_EBADID if ncid is invalid
* @return NC_ENOFILTER if filter is not available.
* @author Dennis Heimbigner
*/
EXTERNL int
nc_filter_client_register(unsigned int id, void* info)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
NC_FILTER_OBJ_HDF5 client;
if(id == 0 ||info == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTER_FORMAT_HDF5;
client.sort = NC_FILTER_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = info;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filter_action(NCFILTER_CLIENT_REG, id, &client);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
/** Unregister filer client.
* @note Use void* to avoid having to include hdf.h
*
* @param id Filter ID
*
* @return NC_NOERR if the filter is available
* @author Dennis Heimbigner
*/
EXTERNL int
nc_filter_client_unregister(unsigned int id)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
stat = nc4_global_filter_action(NCFILTER_CLIENT_UNREG, id, NULL);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
/** Inquire about filer client.
* @note Use void* to avoid having to include hdf.h
*
* @param id Filter ID
* @param infop Pointer that gets info.
*
* @return NC_NOERR if the filter is available
* @author Dennis Heimbigner
*/
EXTERNL int
nc_filter_client_inq(unsigned int id, void* infop)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
H5Z_class2_t* hct = (H5Z_class2_t*)infop;
NC_FILTER_OBJ_HDF5 client;
if(id == 0 ||infop == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTER_FORMAT_HDF5;
client.sort = NC_FILTER_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = hct;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filter_action(NCFILTER_CLIENT_INQ, id, &client);
if(stat == NC_NOERR) {
*hct = *(H5Z_class2_t*)client.u.client.info;
}
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
#endif /*ENABLE_CLIENTSIDE_FILTERS*/
/**************************************************/
/* Functions for accessing standardized filters */

View File

@ -1,243 +0,0 @@
/*
* Copyright 2018, University Corporation for Atmospheric Research
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
*/
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef _MSC_VER
#include <io.h>
#endif
#include "netcdf.h"
#include "netcdf_filter.h"
#include "ncdispatch.h"
#include "nc4internal.h"
#include "ncjson.h"
/*
NCZarr filter API
*/
/**************************************************/
/* Per-variable filters */
/**
Find the set of filters (if any) associated with a variable.
Assumes NCZarr format using json
\param ncid NetCDF or group ID, from a previous call to nc_open(),
\param varid Variable ID
\param jsonp a JSON formatted string is returned in this argument
\returns ::NC_NOERR No error.
\returns ::NC_ENOTNC4 Not a netCDF-4 file.
\returns ::NC_EBADID Bad ncid
\returns ::NC_ENOTVAR Invalid variable ID.
\returns ::NC_EINVAL Invalid arguments
\ingroup variables
\author Dennis Heimbigner
*/
EXTERNL int
nc_inq_var_filterx_ids(int ncid, int varid, char** textp)
{
NC* ncp;
int stat = NC_NOERR;
TRACE(nc_inq_var_filterx_ids);
if((stat = NC_check_id(ncid,&ncp))) return stat;
if((stat = ncp->dispatch->inq_var_filterx_ids(ncid,varid,textp))) goto done;
done:
return stat;
}
/**
Find the the param info about filter (if any)
associated with a variable and with specified id.
Assumes HDF5 format using unsigned ints.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param varid Variable ID
\param id The filter id of interest
\param nparamsp (Out) Storage which will get the number of parameters to the filter
\param params (Out) Storage which will get associated parameters.
Note: the caller must allocate and free.
\returns ::NC_NOERR No error.
\returns ::NC_ENOTNC4 Not a netCDF-4 file.
\returns ::NC_EBADID Bad ncid.
\returns ::NC_ENOTVAR Invalid variable ID.
\returns ::NC_ENOFILTER Specified filter not defined for this variable.
\ingroup variables
\author Dennis Heimbigner
*/
EXTERNL int
nc_inq_var_filterx_info(int ncid, int varid, const char* id, char** textp)
{
NC* ncp;
int stat = NC_check_id(ncid,&ncp);
TRACE(nc_inq_var_filterx_info);
if(stat != NC_NOERR) return stat;
if((stat = ncp->dispatch->inq_var_filterx_info(ncid,varid,id,textp))) goto done;
done:
return stat;
}
/**
Define a new variable filter
Assumes HDF5 format using unsigned ints.
Only variables with chunked storage can use filters.
@param ncid File and group ID.
@param varid Variable ID.
@param id Filter ID.
@param nparams Number of filter parameters.
@param parms Filter parameters.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Variable must be chunked.
@return ::NC_EBADID Bad ID.
@author Dennis Heimbigner
*/
EXTERNL int
nc_def_var_filterx(int ncid, int varid, const char* json)
{
NC* ncp;
int stat = NC_check_id(ncid,&ncp);
TRACE(nc_def_var_filterx);
if(stat != NC_NOERR) return stat;
if((stat = ncp->dispatch->def_var_filterx(ncid,varid,json))) goto done;
done:
return stat;
}
/**
Find the first filter (if any) associated with a variable.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param varid Variable ID
\param textp Storage which will get the filter info (id + parameters) in json format
This is redundant over the multi-filter API, so
it can be implemented in terms of those functions.
\returns ::NC_NOERR No error.
\returns ::NC_ENOTNC4 Not a netCDF-4 file.
\returns ::NC_EBADID Bad ncid.
\returns ::NC_ENOTVAR Invalid variable ID.
\ingroup variables
\author Dennis Heimbigner
*/
EXTERNL int
nc_inq_var_filterx(int ncid, int varid, char** textp)
{
NC* ncp;
int stat = NC_NOERR;
char* text = NULL;
NCjson* json = NULL;
NCjson* jid = NULL;
TRACE(nc_inq_var_filterx);
if((stat = NC_check_id(ncid,&ncp))) goto done;
/* Get the filters on this variable */
if((stat = nc_inq_var_filterx_ids(ncid,varid,&text))) goto done;
/* Parse it */
if((stat = NCJparse(text,0,&json))) goto done;
if(json->sort != NCJ_ARRAY)
{stat = NC_EFILTER; goto done;}
if(NCJlength(json) == 0 || NCJcontents(json) == NULL)
{stat = NC_ENOFILTER; goto done;}
jid = NCJith(json,0);
if(jid->sort == NCJ_DICT || jid->sort == NCJ_ARRAY)
{stat = NC_EFILTER; goto done;}
/* Get info about the first filter */
if((stat = nc_inq_var_filterx_info(ncid,varid,NCJstring(jid),textp)))
{stat = NC_ENOFILTER; goto done;}
done:
NCJreclaim(json);
return stat;
}
/**************************************************/
/* Support direct user defined filters */
#ifdef ENABLE_CLIENTSIDE_FILTERS
/* Use void* to avoid having to include hdf.h*/
EXTERNL int
nc_filterx_client_register(unsigned int id, void* info)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
NC_FILTERX_OBJ_HDF5 client;
if(id == 0 ||info == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTERX_FORMAT_HDF5;
client.sort = NC_FILTERX_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = info;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filterx_action(NCFILTER_CLIENT_REG, id, &client);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
EXTERNL int
nc_filterx_client_unregister(unsigned int id)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
stat = nc4_global_filterx_action(NCFILTER_CLIENT_UNREG, id, NULL);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
/* Use void* to avoid having to include hdf.h*/
EXTERNL int
nc_filterx_client_inq(unsigned int id, void* infop)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
H5Z_class2_t* hct = (H5Z_class2_t*)infop;
NC_FILTERX_OBJ_HDF5 client;
if(id == 0 ||infop == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTERX_FORMAT_HDF5;
client.sort = NC_FILTERX_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = hct;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filterx_action(NCFILTER_CLIENT_INQ, id, &client);
if(stat == NC_NOERR) {
*hct = *(H5Z_class2_t*)client.u.client.info;
}
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
#endif /*ENABLE_CLIENTSIDE_FILTERS*/

View File

@ -52,8 +52,8 @@ DAPSUBSTRATE(NC* nc)
typedef struct Position{char* memory; ptrdiff_t offset;} Position;
/* Forward */
#ifdef USE_NETCDF4
static int dump_datar(int ncid, nc_type xtype, Position*, NCbytes* buf);
#ifdef USE_NETCDF4
static int dump_compound(int ncid, nc_type xtype, size_t size, size_t nfields, Position* offset, NCbytes* buf);
static int dump_vlen(int ncid, nc_type xtype, nc_type basetype, Position* offset, NCbytes* buf);
static int dump_enum(int ncid, nc_type xtype, nc_type basetype, Position* offset, NCbytes* buf);

View File

@ -43,9 +43,6 @@ enum URLFORMAT {UF_NONE=0, UF_VIRTUAL=1, UF_PATH=2, UF_S3=3, UF_OTHER=4};
static const char* awsconfigfiles[] = {".aws/config",".aws/credentials",NULL};
#define NCONFIGFILES (sizeof(awsconfigfiles)/sizeof(char*))
static int ncs3_initialized = 0;
static int ncs3_finalized = 0;
/**************************************************/
/* Forward */
@ -56,38 +53,21 @@ static int awsparse(const char* text, NClist* profiles);
/**************************************************/
/* Capture environmental Info */
EXTERNL int
NC_s3sdkinitialize(void)
EXTERNL void
NC_s3sdkenvironment(void)
{
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
}
{
/* Get various environment variables as defined by the AWS sdk */
NCglobalstate* gs = NC_getglobalstate();
if(getenv("AWS_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_REGION"));
else if(getenv("AWS_DEFAULT_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_DEFAULT_REGION"));
else if(gs->aws.default_region == NULL)
gs->aws.default_region = nulldup(AWS_GLOBAL_DEFAULT_REGION);
gs->aws.access_key_id = nulldup(getenv("AWS_ACCESS_KEY_ID"));
gs->aws.config_file = nulldup(getenv("AWS_CONFIG_FILE"));
gs->aws.profile = nulldup(getenv("AWS_PROFILE"));
gs->aws.secret_access_key = nulldup(getenv("AWS_SECRET_ACCESS_KEY"));
}
return NC_NOERR;
}
EXTERNL int
NC_s3sdkfinalize(void)
{
if(!ncs3_finalized) {
ncs3_initialized = 0;
ncs3_finalized = 1;
}
return NC_NOERR;
/* Get various environment variables as defined by the AWS sdk */
NCglobalstate* gs = NC_getglobalstate();
if(getenv("AWS_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_REGION"));
else if(getenv("AWS_DEFAULT_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_DEFAULT_REGION"));
else if(gs->aws.default_region == NULL)
gs->aws.default_region = nulldup(AWS_GLOBAL_DEFAULT_REGION);
gs->aws.access_key_id = nulldup(getenv("AWS_ACCESS_KEY_ID"));
gs->aws.config_file = nulldup(getenv("AWS_CONFIG_FILE"));
gs->aws.profile = nulldup(getenv("AWS_PROFILE"));
gs->aws.secret_access_key = nulldup(getenv("AWS_SECRET_ACCESS_KEY"));
}
/**************************************************/
@ -130,17 +110,24 @@ NC_s3urlrebuild(NCURI* url, NCS3INFO* s3, NCURI** newurlp)
/* split the path by "/" */
if((stat = NC_split_delim(url->path,'/',pathsegments))) goto done;
/* Distinguish path-style from virtual-host style from s3: and from other.
Virtual: https://<bucket-name>.s3.<region>.amazonaws.com/<path> (1)
or: https://<bucket-name>.s3.amazonaws.com/<path> -- region defaults (to us-east-1) (2)
Path: https://s3.<region>.amazonaws.com/<bucket-name>/<path> (3)
or: https://s3.amazonaws.com/<bucket-name>/<path> -- region defaults to us-east-1 (4)
S3: s3://<bucket-name>/<path> (5)
Google: https://storage.googleapis.com/<bucket-name>/<path> (6)
or: gs3://<bucket-name>/<path> (7)
Other: https://<host>/<bucket-name>/<path> (8)
*/
if(url->host == NULL || strlen(url->host) == 0)
/* Distinguish path-style from virtual-host style from s3: and from other.
Virtual:
(1) https://<bucket-name>.s3.<region>.amazonaws.com/<path>
(2) https://<bucket-name>.s3.amazonaws.com/<path> -- region defaults (to us-east-1)
Path:
(3) https://s3.<region>.amazonaws.com/<bucket-name>/<path>
(4) https://s3.amazonaws.com/<bucket-name>/<path> -- region defaults to us-east-1
S3:
(5) s3://<bucket-name>/<path>
Google:
(6) https://storage.googleapis.com/<bucket-name>/<path>
(7) gs3://<bucket-name>/<path>
Other:
(8) https://<host>/<bucket-name>/<path>
(9) https://<bucket-name>.s3.<region>.domain.example.com/<path>
(10)https://s3.<region>.example.com/<bucket>/<path>
*/
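    /* Illustrative decompositions (editorial addition, not part of the diff; bucket and region names invented):
         https://mybucket.s3.us-west-2.amazonaws.com/key -> bucket=mybucket, region=us-west-2 (form 1)
         s3://mybucket/key                               -> bucket=mybucket, region=default   (form 5)
         https://storage.googleapis.com/mybucket/key     -> bucket=mybucket, region unknown   (form 6)
    */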
if(url->host == NULL || strlen(url->host) == 0)
{stat = NC_EURL; goto done;}
/* Reduce the host to standard form such as s3.amazonaws.com by pulling out the
@ -188,12 +175,21 @@ NC_s3urlrebuild(NCURI* url, NCS3INFO* s3, NCURI** newurlp)
/* region is unknown */
/* bucket is unknown at this point */
svc = NCS3GS;
} else { /* Presume Format (8) */
if((host = strdup(url->host))==NULL)
{stat = NC_ENOMEM; goto done;}
/* region is unknown */
/* bucket is unknown */
}
} else { /* Presume Formats (8),(9),(10) */
if (nclistlength(hostsegments) > 3 && strcasecmp(nclistget(hostsegments, 1), "s3") == 0){
bucket = nclistremove(hostsegments, 0);
	    region = nclistremove(hostsegments, 1); /* region follows the bucket and "s3" labels */
	    host = strdup(url->host + strlen(bucket) + 1); /* skip the "<bucket>." prefix */
}else{
if (nclistlength(hostsegments) > 2 && strcasecmp(nclistget(hostsegments, 0), "s3") == 0){
region = nclistremove(hostsegments, 1);
}
if ((host = strdup(url->host)) == NULL){
stat = NC_ENOMEM;
goto done;
}
}
}
/* region = (1) from url, (2) s3->region, (3) default */
if(region == NULL && s3 != NULL)

View File

@ -868,7 +868,7 @@ nc_def_var_szip(int ncid, int varid, int options_mask, int pixels_per_block)
/* This will cause H5Pset_szip to be called when the var is
* created. */
unsigned int params[2] = {(unsigned int)options_mask, (unsigned int)pixels_per_block};
if ((ret = nc_def_var_filter(ncid, varid, HDF5_FILTER_SZIP, 2, params)))
if ((ret = nc_def_var_filter(ncid, varid, H5Z_FILTER_SZIP, 2, params)))
return ret;
return NC_NOERR;
@ -1312,7 +1312,7 @@ NC_check_nulls(int ncid, int varid, const size_t *start, size_t **count,
int
nc_free_string(size_t len, char **data)
{
int i;
size_t i;
for (i = 0; i < len; i++)
free(data[i]);
return NC_NOERR;

View File

@ -128,7 +128,9 @@ static int NCJnewstring(int sort, const char* value, NCjson** jsonp);
static int NCJnewstringn(int sort, size_t len, const char* value, NCjson** jsonp);
static int NCJclone(const NCjson* json, NCjson** clonep);
static int NCJaddstring(NCjson* json, int sort, const char* s);
static int NCJinsert(NCjson* object, char* key, NCjson* jvalue);
static int NCJinsert(NCjson* object, const char* key, NCjson* jvalue);
static int NCJinsertstring(NCjson* object, const char* key, const char* value);
static int NCJinsertint(NCjson* object, const char* key, long long ivalue);
static int NCJappend(NCjson* object, NCjson* value);
static int NCJunparse(const NCjson* json, unsigned flags, char** textp);
#else /*!NETCDF_JSON_H*/
@ -614,7 +616,7 @@ done:
}
OPTSTATIC int
NCJdictget(const NCjson* dict, const char* key, NCjson** valuep)
NCJdictget(const NCjson* dict, const char* key, const NCjson** valuep)
{
int i,stat = NCJ_OK;
@ -900,7 +902,7 @@ done:
/* Insert key-value pair into a dict object. key will be strdup'd */
OPTSTATIC int
NCJinsert(NCjson* object, char* key, NCjson* jvalue)
NCJinsert(NCjson* object, const char* key, NCjson* jvalue)
{
int stat = NCJ_OK;
NCjson* jkey = NULL;
@ -913,6 +915,36 @@ done:
return NCJTHROW(stat);
}
/* Insert key-value pair as strings into a dict object.
key and value will be strdup'd */
OPTSTATIC int
NCJinsertstring(NCjson* object, const char* key, const char* value)
{
int stat = NCJ_OK;
NCjson* jvalue = NULL;
    if(value == NULL)
	{if((stat = NCJnew(NCJ_NULL,&jvalue))==NCJ_ERR) goto done;}
    else
	{if((stat = NCJnewstring(NCJ_STRING,value,&jvalue))==NCJ_ERR) goto done;}
    if((stat = NCJinsert(object,key,jvalue))==NCJ_ERR) goto done;
done:
return NCJTHROW(stat);
}
/* Insert key-value pair with value being an integer */
OPTSTATIC int
NCJinsertint(NCjson* object, const char* key, long long ivalue)
{
int stat = NCJ_OK;
NCjson* jvalue = NULL;
char digits[128];
snprintf(digits,sizeof(digits),"%lld",ivalue);
    if((stat = NCJnewstring(NCJ_STRING,digits,&jvalue))==NCJ_ERR) goto done;
    if((stat = NCJinsert(object,key,jvalue))==NCJ_ERR) goto done;
done:
return NCJTHROW(stat);
}
/* Append value to an array or dict object. */
OPTSTATIC int
NCJappend(NCjson* object, NCjson* value)

View File

@ -133,10 +133,9 @@ NC_s3sdkinitialize(void)
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
#ifdef DEBUG
//ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
//ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Trace;
ncs3options.httpOptions.installSigPipeHandler = true;
ncs3options.loggingOptions.logger_create_fn = [] { return std::make_shared<Aws::Utils::Logging::ConsoleLogSystem>(Aws::Utils::Logging::LogLevel::Trace); };
@ -144,6 +143,9 @@ NC_s3sdkinitialize(void)
#endif
Aws::InitAPI(ncs3options);
/* Get environment information */
NC_s3sdkenvironment();
}
return NCUNTRACE(NC_NOERR);
}
@ -500,7 +502,6 @@ NC_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey, s
int stat = NC_NOERR;
const char* key = NULL;
const char* mcontent = (char*)content;
NCTRACE(11,"bucket=%s pathkey=%s count=%lld content=%p",bucket,pathkey,count,content);
AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0;
@ -535,7 +536,7 @@ NC_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey, s
put_request.SetContentLength((long long)count);
std::shared_ptr<Aws::IOStream> data = std::shared_ptr<Aws::IOStream>(new Aws::StringStream());
data->rdbuf()->pubsetbuf((char*)content,count);
data->rdbuf()->pubsetbuf((char*)content,(std::streamsize)count);
put_request.SetBody(data);
auto put_result = AWSS3GET(s3client)->PutObject(put_request);
if(!put_result.IsSuccess()) {

View File

@ -108,6 +108,37 @@ static int queryinsert(NClist* list, char* ekey, char* evalue);
#define NT(x) ((x)==NULL?"null":x)
/**************************************************/
static int ncs3_initialized = 0;
static int ncs3_finalized = 0;
EXTERNL int
NC_s3sdkinitialize(void)
{
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
}
/* Get environment information */
NC_s3sdkenvironment();
return NC_NOERR;
}
EXTERNL int
NC_s3sdkfinalize(void)
{
if(!ncs3_finalized) {
ncs3_initialized = 0;
ncs3_finalized = 1;
}
return NC_NOERR;
}
/**************************************************/
#if 0
static void
dumps3info(NCS3INFO* s3info, const char* tag)

View File

@ -22,5 +22,7 @@ if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdfhdf4 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()
target_link_libraries(netcdfhdf4 PUBLIC ${HDF4_LIBRARIES})
# Remember to package this file for CMake builds.
ADD_EXTRA_DIST(${libhdf4_SOURCES} CMakeLists.txt)

View File

@ -11,7 +11,6 @@
#include "config.h"
#include "netcdf.h"
#include "ncpathmgr.h"
#include "ncpathmgr.h"
#include "hdf5internal.h"
/** @internal These flags may not be set for create. */

View File

@ -29,135 +29,6 @@
/* Forward */
static int NC4_hdf5_filter_free(struct NC_HDF5_Filter* spec);
/**************************************************/
/* Filter registration support */
#ifdef ENABLE_CLIENTSIDE_FILTERS
/* Mnemonic */
#define FILTERACTIVE 1
/* WARNING: GLOBAL VARIABLE */
/* Define list of registered filters */
static NClist* NC4_registeredfilters = NULL; /** List<NC_FILTER_CLIENT_HDF5*> */
/**************************************************/
/* Filter registration support */
static int
clientfilterlookup(unsigned int id)
{
int i;
if(NC4_registeredfilters == NULL)
NC4_registeredfilters = nclistnew();
for(i=0;i<nclistlength(NC4_registeredfilters);i++) {
NC_FILTER_CLIENT_HDF5* x = nclistget(NC4_registeredfilters,i);
if(x != NULL && x->id == id) {
return i; /* return position */
}
}
return -1;
}
static void
reclaiminfo(NC_FILTER_CLIENT_HDF5* info)
{
nullfree(info);
}
static int
filterremove(int pos)
{
NC_FILTER_CLIENT_HDF5* info = NULL;
if(NC4_registeredfilters == NULL)
return THROW(NC_EINVAL);
if(pos < 0 || pos >= nclistlength(NC4_registeredfilters))
return THROW(NC_EINVAL);
info = nclistget(NC4_registeredfilters,pos);
reclaiminfo(info);
nclistremove(NC4_registeredfilters,pos);
return NC_NOERR;
}
static NC_FILTER_CLIENT_HDF5*
dupfilterinfo(NC_FILTER_CLIENT_HDF5* info)
{
NC_FILTER_CLIENT_HDF5* dup = NULL;
if(info == NULL) goto fail;
if((dup = calloc(1,sizeof(NC_FILTER_CLIENT_HDF5))) == NULL) goto fail;
*dup = *info;
return dup;
fail:
reclaiminfo(dup);
return NULL;
}
int
nc4_global_filter_action(int op, unsigned int id, NC_FILTER_OBJ_HDF5* infop)
{
int stat = NC_NOERR;
H5Z_class2_t* h5filterinfo = NULL;
herr_t herr;
int pos = -1;
NC_FILTER_CLIENT_HDF5* dup = NULL;
NC_FILTER_CLIENT_HDF5* elem = NULL;
NC_FILTER_CLIENT_HDF5 ncf;
NC_UNUSED(format);
switch (op) {
case NCFILTER_CLIENT_REG: /* Ignore id argument */
if(infop == NULL) {stat = NC_EINVAL; goto done;}
assert(NC_FILTER_FORMAT_HDF5 == infop->hdr.format);
assert(NC_FILTER_SORT_CLIENT == infop->sort);
elem = (NC_FILTER_CLIENT_HDF5*)&infop->u.client;
h5filterinfo = elem->info;
/* Another sanity check */
if(id != h5filterinfo->id)
{stat = NC_EINVAL; goto done;}
/* See if this filter is already defined */
if((pos = clientfilterlookup(id)) >= 0)
{stat = NC_ENAMEINUSE; goto done;} /* Already defined */
if((herr = H5Zregister(h5filterinfo)) < 0)
{stat = NC_EFILTER; goto done;}
/* Save a copy of the passed in info */
ncf.id = id;
ncf.info = elem->info;
if((dup=dupfilterinfo(&ncf)) == NULL)
{stat = NC_ENOMEM; goto done;}
nclistpush(NC4_registeredfilters,dup);
break;
case NCFILTER_CLIENT_UNREG:
if(id <= 0)
{stat = NC_ENOTNC4; goto done;}
/* See if this filter is already defined */
if((pos = clientfilterlookup(id)) < 0)
{stat = NC_ENOFILTER; goto done;} /* Not defined */
if((herr = H5Zunregister(id)) < 0)
{stat = NC_EFILTER; goto done;}
if((stat=filterremove(pos))) goto done;
break;
case NCFILTER_CLIENT_INQ:
if(infop == NULL) goto done;
/* Look up the id in our local table */
if((pos = clientfilterlookup(id)) < 0)
{stat = NC_ENOFILTER; goto done;} /* Not defined */
elem = (NC_FILTER_CLIENT_HDF5*)nclistget(NC4_registeredfilters,pos);
if(elem == NULL) {stat = NC_EINTERNAL; goto done;}
if(infop != NULL) {
infop->u.client = *elem;
}
break;
default:
{stat = NC_EINTERNAL; goto done;}
}
done:
return THROW(stat);
}
#endif /*ENABLE_CLIENTSIDE_FILTERS*/
/**************************************************/
/**************************************************/
/**
* @file

View File

@ -1965,11 +1965,9 @@ exit:
static void
hdf5free(void* memory)
{
#ifndef JNA
/* On Windows using the microsoft runtime, it is an error
for one library to free memory allocated by a different library.*/
if(memory != NULL) H5free_memory(memory);
#endif
}
/**
@ -2073,9 +2071,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
int nmembers;
unsigned int m;
char* member_name = NULL;
#ifdef JNA
char jna[1001];
#endif
type->nc_type_class = NC_COMPOUND;
if((retval = NC4_set_varsize(type))) return retval;
@ -2108,12 +2103,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
retval = NC_EBADNAME;
break;
}
#ifdef JNA
else {
strncpy(jna,member_name,1000);
member_name = jna;
}
#endif
/* Offset in bytes on *this* platform. */
member_offset = H5Tget_member_offset(native_typeid, m);
@ -2220,9 +2209,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
nc_type base_nc_type = NC_NAT;
void *value;
char *member_name = NULL;
#ifdef JNA
char jna[1001];
#endif
type->nc_type_class = NC_ENUM;
if((retval = NC4_set_varsize(type))) return retval;
@ -2261,11 +2247,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
if (!(member_name = H5Tget_member_name(hdf_typeid, i)))
return NC_EHDFERR;
#ifdef JNA
strncpy(jna,member_name,1000);
member_name = jna;
#endif
if (strlen(member_name) > NC_MAX_NAME)
return NC_EBADNAME;

View File

@ -239,61 +239,6 @@ NCZ_get_superblock(NC_FILE_INFO_T* file, int* superblockp)
/**************************************************/
/* Utilities */
#if 0
/**
@internal Open the root group object
@param dataset - [in] the root dataset object
@param rootp - [out] created root group
@return NC_NOERR
@author Dennis Heimbigner
*/
static int
ncz_open_rootgroup(NC_FILE_INFO_T* dataset)
{
int stat = NC_NOERR;
int i;
NCZ_FILE_INFO_T* zfile = NULL;
NC_GRP_INFO_T* root = NULL;
void* content = NULL;
char* rootpath = NULL;
NCjson* json = NULL;
ZTRACE(3,"dataset=",dataset->hdr.name);
zfile = dataset->format_file_info;
/* Root should already be defined */
root = dataset->root_grp;
assert(root != NULL);
if((stat=nczm_concat(NULL,ZGROUP,&rootpath)))
goto done;
if((stat = NCZ_downloadjson(zfile->map, rootpath, &json)))
goto done;
/* Process the json */
for(i=0;i<nclistlength(json->contents);i+=2) {
const NCjson* key = nclistget(json->contents,i);
const NCjson* value = nclistget(json->contents,i+1);
if(strcmp(NCJstring(key),"zarr_format")==0) {
int zversion;
if(sscanf(NCJstring(value),"%d",&zversion)!=1)
{stat = NC_ENOTNC; goto done;}
/* Verify against the dataset */
if(zversion != zfile->zarr.zarr_version)
{stat = NC_ENOTNC; goto done;}
}
}
done:
if(json) NCJreclaim(json);
nullfree(rootpath);
nullfree(content);
return ZUNTRACE(stat);
}
#endif
static const char*
controllookup(NClist* controls, const char* key)
{
@ -315,7 +260,7 @@ applycontrols(NCZ_FILE_INFO_T* zinfo)
int stat = NC_NOERR;
const char* value = NULL;
NClist* modelist = nclistnew();
int noflags = 0; /* track non-default negative flags */
size64_t noflags = 0; /* track non-default negative flags */
if((value = controllookup(zinfo->controllist,"mode")) != NULL) {
if((stat = NCZ_comma_parse(value,modelist))) goto done;
@ -352,76 +297,3 @@ done:
nclistfreeall(modelist);
return stat;
}
#if 0
/**
@internal Rewrite attributes into a group or var
@param map - [in] the map object for storage
@param container - [in] the containing object
@param jattrs - [in] the json for .zattrs
@param jtypes - [in] the json for .ztypes
@return NC_NOERR
@author Dennis Heimbigner
*/
int
ncz_unload_jatts(NCZ_FILE_INFO_T* zinfo, NC_OBJ* container, NCjson* jattrs, NCjson* jtypes)
{
int stat = NC_NOERR;
char* fullpath = NULL;
char* akey = NULL;
char* tkey = NULL;
NCZMAP* map = zinfo->map;
assert((NCJsort(jattrs) == NCJ_DICT));
assert((NCJsort(jtypes) == NCJ_DICT));
if(container->sort == NCGRP) {
NC_GRP_INFO_T* grp = (NC_GRP_INFO_T*)container;
/* Get grp's fullpath name */
if((stat = NCZ_grpkey(grp,&fullpath)))
goto done;
} else {
NC_VAR_INFO_T* var = (NC_VAR_INFO_T*)container;
/* Get var's fullpath name */
if((stat = NCZ_varkey(var,&fullpath)))
goto done;
}
/* Construct the path to the .zattrs object */
if((stat = nczm_concat(fullpath,ZATTRS,&akey)))
goto done;
/* Always write as V2 */
{
NCjson* k = NULL;
NCjson* v = NULL;
/* remove any previous version */
if(!NCJremove(jattrs,NCZ_V2_ATTRS,1,&k,&v)) {
NCJreclaim(k); NCJreclaim(v);
}
}
if(!(zinfo->controls.flags & FLAG_PUREZARR)) {
/* Insert the jtypes into the set of attributes */
if((stat = NCJinsert(jattrs,NCZ_V2_ATTRS,jtypes))) goto done;
}
/* Upload the .zattrs object */
if((stat=NCZ_uploadjson(map,tkey,jattrs)))
goto done;
done:
if(stat) {
NCJreclaim(jattrs);
NCJreclaim(jtypes);
}
nullfree(fullpath);
nullfree(akey);
nullfree(tkey);
return stat;
}
#endif

View File

@ -41,15 +41,15 @@ EXTERNL int ncz_unload_jatts(NCZ_FILE_INFO_T*, NC_OBJ* container, NCjson* jattrs
EXTERNL int ncz_close_file(NC_FILE_INFO_T* file, int abort);
/* zcvt.c */
EXTERNL int NCZ_json2cvt(NCjson* jsrc, struct ZCVT* zcvt, nc_type* typeidp);
EXTERNL int NCZ_convert1(NCjson* jsrc, nc_type, NCbytes*);
EXTERNL int NCZ_json2cvt(const NCjson* jsrc, struct ZCVT* zcvt, nc_type* typeidp);
EXTERNL int NCZ_convert1(const NCjson* jsrc, nc_type, NCbytes*);
EXTERNL int NCZ_stringconvert1(nc_type typid, char* src, NCjson* jvalue);
EXTERNL int NCZ_stringconvert(nc_type typid, size_t len, void* data0, NCjson** jdatap);
/* zsync.c */
EXTERNL int ncz_sync_file(NC_FILE_INFO_T* file, int isclose);
EXTERNL int ncz_sync_grp(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, int isclose);
EXTERNL int ncz_sync_atts(NC_FILE_INFO_T*, NC_OBJ* container, NCindex* attlist, int isclose);
EXTERNL int ncz_sync_atts(NC_FILE_INFO_T*, NC_OBJ* container, NCindex* attlist, NCjson* jatts, NCjson* jtypes, int isclose);
EXTERNL int ncz_read_grp(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp);
EXTERNL int ncz_read_atts(NC_FILE_INFO_T* file, NC_OBJ* container);
EXTERNL int ncz_read_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp);
@ -62,12 +62,10 @@ EXTERNL int NCZ_grpkey(const NC_GRP_INFO_T* grp, char** pathp);
EXTERNL int NCZ_varkey(const NC_VAR_INFO_T* var, char** pathp);
EXTERNL int NCZ_dimkey(const NC_DIM_INFO_T* dim, char** pathp);
EXTERNL int ncz_splitkey(const char* path, NClist* segments);
EXTERNL int NCZ_readdict(NCZMAP* zmap, const char* key, NCjson** jsonp);
EXTERNL int NCZ_readarray(NCZMAP* zmap, const char* key, NCjson** jsonp);
EXTERNL int ncz_nctypedecode(const char* snctype, nc_type* nctypep);
EXTERNL int ncz_nctype2dtype(nc_type nctype, int endianness, int purezarr,int len, char** dnamep);
EXTERNL int ncz_dtype2nctype(const char* dtype, nc_type typehint, int purezarr, nc_type* nctypep, int* endianp, int* typelenp);
EXTERNL int NCZ_inferattrtype(NCjson* value, nc_type typehint, nc_type* typeidp);
EXTERNL int NCZ_inferattrtype(const NCjson* value, nc_type typehint, nc_type* typeidp);
EXTERNL int NCZ_inferinttype(unsigned long long u64, int negative);
EXTERNL int ncz_fill_value_sort(nc_type nctype, int*);
EXTERNL int NCZ_createobject(NCZMAP* zmap, const char* key, size64_t size);
@ -89,7 +87,7 @@ EXTERNL int NCZ_get_maxstrlen(NC_OBJ* obj);
EXTERNL int NCZ_fixed2char(const void* fixed, char** charp, size_t count, int maxstrlen);
EXTERNL int NCZ_char2fixed(const char** charp, void* fixed, size_t count, int maxstrlen);
EXTERNL int NCZ_copy_data(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, const void* memory, size_t count, int reading, void* copy);
EXTERNL int NCZ_iscomplexjson(NCjson* value, nc_type typehint);
EXTERNL int NCZ_iscomplexjson(const NCjson* value, nc_type typehint);
/* zwalk.c */
EXTERNL int NCZ_read_chunk(int ncid, int varid, size64_t* zindices, void* chunkdata);

View File

@ -51,7 +51,7 @@ ncz_getattlist(NC_GRP_INFO_T *grp, int varid, NC_VAR_INFO_T **varp, NCindex **at
{
NC_VAR_INFO_T *var;
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
return NC_ENOTVAR;
assert(var->hdr.id == varid);
@ -120,7 +120,7 @@ ncz_get_att_special(NC_FILE_INFO_T* h5, NC_VAR_INFO_T* var, const char* name,
/* The global reserved attributes */
if(strcmp(name,NCPROPS)==0) {
int len;
size_t len;
if(h5->provenance.ncproperties == NULL)
{stat = NC_ENOTATT; goto done;}
if(mem_type == NC_NAT) mem_type = NC_CHAR;
@ -138,7 +138,7 @@ ncz_get_att_special(NC_FILE_INFO_T* h5, NC_VAR_INFO_T* var, const char* name,
if(strcmp(name,SUPERBLOCKATT)==0)
iv = (unsigned long long)h5->provenance.superblockversion;
else /* strcmp(name,ISNETCDF4ATT)==0 */
iv = NCZ_isnetcdf4(h5);
iv = (unsigned long long)NCZ_isnetcdf4(h5);
if(mem_type == NC_NAT) mem_type = NC_INT;
if(data)
switch (mem_type) {
@ -279,8 +279,8 @@ NCZ_del_att(int ncid, int varid, const char *name)
NC_FILE_INFO_T *h5;
NC_ATT_INFO_T *att;
NCindex* attlist = NULL;
int i;
size_t deletedid;
size_t i;
int deletedid;
int retval;
/* Name must be provided. */
@ -516,7 +516,7 @@ ncz_put_att(NC_GRP_INFO_T* grp, int varid, const char *name, nc_type file_type,
/* For an existing att, if we're not in define mode, the len
must not be greater than the existing len for classic model. */
if (!(h5->flags & NC_INDEF) &&
len * nc4typelen(file_type) > (size_t)att->len * nc4typelen(att->nc_typeid))
len * (size_t)nc4typelen(file_type) > (size_t)att->len * (size_t)nc4typelen(att->nc_typeid))
{
if (h5->cmode & NC_CLASSIC_MODEL)
return NC_ENOTINDEFINE;
@ -980,7 +980,7 @@ int
ncz_create_fillvalue(NC_VAR_INFO_T* var)
{
int stat = NC_NOERR;
int i;
size_t i;
NC_ATT_INFO_T* fv = NULL;
/* Have the var's attributes been read? */

View File

@ -258,7 +258,7 @@ NCZ_compute_all_slice_projections(
NCZSliceProjections* results)
{
int stat = NC_NOERR;
size64_t r;
int r;
for(r=0;r<common->rank;r++) {
/* Compute each of the rank SliceProjections instances */

View File

@ -72,7 +72,7 @@ zclose_group(NC_GRP_INFO_T *grp)
{
int stat = NC_NOERR;
NCZ_GRP_INFO_T* zgrp;
int i;
size_t i;
assert(grp && grp->format_grp_info != NULL);
LOG((3, "%s: grp->name %s", __func__, grp->hdr.name));
@ -103,6 +103,9 @@ zclose_group(NC_GRP_INFO_T *grp)
/* Close the zgroup. */
zgrp = grp->format_grp_info;
LOG((4, "%s: closing group %s", __func__, grp->hdr.name));
nullfree(zgrp->zgroup.prefix);
NCJreclaim(zgrp->zgroup.obj);
NCJreclaim(zgrp->zgroup.atts);
nullfree(zgrp);
grp->format_grp_info = NULL; /* avoid memory errors */
@ -123,7 +126,7 @@ zclose_gatts(NC_GRP_INFO_T* grp)
{
int stat = NC_NOERR;
NC_ATT_INFO_T *att;
int a;
size_t a;
for(a = 0; a < ncindexsize(grp->att); a++) {
NCZ_ATT_INFO_T* zatt = NULL;
att = (NC_ATT_INFO_T* )ncindexith(grp->att, a);
@ -149,10 +152,9 @@ NCZ_zclose_var1(NC_VAR_INFO_T* var)
int stat = NC_NOERR;
NCZ_VAR_INFO_T* zvar;
NC_ATT_INFO_T* att;
int a;
size_t a;
assert(var && var->format_var_info);
zvar = var->format_var_info;;
for(a = 0; a < ncindexsize(var->att); a++) {
NCZ_ATT_INFO_T* zatt;
att = (NC_ATT_INFO_T*)ncindexith(var->att, a);
@ -170,9 +172,14 @@ NCZ_zclose_var1(NC_VAR_INFO_T* var)
#endif
/* Reclaim the type */
if(var->type_info) (void)zclose_type(var->type_info);
/* reclaim dispatch info */
zvar = var->format_var_info;;
if(zvar->cache) NCZ_free_chunk_cache(zvar->cache);
/* reclaim xarray */
if(zvar->xarray) nclistfreeall(zvar->xarray);
nullfree(zvar->zarray.prefix);
NCJreclaim(zvar->zarray.obj);
NCJreclaim(zvar->zarray.atts);
nullfree(zvar);
var->format_var_info = NULL; /* avoid memory errors */
return stat;
@ -191,7 +198,7 @@ zclose_vars(NC_GRP_INFO_T* grp)
{
int stat = NC_NOERR;
NC_VAR_INFO_T* var;
int i;
size_t i;
for(i = 0; i < ncindexsize(grp->vars); i++) {
var = (NC_VAR_INFO_T*)ncindexith(grp->vars, i);
@ -215,7 +222,7 @@ zclose_dims(NC_GRP_INFO_T* grp)
{
int stat = NC_NOERR;
NC_DIM_INFO_T* dim;
int i;
size_t i;
for(i = 0; i < ncindexsize(grp->dim); i++) {
NCZ_DIM_INFO_T* zdim;
@ -265,7 +272,7 @@ static int
zclose_types(NC_GRP_INFO_T* grp)
{
int stat = NC_NOERR;
int i;
size_t i;
NC_TYPE_INFO_T* type;
for(i = 0; i < ncindexsize(grp->type); i++)
@ -289,7 +296,7 @@ static int
zwrite_vars(NC_GRP_INFO_T *grp)
{
int stat = NC_NOERR;
int i;
size_t i;
assert(grp && grp->format_grp_info != NULL);
LOG((3, "%s: grp->name %s", __func__, grp->hdr.name));

View File

@ -15,7 +15,7 @@
Code taken directly from libdap4/d4cvt.c
*/
static const int ncz_type_size[NC_MAX_ATOMIC_TYPE+1] = {
static const size_t ncz_type_size[NC_MAX_ATOMIC_TYPE+1] = {
0, /*NC_NAT*/
sizeof(char), /*NC_BYTE*/
sizeof(char), /*NC_CHAR*/
@ -101,7 +101,7 @@ done:
/* Warning: do not free the returned zcvt.strv; it may point into a string in jsrc */
int
NCZ_json2cvt(NCjson* jsrc, struct ZCVT* zcvt, nc_type* typeidp)
NCZ_json2cvt(const NCjson* jsrc, struct ZCVT* zcvt, nc_type* typeidp)
{
int stat = NC_NOERR;
nc_type srctype = NC_NAT;
@ -154,7 +154,7 @@ done:
/* Convert a singleton NCjson value to a memory equivalent value of specified dsttype; */
int
NCZ_convert1(NCjson* jsrc, nc_type dsttype, NCbytes* buf)
NCZ_convert1(const NCjson* jsrc, nc_type dsttype, NCbytes* buf)
{
int stat = NC_NOERR;
nc_type srctype;
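The zcvt.c hunks above only constify the NCjson inputs; the conversion behavior is unchanged. As a hedged illustration (assuming NCZ_convert1 appends the converted raw bytes to the NCbytes buffer), a scalar JSON value can be turned into a native int roughly like this:

#include <string.h>     /* memcpy */
/* Hedged sketch: convert the JSON scalar "17" to a native NC_INT value. */
static int
example_convert(void)
{
    NCjson* j = NULL;
    NCbytes* buf = ncbytesnew();
    int v = 0, stat = NC_NOERR;
    if(NCJparse("17", 0, &j) < 0) {stat = NC_ENCZARR; goto done;}
    if((stat = NCZ_convert1(j, NC_INT, buf))) goto done;
    memcpy(&v, ncbytescontents(buf), sizeof(v));   /* v == 17 */
done:
    NCJreclaim(j);
    ncbytesfree(buf);
    return stat;
}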
@ -536,7 +536,7 @@ int
NCZ_stringconvert(nc_type typeid, size_t len, void* data0, NCjson** jdatap)
{
int stat = NC_NOERR;
int i;
size_t i;
char* src = data0; /* so we can do arithmetic on it */
size_t typelen;
char* str = NULL;

View File

@ -979,7 +979,7 @@ NCZ_filter_build(const NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, const NCjson* j
{
int i,stat = NC_NOERR;
NCZ_Filter* filter = NULL;
NCjson* jvalue = NULL;
const NCjson* jvalue = NULL;
NCZ_Plugin* plugin = NULL;
NCZ_Codec codec = codec_empty;
NCZ_HDF5 hdf5 = hdf5_empty;

View File

@ -22,7 +22,6 @@
#define NCZ_CHUNKSIZE_FACTOR (10)
#define NCZ_MIN_CHUNK_SIZE (2)
/**************************************************/
/* Constants */
@ -39,56 +38,43 @@
# endif
#endif
/* V1 reserved objects */
#define NCZMETAROOT "/.nczarr"
#define NCZGROUP ".nczgroup"
#define NCZARRAY ".nczarray"
#define NCZATTRS ".nczattrs"
/* Deprecated */
#define NCZVARDEP ".nczvar"
#define NCZATTRDEP ".nczattr"
#define ZMETAROOT "/.zgroup"
#define ZMETAATTR "/.zattrs"
#define ZGROUP ".zgroup"
#define ZATTRS ".zattrs"
#define ZARRAY ".zarray"
/* Pure Zarr pseudo names */
#define ZDIMANON "_zdim"
/* V2 Reserved Attributes */
/*
Inserted into /.zgroup
For nczarr version 2.x.x, the following (key,value)
pairs are stored in .zgroup and/or .zarray.
Inserted into /.zattrs in root group
_nczarr_superblock: {"version": "2.0.0"}
Inserted into any .zgroup
Inserted into any group level .zattrs
"_nczarr_group": "{
\"dimensions\": {\"d1\": \"1\", \"d2\": \"1\",...}
\"variables\": [\"v1\", \"v2\", ...]
\"dimensions\": [{name: <dimname>, size: <integer>, unlimited: 1|0},...],
\"arrays\": [\"v1\", \"v2\", ...]
\"groups\": [\"g1\", \"g2\", ...]
}"
Inserted into any .zarray
Inserted into any array level .zattrs
"_nczarr_array": "{
\"dimensions\": [\"/g1/g2/d1\", \"/d2\",...]
\"storage\": \"scalar\"|\"contiguous\"|\"compact\"|\"chunked\"
\"dimension_references\": [\"/g1/g2/d1\", \"/d2\",...]
\"storage\": \"scalar\"|\"contiguous\"|\"chunked\"
}"
Inserted into any .zattrs ? or should it go into the container?
"_nczarr_attrs": "{
Inserted into any .zattrs
"_nczarr_attr": "{
\"types\": {\"attr1\": \"<i4\", \"attr2\": \"<i1\",...}
}

Note: _nczarr_attr types include non-standard use of a zarr type: "|U1" => NC_CHAR.

*/
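For concreteness, a hedged sketch of what a group-level .zattrs payload carrying the keys described above might look like, expressed as a C string and parsed with the NCJparse/NCJreclaim helpers used elsewhere in this diff; the field values are illustrative only, not taken from the real writer.

#include "ncjson.h"    /* NCjson, NCJparse, NCJreclaim (assumed header name) */

/* Illustrative group-level .zattrs content carrying the _nczarr_* keys. */
static const char* example_zattrs =
    "{"
    " \"_nczarr_superblock\": {\"version\": \"2.0.0\"},"
    " \"_nczarr_group\": {"
    "   \"dimensions\": [{\"name\": \"d1\", \"size\": 1, \"unlimited\": 0}],"
    "   \"arrays\": [\"v1\"], \"groups\": []"
    " },"
    " \"_nczarr_attr\": {\"types\": {\"attr1\": \"<i4\"}}"
    "}";

static int
example_parse(void)
{
    NCjson* jattrs = NULL;
    if(NCJparse(example_zattrs, 0, &jattrs) < 0)
        return NC_ENCZARR;         /* malformed metadata */
    /* ... look up the _nczarr_* keys in jattrs ... */
    NCJreclaim(jattrs);
    return NC_NOERR;
}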
#define NCZ_V2_SUPERBLOCK "_nczarr_superblock"
#define NCZ_V2_GROUP "_nczarr_group"
#define NCZ_V2_ARRAY "_nczarr_array"
#define NCZ_V2_ATTR NC_NCZARR_ATTR
#define NCZ_V2_SUPERBLOCK_UC "_NCZARR_SUPERBLOCK"
#define NCZ_V2_GROUP_UC "_NCZARR_GROUP"
#define NCZ_V2_ARRAY_UC "_NCZARR_ARRAY"
#define NCZ_V2_ATTR_UC NC_NCZARR_ATTR_UC
#define NCZ_V2_ATTR "_nczarr_attr" /* Must match value in include/nc4internal.h */
#define NCZARRCONTROL "nczarr"
#define PUREZARRCONTROL "zarr"
@ -96,6 +82,9 @@ Inserted into any .zattrs ? or should it go into the container?
#define NOXARRAYCONTROL "noxarray"
#define XARRAYSCALAR "_scalar_"
#define NC_NCZARR_MAXSTRLEN_ATTR "_nczarr_maxstrlen"
#define NC_NCZARR_DEFAULT_MAXSTRLEN_ATTR "_nczarr_default_maxstrlen"
#define LEGAL_DIM_SEPARATORS "./"
#define DFALT_DIM_SEPARATOR '.'
@ -151,7 +140,7 @@ typedef struct NCZ_FILE_INFO {
# define FLAG_SHOWFETCH 2
# define FLAG_LOGGING 4
# define FLAG_XARRAYDIMS 8
# define FLAG_NCZARR_V1 16
# define FLAG_NCZARR_KEY 16 /* _nczarr_xxx keys are stored in object and not in _nczarr_attrs */
NCZM_IMPL mapimpl;
} controls;
int default_maxstrlen; /* default max str size for variables of type string */
@ -170,18 +159,13 @@ typedef struct NCZ_ATT_INFO {
/* Struct to hold ZARR-specific info for a group. */
typedef struct NCZ_GRP_INFO {
NCZcommon common;
#if 0
/* The jcontent field stores the following:
1. List of (name,length) for dims in the group
2. List of (name,type) for user-defined types in the group
3. List of var names in the group
4. List of subgroups names in the group
*/
NClist* dims;
NClist* types; /* currently not used */
NClist* vars;
NClist* grps;
#endif
/* Read .zgroup and .zattrs once */
struct ZARROBJ {
char* prefix; /* prefix of .zgroup and .zattrs */
NCjson* obj; /* .zgroup|.zarray */
NCjson* atts;
int nczv1; /* 1 => _nczarr_xxx are in obj and not attributes */
} zgroup;
} NCZ_GRP_INFO_T;
/* Struct to hold ZARR-specific info for a variable. */
@ -196,6 +180,9 @@ typedef struct NCZ_VAR_INFO {
char dimension_separator; /* '.' | '/' */
NClist* incompletefilters;
int maxstrlen; /* max length of strings for this variable */
/* Read .zarray and .zattrs once */
struct ZARROBJ zarray;
struct ZARROBJ zattrs;
} NCZ_VAR_INFO_T;
/* Struct to hold ZARR-specific info for a field. */

View File

@ -499,20 +499,21 @@ s3clear(void* s3client, const char* bucket, const char* rootkey)
{
int stat = NC_NOERR;
char** list = NULL;
char** p;
size_t nkeys = 0;
if(s3client && bucket && rootkey) {
if((stat = NC_s3sdksearch(s3client, bucket, rootkey, &nkeys, &list, NULL)))
goto done;
if(list != NULL) {
for(p=list;*p;p++) {
size_t i;
for(i=0;i<nkeys;i++) {
char* p = list[i];
/* If the key is the rootkey, skip it */
if(strcmp(rootkey,*p)==0) continue;
if(strcmp(rootkey,p)==0) continue;
#ifdef S3DEBUG
fprintf(stderr,"s3clear: %s\n",*p);
fprintf(stderr,"s3clear: %s\n",p);
#endif
if((stat = NC_s3sdkdeletekey(s3client, bucket, *p, NULL)))
if((stat = NC_s3sdkdeletekey(s3client, bucket, p, NULL)))
goto done;
}
}

File diff suppressed because it is too large

View File

@ -226,8 +226,9 @@ ncz_splitkey(const char* key, NClist* segments)
@internal Download a .z... structure into memory
@param zmap - [in] controlling zarr map
@param key - [in] .z... object to load
@param jsonp - [out] root of the loaded json
@param jsonp - [out] root of the loaded json (NULL if key does not exist)
@return NC_NOERR
@return NC_EXXX
@author Dennis Heimbigner
*/
int
@ -238,17 +239,22 @@ NCZ_downloadjson(NCZMAP* zmap, const char* key, NCjson** jsonp)
char* content = NULL;
NCjson* json = NULL;
if((stat = nczmap_len(zmap, key, &len)))
goto done;
switch(stat = nczmap_len(zmap, key, &len)) {
case NC_NOERR: break;
case NC_ENOOBJECT: case NC_EEMPTY:
stat = NC_NOERR;
goto exit;
default: goto done;
}
if((content = malloc(len+1)) == NULL)
{stat = NC_ENOMEM; goto done;}
if((stat = nczmap_read(zmap, key, 0, len, (void*)content)))
goto done;
content[len] = '\0';
if((stat = NCJparse(content,0,&json)) < 0)
{stat = NC_ENCZARR; goto done;}
exit:
if(jsonp) {*jsonp = json; json = NULL;}
done:
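Note the contract change above: a missing key now comes back as NC_NOERR with a NULL json rather than NC_EEMPTY/NC_ENOOBJECT, so callers test the pointer instead of the status. A minimal caller-side sketch (the same pattern appears in the NCZ_createdict/NCZ_createarray hunks that follow):

/* Sketch: caller pattern for the revised NCZ_downloadjson contract. */
static int
example_load(NCZMAP* zmap, const char* key)
{
    int stat = NC_NOERR;
    NCjson* json = NULL;
    if((stat = NCZ_downloadjson(zmap, key, &json))) return stat; /* real failure */
    if(json == NULL) {
        /* key does not exist: create it or fall back to a default */
    } else {
        /* key exists: use json ... */
        NCJreclaim(json);
    }
    return stat;
}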
@ -310,13 +316,9 @@ NCZ_createdict(NCZMAP* zmap, const char* key, NCjson** jsonp)
NCjson* json = NULL;
/* See if it already exists */
stat = NCZ_downloadjson(zmap,key,&json);
if(stat != NC_NOERR) {
if(stat == NC_EEMPTY) {/* create it */
if((stat = nczmap_def(zmap,key,NCZ_ISMETA)))
goto done;
} else
goto done;
if((stat = NCZ_downloadjson(zmap,key,&json))) goto done;
if(json == NULL) {
if((stat = nczmap_def(zmap,key,NCZ_ISMETA))) goto done;
} else {
/* Already exists, fail */
stat = NC_EINVAL;
@ -346,18 +348,14 @@ NCZ_createarray(NCZMAP* zmap, const char* key, NCjson** jsonp)
int stat = NC_NOERR;
NCjson* json = NULL;
stat = NCZ_downloadjson(zmap,key,&json);
if(stat != NC_NOERR) {
if(stat == NC_EEMPTY) {/* create it */
if((stat = nczmap_def(zmap,key,NCZ_ISMETA)))
goto done;
/* Create the initial array */
if((stat = NCJnew(NCJ_ARRAY,&json)))
goto done;
} else {
stat = NC_EINVAL;
goto done;
}
if((stat = NCZ_downloadjson(zmap,key,&json))) goto done;
if(json == NULL) { /* create it */
if((stat = nczmap_def(zmap,key,NCZ_ISMETA))) goto done;
/* Create the initial array */
if((stat = NCJnew(NCJ_ARRAY,&json))) goto done;
} else {
stat = NC_EINVAL;
goto done;
}
if(json->sort != NCJ_ARRAY) {stat = NC_ENCZARR; goto done;}
if(jsonp) {*jsonp = json; json = NULL;}
@ -367,54 +365,6 @@ done:
}
#endif /*0*/
/**
@internal Get contents of a meta object; fail if it does not exist
@param zmap - [in] map
@param key - [in] key of the object
@param jsonp - [out] return parsed json
@return NC_NOERR
@return NC_EEMPTY [object did not exist]
@author Dennis Heimbigner
*/
int
NCZ_readdict(NCZMAP* zmap, const char* key, NCjson** jsonp)
{
int stat = NC_NOERR;
NCjson* json = NULL;
if((stat = NCZ_downloadjson(zmap,key,&json)))
goto done;
if(NCJsort(json) != NCJ_DICT) {stat = NC_ENCZARR; goto done;}
if(jsonp) {*jsonp = json; json = NULL;}
done:
NCJreclaim(json);
return stat;
}
/**
@internal Get contents of a meta object; fail if it does not exist
@param zmap - [in] map
@param key - [in] key of the object
@param jsonp - [out] return parsed json
@return NC_NOERR
@return NC_EEMPTY [object did not exist]
@author Dennis Heimbigner
*/
int
NCZ_readarray(NCZMAP* zmap, const char* key, NCjson** jsonp)
{
int stat = NC_NOERR;
NCjson* json = NULL;
if((stat = NCZ_downloadjson(zmap,key,&json)))
goto done;
if(NCJsort(json) != NCJ_ARRAY) {stat = NC_ENCZARR; goto done;}
if(jsonp) {*jsonp = json; json = NULL;}
done:
NCJreclaim(json);
return stat;
}
#if 0
/**
@internal Given an nc_type, produce the corresponding
@ -664,7 +614,7 @@ primarily on the first atomic value encountered
recursively.
*/
int
NCZ_inferattrtype(NCjson* value, nc_type typehint, nc_type* typeidp)
NCZ_inferattrtype(const NCjson* value, nc_type typehint, nc_type* typeidp)
{
int i,stat = NC_NOERR;
nc_type typeid;
@ -1093,7 +1043,7 @@ checksimplejson(NCjson* json, int depth)
/* Return 1 if the attribute will be stored as a complex JSON valued attribute; return 0 otherwise */
int
NCZ_iscomplexjson(NCjson* json, nc_type typehint)
NCZ_iscomplexjson(const NCjson* json, nc_type typehint)
{
int i, stat = 0;

View File

@ -78,7 +78,7 @@ NCZ_set_var_chunk_cache(int ncid, int varid, size_t cachesize, size_t nelems, fl
assert(grp && h5);
/* Find the var. */
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
{retval = NC_ENOTVAR; goto done;}
assert(var && var->hdr.id == varid);
@ -140,7 +140,7 @@ fprintf(stderr,"xxx: adjusting cache for: %s\n",var->hdr.name);
zcache->chunksize = zvar->chunksize;
zcache->chunkcount = 1;
if(var->ndims > 0) {
int i;
size_t i;
for(i=0;i<var->ndims;i++) {
zcache->chunkcount *= var->chunksizes[i];
}
@ -184,7 +184,7 @@ NCZ_create_chunk_cache(NC_VAR_INFO_T* var, size64_t chunksize, char dimsep, NCZC
cache->chunkcount = 1;
if(var->ndims > 0) {
int i;
size_t i;
for(i=0;i<var->ndims;i++) {
cache->chunkcount *= var->chunksizes[i];
}
@ -297,7 +297,7 @@ NCZ_read_cache_chunk(NCZChunkCache* cache, const size64_t* indices, void** datap
/* Create a new entry */
if((entry = calloc(1,sizeof(NCZCacheEntry)))==NULL)
{stat = NC_ENOMEM; goto done;}
memcpy(entry->indices,indices,rank*sizeof(size64_t));
memcpy(entry->indices,indices,(size_t)rank*sizeof(size64_t));
/* Create the key for this cache */
if((stat = NCZ_buildchunkpath(cache,indices,&entry->key))) goto done;
entry->hashkey = hkey;
@ -496,7 +496,8 @@ done:
int
NCZ_ensure_fill_chunk(NCZChunkCache* cache)
{
int i, stat = NC_NOERR;
int stat = NC_NOERR;
size_t i;
NC_VAR_INFO_T* var = cache->var;
nc_type typeid = var->type_info->hdr.id;
size_t typesize = var->type_info->size;
@ -605,7 +606,7 @@ int
NCZ_buildchunkkey(size_t R, const size64_t* chunkindices, char dimsep, char** keyp)
{
int stat = NC_NOERR;
int r;
size_t r;
NCbytes* key = ncbytesnew();
if(keyp) *keyp = NULL;
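NCZ_buildchunkkey (above) joins the per-dimension chunk indices with the variable's dimension separator. A hedged sketch, assuming the result is simply the joined indices with no leading separator:

#include <stdlib.h>    /* free */
/* Sketch: indices {1,2,3} with dimsep '.' are expected to yield the key "1.2.3". */
static void
example_chunkkey(void)
{
    char* key = NULL;
    size64_t indices[3] = {1, 2, 3};
    if(NCZ_buildchunkkey(3, indices, '.', &key) == NC_NOERR) {
        /* ... use key ... */
        free(key);
    }
}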
@ -670,7 +671,7 @@ put_chunk(NCZChunkCache* cache, NCZCacheEntry* entry)
if((stat = NC_reclaim_data_all(file->controller,tid,entry->data,cache->chunkcount))) goto done;
entry->data = NULL;
entry->data = strchunk; strchunk = NULL;
entry->size = cache->chunkcount * maxstrlen;
entry->size = (cache->chunkcount * (size64_t)maxstrlen);
entry->isfixedstring = 1;
}
@ -865,7 +866,7 @@ NCZ_dumpxcacheentry(NCZChunkCache* cache, NCZCacheEntry* e, NCbytes* buf)
{
char s[8192];
char idx[64];
int i;
size_t i;
ncbytescat(buf,"{");
snprintf(s,sizeof(s),"modified=%u isfiltered=%u indices=",

View File

@ -31,7 +31,6 @@ Benchmarks: @HAS_BENCHMARKS@
NetCDF-2 API: @HAS_NC2@
HDF4 Support: @HAS_HDF4@
HDF5 Support: @HAS_HDF5@
NetCDF-4 API: @HAS_NC4@
CDF5 Support: @HAS_CDF5@
NC-4 Parallel Support: @HAS_PARALLEL4@
PnetCDF Support: @HAS_PNETCDF@
@ -44,10 +43,10 @@ S3 Support: @HAS_S3@
S3 SDK: @WHICH_S3_SDK@
NCZarr Support: @HAS_NCZARR@
NCZarr Zip Support: @HAS_NCZARR_ZIP@
Diskless Support: @HAS_DISKLESS@
MMap Support: @HAS_MMAP@
JNA Support: @HAS_JNA@
ERANGE Fill Support: @HAS_ERANGE_FILL@
Relaxed Boundary Check: @RELAX_COORD_BOUND@

View File

@ -56,10 +56,13 @@ NC4_initialize(void)
if(getenv(NCLOGLEVELENV) != NULL) {
char* slevel = getenv(NCLOGLEVELENV);
long level = atol(slevel);
#ifdef USE_NETCDF4
if(level >= 0)
nc_set_log_level((int)level);
}
#endif
#endif
NC_initialize_reserved();
return ret;
}

View File

@ -36,30 +36,37 @@
*/
/** @internal List of reserved attributes.
WARNING: This list must be in (strcmp) sorted order for binary search. */
static const NC_reservedatt NC_reserved[] = {
WARNING: This list will be sorted in (strcmp) sorted order for binary search.
So order here does not matter; the table will be modified by sorting.
*/
static NC_reservedatt NC_reserved[] = {
{NC_ATT_CLASS, READONLYFLAG|HIDDENATTRFLAG}, /*CLASS*/
{NC_ATT_DIMENSION_LIST, READONLYFLAG|HIDDENATTRFLAG}, /*DIMENSION_LIST*/
{NC_ATT_NAME, READONLYFLAG|HIDDENATTRFLAG}, /*NAME*/
{NC_ATT_REFERENCE_LIST, READONLYFLAG|HIDDENATTRFLAG}, /*REFERENCE_LIST*/
{NC_XARRAY_DIMS, READONLYFLAG|NAMEONLYFLAG|HIDDENATTRFLAG}, /*_ARRAY_DIMENSIONS*/
{NC_XARRAY_DIMS, READONLYFLAG|HIDDENATTRFLAG}, /*_ARRAY_DIMENSIONS*/
{NC_ATT_CODECS, VARFLAG|READONLYFLAG|NAMEONLYFLAG}, /*_Codecs*/
{NC_ATT_FORMAT, READONLYFLAG}, /*_Format*/
{ISNETCDF4ATT, READONLYFLAG|NAMEONLYFLAG}, /*_IsNetcdf4*/
{ISNETCDF4ATT, READONLYFLAG|NAMEONLYFLAG|VIRTUALFLAG}, /*_IsNetcdf4*/
{NCPROPS,READONLYFLAG|NAMEONLYFLAG|HIDDENATTRFLAG}, /*_NCProperties*/
{NC_NCZARR_ATTR_UC, READONLYFLAG|HIDDENATTRFLAG}, /*_NCZARR_ATTR */
{NC_ATT_COORDINATES, READONLYFLAG|HIDDENATTRFLAG}, /*_Netcdf4Coordinates*/
{NC_ATT_DIMID_NAME, READONLYFLAG|HIDDENATTRFLAG}, /*_Netcdf4Dimid*/
{SUPERBLOCKATT, READONLYFLAG|NAMEONLYFLAG}, /*_SuperblockVersion*/
{SUPERBLOCKATT, READONLYFLAG|NAMEONLYFLAG|VIRTUALFLAG}, /*_SuperblockVersion*/
{NC_ATT_NC3_STRICT_NAME, READONLYFLAG}, /*_nc3_strict*/
{NC_ATT_NC3_STRICT_NAME, READONLYFLAG}, /*_nc3_strict*/
{NC_NCZARR_ATTR, READONLYFLAG|HIDDENATTRFLAG}, /*_nczarr_attr */
{NC_NCZARR_GROUP, READONLYFLAG|HIDDENATTRFLAG}, /*_nczarr_group */
{NC_NCZARR_ARRAY, READONLYFLAG|HIDDENATTRFLAG}, /*_nczarr_array */
{NC_NCZARR_SUPERBLOCK, READONLYFLAG|HIDDENATTRFLAG}, /*_nczarr_superblock */
};
#define NRESERVED (sizeof(NC_reserved) / sizeof(NC_reservedatt)) /*|NC_reservedatt|*/
#define NRESERVED (sizeof(NC_reserved) / sizeof(NC_reservedatt)) /*|NC_reservedatt*/
/*Forward */
static int NC4_move_in_NCList(NC* nc, int new_id);
static int bincmp(const void* arg1, const void* arg2);
static int sortcmp(const void* arg1, const void* arg2);
#if NC_HAS_LOGGING
#if LOGGING
/* This is the severity level of messages which will be logged. Use
severity 0 for errors, 1 for important log messages, 2 for less
important, etc. */
@ -129,7 +136,7 @@ nc_log(int severity, const char *fmt, ...)
fprintf(f, "\n");
fflush(f);
}
#endif /* NC_HAS_LOGGING */
#endif /* LOGGING */
/**
* @internal Check and normalize and name.
@ -1706,7 +1713,7 @@ nc4_normalize_name(const char *name, char *norm_name)
return NC_NOERR;
}
#ifdef ENABLE_SET_LOG_LEVEL
#ifdef NETCDF_ENABLE_SET_LOG_LEVEL
/**
* Initialize parallel I/O logging. For parallel I/O builds, open log
@ -1719,7 +1726,7 @@ nc4_init_logging(void)
{
int ret = NC_NOERR;
#if NC_HAS_LOGGING
#if LOGGING
#if NC_HAS_PARALLEL4
if (!LOG_FILE && nc_log_level >= 0)
{
@ -1745,7 +1752,7 @@ nc4_init_logging(void)
return NC_EINTERNAL;
}
#endif /* NC_HAS_PARALLEL4 */
#endif /* NC_HAS_LOGGING */
#endif /* LOGGING */
return ret;
}
@ -1759,7 +1766,7 @@ nc4_init_logging(void)
void
nc4_finalize_logging(void)
{
#if NC_HAS_LOGGING
#if LOGGING
#if NC_HAS_PARALLEL4
if (LOG_FILE)
{
@ -1767,7 +1774,7 @@ nc4_finalize_logging(void)
LOG_FILE = NULL;
}
#endif /* NC_HAS_PARALLEL4 */
#endif /* NC_HAS_LOGGING */
#endif /* LOGGING */
}
/**
@ -1786,7 +1793,7 @@ nc4_finalize_logging(void)
int
nc_set_log_level(int new_level)
{
#if NC_HAS_LOGGING
#if LOGGING
/* Remember the new level. */
nc_log_level = new_level;
@ -1807,9 +1814,9 @@ nc_set_log_level(int new_level)
return NC_NOERR;
}
#endif /* ENABLE_SET_LOG_LEVEL */
#endif /* NETCDF_ENABLE_SET_LOG_LEVEL */
#if NC_HAS_LOGGING
#if LOGGING
#define MAX_NESTS 10
/**
* @internal Recursively print the metadata of a group.
@ -1978,7 +1985,7 @@ log_metadata_nc(NC_FILE_INFO_T *h5)
return NC_NOERR;
}
#endif /*NC_HAS_LOGGING */
#endif /*LOGGING */
/**
* @internal Show the in-memory metadata for a netcdf file. This
@ -1995,7 +2002,7 @@ int
NC4_show_metadata(int ncid)
{
int retval = NC_NOERR;
#if NC_HAS_LOGGING
#if LOGGING
NC_FILE_INFO_T *h5;
int old_log_level = nc_log_level;
@ -2007,7 +2014,7 @@ NC4_show_metadata(int ncid)
nc_log_level = 2;
retval = log_metadata_nc(h5);
nc_log_level = old_log_level;
#endif /*NC_HAS_LOGGING*/
#endif /*LOGGING*/
return retval;
}
@ -2021,6 +2028,7 @@ NC4_show_metadata(int ncid)
const NC_reservedatt*
NC_findreserved(const char* name)
{
#if 0
int n = NRESERVED;
int L = 0;
int R = (n - 1);
@ -2037,8 +2045,12 @@ NC_findreserved(const char* name)
R = (m - 1);
}
return NULL;
#else
return (const NC_reservedatt*)bsearch(name,NC_reserved,NRESERVED,sizeof(NC_reservedatt),bincmp);
#endif
}
/* Ed Hartness requires this function */
static int
NC4_move_in_NCList(NC* nc, int new_id)
{
@ -2051,153 +2063,25 @@ NC4_move_in_NCList(NC* nc, int new_id)
return stat;
}
/**************************************************/
/* NCglobal state management */
static NCglobalstate* nc_globalstate = NULL;
static int
NC_createglobalstate(void)
sortcmp(const void* arg1, const void* arg2)
{
int stat = NC_NOERR;
const char* tmp = NULL;
if(nc_globalstate == NULL) {
nc_globalstate = calloc(1,sizeof(NCglobalstate));
}
/* Initialize struct pointers */
if((nc_globalstate->rcinfo = calloc(1,sizeof(struct NCRCinfo)))==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->entries = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->s3profiles = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
/* Get environment variables */
if(getenv(NCRCENVIGNORE) != NULL)
nc_globalstate->rcinfo->ignore = 1;
tmp = getenv(NCRCENVRC);
if(tmp != NULL && strlen(tmp) > 0)
nc_globalstate->rcinfo->rcfile = strdup(tmp);
/* Initialize chunk cache defaults */
nc_globalstate->chunkcache.size = DEFAULT_CHUNK_CACHE_SIZE; /**< Default chunk cache size. */
nc_globalstate->chunkcache.nelems = DEFAULT_CHUNKS_IN_CACHE; /**< Default chunk cache number of elements. */
nc_globalstate->chunkcache.preemption = DEFAULT_CHUNK_CACHE_PREEMPTION; /**< Default chunk cache preemption. */
done:
return stat;
NC_reservedatt* r1 = (NC_reservedatt*)arg1;
NC_reservedatt* r2 = (NC_reservedatt*)arg2;
return strcmp(r1->name,r2->name);
}
/* Get global state */
NCglobalstate*
NC_getglobalstate(void)
static int
bincmp(const void* arg1, const void* arg2)
{
if(nc_globalstate == NULL)
NC_createglobalstate();
return nc_globalstate;
const char* name = (const char*)arg1;
NC_reservedatt* ra = (NC_reservedatt*)arg2;
return strcmp(name,ra->name);
}
void
NC_freeglobalstate(void)
NC_initialize_reserved(void)
{
if(nc_globalstate != NULL) {
nullfree(nc_globalstate->tempdir);
nullfree(nc_globalstate->home);
nullfree(nc_globalstate->cwd);
nullfree(nc_globalstate->aws.default_region);
nullfree(nc_globalstate->aws.config_file);
nullfree(nc_globalstate->aws.profile);
nullfree(nc_globalstate->aws.access_key_id);
nullfree(nc_globalstate->aws.secret_access_key);
if(nc_globalstate->rcinfo) {
NC_rcclear(nc_globalstate->rcinfo);
free(nc_globalstate->rcinfo);
}
free(nc_globalstate);
nc_globalstate = NULL;
}
}
/**************************************************/
/* Specific property functions */
/**
Provide a function to store global data alignment
information.
Repeated calls to nc_set_alignment will overwrite any existing values.
If defined, then for every file created or opened after the call to
nc_set_alignment, and for every new variable added to the file, the
most recently set threshold and alignment values will be applied
to that variable.
The nc_set_alignment function causes new data written to a
netCDF-4 file to be aligned on disk to a specified block
size. To be effective, alignment should be the system disk block
size, or a multiple of it. This setting is effective with MPI
I/O and other parallel systems.
This is a trade-off of write speed versus file size. Alignment
leaves holes between file objects. The default of no alignment
writes file objects contiguously, without holes. Alignment has
no impact on file readability.
Alignment settings apply only indirectly, through the file open
functions. Call nc_set_alignment first, then nc_create or
nc_open for one or more files. Current alignment settings are
locked in when each file is opened, then forgotten when the same
file is closed. For illustration, it is possible to write
different files at the same time with different alignments, by
interleaving nc_set_alignment and nc_open calls.
Alignment applies to all newly written low-level file objects at
or above the threshold size, including chunks of variables,
attributes, and internal infrastructure. Alignment is not locked
in to a data variable. It can change between data chunks of the
same variable, based on a file's history.
Refer to H5Pset_alignment in HDF5 documentation for more
specific details, interactions, and additional rules.
@param threshold The minimum size to which alignment is applied.
@param alignment The alignment value.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_set_alignment(int threshold, int alignment)
{
NCglobalstate* gs = NC_getglobalstate();
gs->alignment.threshold = threshold;
gs->alignment.alignment = alignment;
gs->alignment.defined = 1;
return NC_NOERR;
}
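The documentation above prescribes a strict ordering: set the alignment, then create or open the file so the values are locked in. A minimal sketch of that ordering using the standard create/close calls; the 4096-byte threshold/alignment is only an example value.

/* Sketch: alignment must be set before nc_create/nc_open to take effect. */
static int
example_aligned_create(void)
{
    int ncid, stat;
    if((stat = nc_set_alignment(4096, 4096))) return stat; /* threshold, alignment */
    if((stat = nc_create("aligned.nc", NC_NETCDF4, &ncid))) return stat;
    /* ... define dimensions and variables; objects at or above the threshold
       are started on 4096-byte boundaries ... */
    return nc_close(ncid);
}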
/**
Provide get function to retrieve global data alignment
information.
The nc_get_alignment function returns the last values set by
nc_set_alignment. If nc_set_alignment has not been called, then
it returns the value 0 for both threshold and alignment.
@param thresholdp Return the current minimum size to which alignment is applied or zero.
@param alignmentp Return the current alignment value or zero.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_get_alignment(int* thresholdp, int* alignmentp)
{
NCglobalstate* gs = NC_getglobalstate();
if(thresholdp) *thresholdp = gs->alignment.threshold;
if(alignmentp) *alignmentp = gs->alignment.alignment;
return NC_NOERR;
/* Guarantee the reserved attribute list is sorted */
qsort((void*)NC_reserved,NRESERVED,sizeof(NC_reservedatt),sortcmp);
}
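The replacement above trades the hand-written binary search for qsort/bsearch over the same table: sortcmp orders two NC_reservedatt entries, while bincmp compares a bare attribute name against an entry. A hedged usage sketch (assuming the second struct member is named flags, matching the {name, flags} initializers earlier in this file):

/* Sketch: after NC_initialize_reserved() has sorted the table, a lookup is a
   plain bsearch by attribute name. */
static int
example_is_reserved_readonly(const char* attname)
{
    const NC_reservedatt* ra = NC_findreserved(attname);
    return ra != NULL && (ra->flags & READONLYFLAG) != 0;
}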

View File

@ -15,34 +15,6 @@
#include "nc4dispatch.h"
#include <stddef.h>
#if 0
#ifdef NETCDF_ENABLE_DAP4
EXTERNL NC* NCD4_get_substrate_nc(int ncid);
#endif
#endif
/* The sizes of types may vary from platform to platform, but within
* netCDF files, type sizes are fixed. */
#define NC_CHAR_LEN sizeof(char) /**< @internal Size of char. */
#define NC_STRING_LEN sizeof(char *) /**< @internal Size of char *. */
#define NC_BYTE_LEN 1 /**< @internal Size of byte. */
#define NC_SHORT_LEN 2 /**< @internal Size of short. */
#define NC_INT_LEN 4 /**< @internal Size of int. */
#define NC_FLOAT_LEN 4 /**< @internal Size of float. */
#define NC_DOUBLE_LEN 8 /**< @internal Size of double. */
#define NC_INT64_LEN 8 /**< @internal Size of int64. */
/** @internal Names of atomic types. */
const char* nc4_atomic_name[NUM_ATOMIC_TYPES] = {"none", "byte", "char",
"short", "int", "float",
"double", "ubyte",
"ushort", "uint",
"int64", "uint64", "string"};
static const size_t nc4_atomic_size[NUM_ATOMIC_TYPES] = {0, NC_BYTE_LEN, NC_CHAR_LEN, NC_SHORT_LEN,
NC_INT_LEN, NC_FLOAT_LEN, NC_DOUBLE_LEN,
NC_BYTE_LEN, NC_SHORT_LEN, NC_INT_LEN, NC_INT64_LEN,
NC_INT64_LEN, NC_STRING_LEN};
/**
* @internal Find all user-defined types for a location. This finds
* all user-defined types in a group.
@ -90,64 +62,6 @@ NC4_inq_typeids(int ncid, int *ntypes, int *typeids)
return NC_NOERR;
}
/**
* @internal Get the name and size of an atomic type. For strings, 1 is
* returned.
*
* @param typeid1 Type ID.
* @param name Gets the name of the type.
* @param size Gets the size of one element of the type in bytes.
*
* @return ::NC_NOERR No error.
* @return ::NC_EBADID Bad ncid.
* @return ::NC_EBADTYPE Type not found.
* @author Dennis Heimbigner
*/
int
NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size)
{
LOG((2, "nc_inq_atomic_type: typeid %d", typeid1));
if (typeid1 >= NUM_ATOMIC_TYPES)
return NC_EBADTYPE;
if (name)
strcpy(name, nc4_atomic_name[typeid1]);
if (size)
*size = nc4_atomic_size[typeid1];
return NC_NOERR;
}
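For reference, a minimal sketch of the helper removed above; its answers come straight from the nc4_atomic_name/nc4_atomic_size tables shown (and removed) earlier in this file's diff, so the example values are fixed by the netCDF file format rather than by the platform.

/* Sketch: querying an atomic type's canonical name and size. */
static void
example_atomic(void)
{
    char name[NC_MAX_NAME + 1];
    size_t size = 0;
    if(NC4_inq_atomic_type(NC_INT, name, &size) == NC_NOERR) {
        /* name == "int", size == 4 */
    }
}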
/**
* @internal Get the id and size of an atomic type by name.
*
* @param name [in] the name of the type.
* @param idp [out] the type index of the type.
* @param sizep [out] the size of one element of the type in bytes.
*
* @return ::NC_NOERR No error.
* @return ::NC_EBADID Bad ncid.
* @return ::NC_EBADTYPE Type not found.
* @author Dennis Heimbigner
*/
int
NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep)
{
int i;
LOG((2, "nc_lookup_atomic_type: name %s ", name));
if (name == NULL || strlen(name) == 0)
return NC_EBADTYPE;
for(i=0;i<NUM_ATOMIC_TYPES;i++) {
if(strcasecmp(name,nc4_atomic_name[i])==0) {
if(idp) *idp = i;
if(sizep) *sizep = nc4_atomic_size[i];
return NC_NOERR;
}
}
return NC_EBADTYPE;
}
/**
* @internal Get the name and size of a type.
* For VLEN the base type len is returned.
@ -175,10 +89,7 @@ NC4_inq_type(int ncid, nc_type typeid1, char *name, size_t *size)
/* If this is an atomic type, the answer is easy. */
if (typeid1 < NUM_ATOMIC_TYPES)
{
if (name)
strcpy(name, nc4_atomic_name[typeid1]);
if (size)
*size = nc4_atomic_size[typeid1];
if((retval=NC4_inq_atomic_type(typeid1, name, size))) return retval;
return NC_NOERR;
}
@ -197,8 +108,6 @@ NC4_inq_type(int ncid, nc_type typeid1, char *name, size_t *size)
{
if (type->nc_type_class == NC_VLEN)
*size = sizeof(nc_vlen_t);
else if (type->nc_type_class == NC_STRING)
*size = NC_STRING_LEN;
else
*size = type->size;
}
@ -256,9 +165,9 @@ NC4_inq_user_type(int ncid, nc_type typeid1, char *name, size_t *size,
{
if (type->nc_type_class == NC_VLEN)
*size = sizeof(nc_vlen_t);
else if (type->nc_type_class == NC_STRING)
*size = NC_STRING_LEN;
else
else if (type->nc_type_class == NC_STRING) {
if((retval=NC4_inq_type(ncid,typeid1,NULL,size))) return retval;
} else
*size = type->size;
}
if (name)

View File

@ -23,7 +23,6 @@ libsprivate="@LIBS@"
has_dap="@HAS_DAP@"
has_dap4="@HAS_DAP4@"
has_nc2="@HAS_NC2@"
has_nc4="@HAS_NC4@"
has_hdf4="@HAS_HDF4@"
has_pnetcdf="@HAS_PNETCDF@"
has_hdf5="@HAS_HDF5@"
@ -57,8 +56,7 @@ Available values for OPTION include:
--has-dap4 whether DAP4 is enabled in this build
--has-dap same as --has-dap2 (Deprecated)
--has-nc2 whether NetCDF-2 API is enabled
--has-nc4 whether NetCDF-4/HDF-5 is enabled in this build
--has-hdf5 whether HDF5 is used in build (always the same as --has-nc4)
--has-hdf5 whether HDF5 is used in build
--has-hdf4 whether HDF4 was used in build
--has-logging whether logging is enabled with --enable-logging.
--has-pnetcdf whether PnetCDF was used in build
@ -100,7 +98,6 @@ all()
echo " --has-dap2 -> $has_dap"
echo " --has-dap4 -> $has_dap4"
echo " --has-nc2 -> $has_nc2"
echo " --has-nc4 -> $has_nc4"
echo " --has-hdf5 -> $has_hdf5"
echo " --has-hdf4 -> $has_hdf4"
echo " --has-logging -> $has_logging"
@ -189,10 +186,6 @@ while test $# -gt 0; do
echo $has_nc2
;;
--has-nc4)
echo $has_nc4
;;
--has-hdf5)
echo $has_hdf5
;;

View File

@ -49,7 +49,7 @@ IF(USE_HDF5 AND NETCDF_ENABLE_FILTER_TESTING)
build_bin_test(test_filter_order)
build_bin_test(test_filter_repeat)
build_bin_test(tst_filter_vlen)
if(NOT MINGW)
if(NOT MINGW AND NOT CYGWIN)
ADD_SH_TEST(nc_test4 tst_filter)
endif()
ADD_SH_TEST(nc_test4 tst_specific_filters)

View File

@ -129,7 +129,7 @@ ref_tst_interops4.nc CMakeLists.txt run_grp_rename.sh \
run_empty_vlen_test.sh ref_hdf5_compat1.nc ref_hdf5_compat2.nc \
ref_hdf5_compat3.nc tst_misc.sh tdset.h5 tst_szip.sh ref_szip.h5 \
ref_szip.cdl tst_filter.sh bzip2.cdl noop.cdl ref_filtered.cdl \
ref_unfiltered.cdl ref_bzip2.c findplugin.in ref_unfilteredvv.cdl \
ref_unfiltered.cdl ref_bzip2.c ref_unfilteredvv.cdl \
ref_filteredvv.cdl ref_multi.cdl \
ref_ncgenF.cdl ref_nccopyF.cdl \
ref_filter_repeat.txt ref_fillonly.cdl test_fillonly.sh \

View File

@ -6,7 +6,7 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi
# This shell script tests BOM support in ncgen
set -e
set -x
# add hack for sunos
export srcdir;

View File

@ -82,8 +82,10 @@ void
markcdf4(const char* msg)
{
enhanced_flag = 1;
if(markcdf4_msg == NULL)
if(markcdf4_msg == NULL) {
markcdf4_msg = (char*)msg;
}
}
char*

View File

@ -128,10 +128,10 @@ static Symbol* makespecial(int tag, Symbol* vsym, Symbol* tsym, void* data, int
static int containsfills(Datalist* list);
static void vercheck(int ncid);
static long long extractint(NCConstant* con);
static Symbol* identkeyword(const Symbol*);
#ifdef USE_NETCDF4
static int parsefilterflag(const char* sdata0, Specialdata* special);
static int parsecodecsflag(const char* sdata0, Specialdata* special);
static Symbol* identkeyword(const Symbol*);
#ifdef GENDEBUG1
static void printfilters(int nfilters, NC_H5_Filterspec** filters);

View File

@ -20,7 +20,7 @@
#define yy_flex_debug ncg_flex_debug
#define yyin ncgin
#define yyleng ncgleng
#define yyncgenllex
#define yylex ncglex
#define yylineno ncglineno
#define yyout ncgout
#define yyrestart ncgrestart
@ -113,7 +113,7 @@
#ifdef yylex
#define ncglex_ALREADY_DEFINED
#else
#define yyncgenllex
#define yylex ncglex
#endif
#ifdef yyrestart
@ -1584,8 +1584,8 @@ int yy_flex_debug = 0;
#define YY_MORE_ADJ 0
#define YY_RESTORE_YY_MORE_OFFSET
char *yytext;
#line 1 "ncgen/ncgen.l"
#line 2 "ncgen/ncgen.l"
#line 1 "ncgen.l"
#line 2 "ncgen.l"
/*********************************************************************
* Copyright 1993, UCAR/Unidata
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
@ -1993,7 +1993,7 @@ YY_DECL
}
{
#line 226 "ncgen/ncgen.l"
#line 226 "ncgen.l"
#line 1998 "ncgenl.c"
@ -2054,14 +2054,14 @@ do_action: /* This label is used only to access EOF actions. */
case 1:
YY_RULE_SETUP
#line 227 "ncgen/ncgen.l"
#line 227 "ncgen.l"
{ /* whitespace */
break;
}
YY_BREAK
case 2:
YY_RULE_SETUP
#line 231 "ncgen/ncgen.l"
#line 231 "ncgen.l"
{ /* comment */
break;
}
@ -2069,7 +2069,7 @@ YY_RULE_SETUP
case 3:
/* rule 3 can match eol */
YY_RULE_SETUP
#line 235 "ncgen/ncgen.l"
#line 235 "ncgen.l"
{int len; char* s = NULL;
/* In netcdf4, this will be used in a variety
of places, so only remove escapes */
@ -2093,7 +2093,7 @@ yytext[MAXTRST-1] = '\0';
YY_BREAK
case 4:
YY_RULE_SETUP
#line 256 "ncgen/ncgen.l"
#line 256 "ncgen.l"
{ /* drop leading 0x; pad to even number of chars */
char* p = yytext+2;
int len = yyleng - 2;
@ -2108,143 +2108,143 @@ YY_RULE_SETUP
YY_BREAK
case 5:
YY_RULE_SETUP
#line 268 "ncgen/ncgen.l"
#line 268 "ncgen.l"
{return lexdebug(COMPOUND);}
YY_BREAK
case 6:
YY_RULE_SETUP
#line 269 "ncgen/ncgen.l"
#line 269 "ncgen.l"
{return lexdebug(ENUM);}
YY_BREAK
case 7:
YY_RULE_SETUP
#line 270 "ncgen/ncgen.l"
#line 270 "ncgen.l"
{return lexdebug(OPAQUE_);}
YY_BREAK
case 8:
YY_RULE_SETUP
#line 272 "ncgen/ncgen.l"
#line 272 "ncgen.l"
{return lexdebug(FLOAT_K);}
YY_BREAK
case 9:
YY_RULE_SETUP
#line 273 "ncgen/ncgen.l"
#line 273 "ncgen.l"
{return lexdebug(DOUBLE_K);}
YY_BREAK
case 10:
YY_RULE_SETUP
#line 274 "ncgen/ncgen.l"
#line 274 "ncgen.l"
{return lexdebug(CHAR_K);}
YY_BREAK
case 11:
YY_RULE_SETUP
#line 275 "ncgen/ncgen.l"
#line 275 "ncgen.l"
{return lexdebug(BYTE_K);}
YY_BREAK
case 12:
YY_RULE_SETUP
#line 276 "ncgen/ncgen.l"
#line 276 "ncgen.l"
{return lexdebug(SHORT_K);}
YY_BREAK
case 13:
YY_RULE_SETUP
#line 277 "ncgen/ncgen.l"
#line 277 "ncgen.l"
{return lexdebug(INT_K);}
YY_BREAK
case 14:
YY_RULE_SETUP
#line 278 "ncgen/ncgen.l"
#line 278 "ncgen.l"
{return lexdebug(identcheck(UBYTE_K));}
YY_BREAK
case 15:
YY_RULE_SETUP
#line 279 "ncgen/ncgen.l"
#line 279 "ncgen.l"
{return lexdebug(identcheck(USHORT_K));}
YY_BREAK
case 16:
YY_RULE_SETUP
#line 280 "ncgen/ncgen.l"
#line 280 "ncgen.l"
{return lexdebug(identcheck(UINT_K));}
YY_BREAK
case 17:
YY_RULE_SETUP
#line 281 "ncgen/ncgen.l"
#line 281 "ncgen.l"
{return lexdebug(identcheck(INT64_K));}
YY_BREAK
case 18:
YY_RULE_SETUP
#line 282 "ncgen/ncgen.l"
#line 282 "ncgen.l"
{return lexdebug(identcheck(UINT64_K));}
YY_BREAK
case 19:
YY_RULE_SETUP
#line 283 "ncgen/ncgen.l"
#line 283 "ncgen.l"
{return lexdebug(identcheck(STRING_K));}
YY_BREAK
case 20:
YY_RULE_SETUP
#line 285 "ncgen/ncgen.l"
#line 285 "ncgen.l"
{return lexdebug(FLOAT_K);}
YY_BREAK
case 21:
YY_RULE_SETUP
#line 286 "ncgen/ncgen.l"
#line 286 "ncgen.l"
{return lexdebug(INT_K);}
YY_BREAK
case 22:
YY_RULE_SETUP
#line 287 "ncgen/ncgen.l"
#line 287 "ncgen.l"
{return lexdebug(INT_K);}
YY_BREAK
case 23:
YY_RULE_SETUP
#line 288 "ncgen/ncgen.l"
#line 288 "ncgen.l"
{return lexdebug(identcheck(UINT_K));}
YY_BREAK
case 24:
YY_RULE_SETUP
#line 289 "ncgen/ncgen.l"
#line 289 "ncgen.l"
{return lexdebug(identcheck(UINT_K));}
YY_BREAK
case 25:
YY_RULE_SETUP
#line 292 "ncgen/ncgen.l"
#line 292 "ncgen.l"
{int32_val = -1;
return lexdebug(NC_UNLIMITED_K);}
YY_BREAK
case 26:
YY_RULE_SETUP
#line 295 "ncgen/ncgen.l"
#line 295 "ncgen.l"
{return lexdebug(TYPES);}
YY_BREAK
case 27:
YY_RULE_SETUP
#line 296 "ncgen/ncgen.l"
#line 296 "ncgen.l"
{return lexdebug(DIMENSIONS);}
YY_BREAK
case 28:
YY_RULE_SETUP
#line 297 "ncgen/ncgen.l"
#line 297 "ncgen.l"
{return lexdebug(VARIABLES);}
YY_BREAK
case 29:
YY_RULE_SETUP
#line 298 "ncgen/ncgen.l"
#line 298 "ncgen.l"
{return lexdebug(DATA);}
YY_BREAK
case 30:
YY_RULE_SETUP
#line 299 "ncgen/ncgen.l"
#line 299 "ncgen.l"
{return lexdebug(GROUP);}
YY_BREAK
case 31:
YY_RULE_SETUP
#line 301 "ncgen/ncgen.l"
#line 301 "ncgen.l"
{BEGIN(TEXT);return lexdebug(NETCDF);}
YY_BREAK
case 32:
YY_RULE_SETUP
#line 303 "ncgen/ncgen.l"
#line 303 "ncgen.l"
{ /* missing value (pre-2.4 backward compatibility) */
if (yytext[0] == '-') {
double_val = -INFINITY;
@ -2257,7 +2257,7 @@ YY_RULE_SETUP
YY_BREAK
case 33:
YY_RULE_SETUP
#line 312 "ncgen/ncgen.l"
#line 312 "ncgen.l"
{ /* missing value (pre-2.4 backward compatibility) */
double_val = NAN;
specialconstants = 1;
@ -2266,7 +2266,7 @@ YY_RULE_SETUP
YY_BREAK
case 34:
YY_RULE_SETUP
#line 318 "ncgen/ncgen.l"
#line 318 "ncgen.l"
{/* missing value (pre-2.4 backward compatibility)*/
if (yytext[0] == '-') {
float_val = -INFINITYF;
@ -2279,7 +2279,7 @@ YY_RULE_SETUP
YY_BREAK
case 35:
YY_RULE_SETUP
#line 327 "ncgen/ncgen.l"
#line 327 "ncgen.l"
{ /* missing value (pre-2.4 backward compatibility) */
float_val = NANF;
specialconstants = 1;
@ -2288,7 +2288,7 @@ YY_RULE_SETUP
YY_BREAK
case 36:
YY_RULE_SETUP
#line 333 "ncgen/ncgen.l"
#line 333 "ncgen.l"
{
#ifdef USE_NETCDF4
if(l_flag == L_C || l_flag == L_BINARY)
@ -2301,7 +2301,7 @@ YY_RULE_SETUP
YY_BREAK
case 37:
YY_RULE_SETUP
#line 343 "ncgen/ncgen.l"
#line 343 "ncgen.l"
{
bbClear(lextext);
bbAppendn(lextext,(char*)yytext,(size_t)yyleng+1); /* include null */
@ -2312,7 +2312,7 @@ YY_RULE_SETUP
YY_BREAK
case 38:
YY_RULE_SETUP
#line 352 "ncgen/ncgen.l"
#line 352 "ncgen.l"
{struct Specialtoken* st;
bbClear(lextext);
bbAppendn(lextext,(char*)yytext,(size_t)yyleng+1); /* include null */
@ -2326,7 +2326,7 @@ YY_RULE_SETUP
case 39:
/* rule 39 can match eol */
YY_RULE_SETUP
#line 362 "ncgen/ncgen.l"
#line 362 "ncgen.l"
{
char c;
char* p; char* q;
@ -2346,7 +2346,7 @@ YY_RULE_SETUP
YY_BREAK
case 40:
YY_RULE_SETUP
#line 379 "ncgen/ncgen.l"
#line 379 "ncgen.l"
{ char* id = NULL; size_t len;
len = strlen(yytext);
len = (size_t)unescape(yytext,len,ISIDENT,&id);
@ -2361,7 +2361,7 @@ YY_RULE_SETUP
YY_BREAK
case 41:
YY_RULE_SETUP
#line 391 "ncgen/ncgen.l"
#line 391 "ncgen.l"
{
/*
We need to try to see what size of integer ((u)int).
@ -2442,7 +2442,7 @@ done: return 0;
YY_BREAK
case 42:
YY_RULE_SETUP
#line 469 "ncgen/ncgen.l"
#line 469 "ncgen.l"
{
int c;
int token = 0;
@ -2494,7 +2494,7 @@ YY_RULE_SETUP
YY_BREAK
case 43:
YY_RULE_SETUP
#line 517 "ncgen/ncgen.l"
#line 517 "ncgen.l"
{
if (sscanf((char*)yytext, "%le", &double_val) != 1) {
snprintf(errstr, sizeof(errstr),"bad long or double constant: %s",(char*)yytext);
@ -2505,7 +2505,7 @@ YY_RULE_SETUP
YY_BREAK
case 44:
YY_RULE_SETUP
#line 524 "ncgen/ncgen.l"
#line 524 "ncgen.l"
{
if (sscanf((char*)yytext, "%e", &float_val) != 1) {
snprintf(errstr, sizeof(errstr),"bad float constant: %s",(char*)yytext);
@ -2517,7 +2517,7 @@ YY_RULE_SETUP
case 45:
/* rule 45 can match eol */
YY_RULE_SETUP
#line 531 "ncgen/ncgen.l"
#line 531 "ncgen.l"
{
(void) sscanf((char*)&yytext[1],"%c",&byte_val);
return lexdebug(BYTE_CONST);
@ -2525,7 +2525,7 @@ YY_RULE_SETUP
YY_BREAK
case 46:
YY_RULE_SETUP
#line 535 "ncgen/ncgen.l"
#line 535 "ncgen.l"
{
int oct = unescapeoct(&yytext[2]);
if(oct < 0) {
@ -2538,7 +2538,7 @@ YY_RULE_SETUP
YY_BREAK
case 47:
YY_RULE_SETUP
#line 544 "ncgen/ncgen.l"
#line 544 "ncgen.l"
{
int hex = unescapehex(&yytext[3]);
if(byte_val < 0) {
@ -2551,7 +2551,7 @@ YY_RULE_SETUP
YY_BREAK
case 48:
YY_RULE_SETUP
#line 553 "ncgen/ncgen.l"
#line 553 "ncgen.l"
{
switch ((char)yytext[2]) {
case 'a': byte_val = '\007'; break; /* not everyone under-
@ -2574,7 +2574,7 @@ YY_RULE_SETUP
case 49:
/* rule 49 can match eol */
YY_RULE_SETUP
#line 572 "ncgen/ncgen.l"
#line 572 "ncgen.l"
{
lineno++ ;
break;
@ -2582,7 +2582,7 @@ YY_RULE_SETUP
YY_BREAK
case 50:
YY_RULE_SETUP
#line 577 "ncgen/ncgen.l"
#line 577 "ncgen.l"
{/*initial*/
BEGIN(ST_C_COMMENT);
break;
@ -2591,21 +2591,21 @@ YY_RULE_SETUP
case 51:
/* rule 51 can match eol */
YY_RULE_SETUP
#line 582 "ncgen/ncgen.l"
#line 582 "ncgen.l"
{/* continuation */
break;
}
YY_BREAK
case 52:
YY_RULE_SETUP
#line 586 "ncgen/ncgen.l"
#line 586 "ncgen.l"
{/* final */
BEGIN(INITIAL);
break;
}
YY_BREAK
case YY_STATE_EOF(ST_C_COMMENT):
#line 591 "ncgen/ncgen.l"
#line 591 "ncgen.l"
{/* final, error */
fprintf(stderr,"unterminated /**/ comment");
BEGIN(INITIAL);
@ -2614,14 +2614,14 @@ case YY_STATE_EOF(ST_C_COMMENT):
YY_BREAK
case 53:
YY_RULE_SETUP
#line 597 "ncgen/ncgen.l"
#line 597 "ncgen.l"
{/* Note: this next rule will not work for UTF8 characters */
return lexdebug(yytext[0]) ;
}
YY_BREAK
case 54:
YY_RULE_SETUP
#line 600 "ncgen/ncgen.l"
#line 600 "ncgen.l"
ECHO;
YY_BREAK
#line 2627 "ncgenl.c"
@ -3630,7 +3630,7 @@ void yyfree (void * ptr )
#define YYTABLES_NAME "yytables"
#line 600 "ncgen/ncgen.l"
#line 600 "ncgen.l"
static int
lexdebug(int token)

View File

@ -74,7 +74,7 @@
#define yychar ncgchar
/* First part of user prologue. */
#line 11 "ncgen/ncgen.y"
#line 11 "ncgen.y"
/*
static char SccsId[] = "$Id: ncgen.y,v 1.42 2010/05/18 21:32:46 dmh Exp $";
@ -195,10 +195,10 @@ static Symbol* makespecial(int tag, Symbol* vsym, Symbol* tsym, void* data, int
static int containsfills(Datalist* list);
static void vercheck(int ncid);
static long long extractint(NCConstant* con);
static Symbol* identkeyword(const Symbol*);
#ifdef USE_NETCDF4
static int parsefilterflag(const char* sdata0, Specialdata* special);
static int parsecodecsflag(const char* sdata0, Specialdata* special);
static Symbol* identkeyword(const Symbol*);
#ifdef GENDEBUG1
static void printfilters(int nfilters, NC_H5_Filterspec** filters);
@ -1845,19 +1845,19 @@ yyreduce:
switch (yyn)
{
case 2: /* ncdesc: NETCDF datasetid rootgroup */
#line 246 "ncgen/ncgen.y"
#line 246 "ncgen.y"
{if (error_count > 0) YYABORT;}
#line 1851 "ncgeny.c"
break;
case 3: /* datasetid: DATASETID */
#line 249 "ncgen/ncgen.y"
#line 249 "ncgen.y"
{createrootgroup(datasetname);}
#line 1857 "ncgeny.c"
break;
case 8: /* $@1: %empty */
#line 268 "ncgen/ncgen.y"
#line 268 "ncgen.y"
{
Symbol* id = (yyvsp[-1].sym);
markcdf4("Group specification");
@ -1869,25 +1869,25 @@ yyreduce:
break;
case 9: /* $@2: %empty */
#line 277 "ncgen/ncgen.y"
#line 277 "ncgen.y"
{listpop(groupstack);}
#line 1875 "ncgeny.c"
break;
case 12: /* typesection: TYPES */
#line 283 "ncgen/ncgen.y"
#line 283 "ncgen.y"
{}
#line 1881 "ncgeny.c"
break;
case 13: /* typesection: TYPES typedecls */
#line 285 "ncgen/ncgen.y"
#line 285 "ncgen.y"
{markcdf4("Type specification");}
#line 1887 "ncgeny.c"
break;
case 16: /* typename: ident */
#line 291 "ncgen/ncgen.y"
#line 291 "ncgen.y"
{ /* Use when defining a type */
(yyvsp[0].sym)->objectclass = NC_TYPE;
if(dupobjectcheck(NC_TYPE,(yyvsp[0].sym)))
@ -1899,19 +1899,19 @@ yyreduce:
break;
case 17: /* type_or_attr_decl: typedecl */
#line 300 "ncgen/ncgen.y"
#line 300 "ncgen.y"
{}
#line 1905 "ncgeny.c"
break;
case 18: /* type_or_attr_decl: attrdecl ';' */
#line 300 "ncgen/ncgen.y"
#line 300 "ncgen.y"
{}
#line 1911 "ncgeny.c"
break;
case 25: /* enumdecl: primtype ENUM typename '{' enumidlist '}' */
#line 314 "ncgen/ncgen.y"
#line 314 "ncgen.y"
{
size_t i;
addtogroup((yyvsp[-3].sym)); /* sets prefix*/
@ -1942,13 +1942,13 @@ yyreduce:
break;
case 26: /* enumidlist: enumid */
#line 343 "ncgen/ncgen.y"
#line 343 "ncgen.y"
{(yyval.mark)=listlength(stack); listpush(stack,(void*)(yyvsp[0].sym));}
#line 1948 "ncgeny.c"
break;
case 27: /* enumidlist: enumidlist ',' enumid */
#line 345 "ncgen/ncgen.y"
#line 345 "ncgen.y"
{
size_t i;
(yyval.mark)=(yyvsp[-2].mark);
@ -1967,7 +1967,7 @@ yyreduce:
break;
case 28: /* enumid: ident '=' constint */
#line 362 "ncgen/ncgen.y"
#line 362 "ncgen.y"
{
(yyvsp[-2].sym)->objectclass=NC_TYPE;
(yyvsp[-2].sym)->subclass=NC_ECONST;
@ -1978,7 +1978,7 @@ yyreduce:
break;
case 29: /* opaquedecl: OPAQUE_ '(' INT_CONST ')' typename */
#line 371 "ncgen/ncgen.y"
#line 371 "ncgen.y"
{
vercheck(NC_OPAQUE);
addtogroup((yyvsp[0].sym)); /*sets prefix*/
@ -1992,7 +1992,7 @@ yyreduce:
break;
case 30: /* vlendecl: typeref '(' '*' ')' typename */
#line 383 "ncgen/ncgen.y"
#line 383 "ncgen.y"
{
Symbol* basetype = (yyvsp[-4].sym);
vercheck(NC_VLEN);
@ -2008,7 +2008,7 @@ yyreduce:
break;
case 31: /* compounddecl: COMPOUND typename '{' fields '}' */
#line 397 "ncgen/ncgen.y"
#line 397 "ncgen.y"
{
size_t i,j;
vercheck(NC_COMPOUND);
@ -2042,19 +2042,19 @@ yyreduce:
break;
case 32: /* fields: field ';' */
#line 429 "ncgen/ncgen.y"
#line 429 "ncgen.y"
{(yyval.mark)=(yyvsp[-1].mark);}
#line 2048 "ncgeny.c"
break;
case 33: /* fields: fields field ';' */
#line 430 "ncgen/ncgen.y"
#line 430 "ncgen.y"
{(yyval.mark)=(yyvsp[-2].mark);}
#line 2054 "ncgeny.c"
break;
case 34: /* field: typeref fieldlist */
#line 434 "ncgen/ncgen.y"
#line 434 "ncgen.y"
{
size_t i;
(yyval.mark)=(yyvsp[0].mark);
@ -2070,103 +2070,103 @@ yyreduce:
break;
case 35: /* primtype: CHAR_K */
#line 447 "ncgen/ncgen.y"
#line 447 "ncgen.y"
{ (yyval.sym) = primsymbols[NC_CHAR]; }
#line 2076 "ncgeny.c"
break;
case 36: /* primtype: BYTE_K */
#line 448 "ncgen/ncgen.y"
#line 448 "ncgen.y"
{ (yyval.sym) = primsymbols[NC_BYTE]; }
#line 2082 "ncgeny.c"
break;
case 37: /* primtype: SHORT_K */
#line 449 "ncgen/ncgen.y"
#line 449 "ncgen.y"
{ (yyval.sym) = primsymbols[NC_SHORT]; }
#line 2088 "ncgeny.c"
break;
case 38: /* primtype: INT_K */
#line 450 "ncgen/ncgen.y"
#line 450 "ncgen.y"
{ (yyval.sym) = primsymbols[NC_INT]; }
#line 2094 "ncgeny.c"
break;
case 39: /* primtype: FLOAT_K */
#line 451 "ncgen/ncgen.y"
#line 451 "ncgen.y"
{ (yyval.sym) = primsymbols[NC_FLOAT]; }
#line 2100 "ncgeny.c"
break;
case 40: /* primtype: DOUBLE_K */
#line 452 "ncgen/ncgen.y"
#line 452 "ncgen.y"
{ (yyval.sym) = primsymbols[NC_DOUBLE]; }
#line 2106 "ncgeny.c"
break;
case 41: /* primtype: UBYTE_K */
#line 453 "ncgen/ncgen.y"
#line 453 "ncgen.y"
{ vercheck(NC_UBYTE); (yyval.sym) = primsymbols[NC_UBYTE]; }
#line 2112 "ncgeny.c"
break;
case 42: /* primtype: USHORT_K */
#line 454 "ncgen/ncgen.y"
#line 454 "ncgen.y"
{ vercheck(NC_USHORT); (yyval.sym) = primsymbols[NC_USHORT]; }
#line 2118 "ncgeny.c"
break;
case 43: /* primtype: UINT_K */
#line 455 "ncgen/ncgen.y"
#line 455 "ncgen.y"
{ vercheck(NC_UINT); (yyval.sym) = primsymbols[NC_UINT]; }
#line 2124 "ncgeny.c"
break;
case 44: /* primtype: INT64_K */
#line 456 "ncgen/ncgen.y"
#line 456 "ncgen.y"
{ vercheck(NC_INT64); (yyval.sym) = primsymbols[NC_INT64]; }
#line 2130 "ncgeny.c"
break;
case 45: /* primtype: UINT64_K */
#line 457 "ncgen/ncgen.y"
#line 457 "ncgen.y"
{ vercheck(NC_UINT64); (yyval.sym) = primsymbols[NC_UINT64]; }
#line 2136 "ncgeny.c"
break;
case 46: /* primtype: STRING_K */
#line 458 "ncgen/ncgen.y"
#line 458 "ncgen.y"
{ vercheck(NC_STRING); (yyval.sym) = primsymbols[NC_STRING]; }
#line 2142 "ncgeny.c"
break;
case 48: /* dimsection: DIMENSIONS */
#line 462 "ncgen/ncgen.y"
#line 462 "ncgen.y"
{}
#line 2148 "ncgeny.c"
break;
case 49: /* dimsection: DIMENSIONS dimdecls */
#line 463 "ncgen/ncgen.y"
#line 463 "ncgen.y"
{}
#line 2154 "ncgeny.c"
break;
case 52: /* dim_or_attr_decl: dimdeclist */
#line 470 "ncgen/ncgen.y"
#line 470 "ncgen.y"
{}
#line 2160 "ncgeny.c"
break;
case 53: /* dim_or_attr_decl: attrdecl */
#line 470 "ncgen/ncgen.y"
#line 470 "ncgen.y"
{}
#line 2166 "ncgeny.c"
break;
case 56: /* dimdecl: dimd '=' constint */
#line 478 "ncgen/ncgen.y"
#line 478 "ncgen.y"
{
(yyvsp[-2].sym)->dim.declsize = (size_t)extractint((yyvsp[0].constant));
#ifdef GENDEBUG1
@ -2178,7 +2178,7 @@ fprintf(stderr,"dimension: %s = %llu\n",(yyvsp[-2].sym)->name,(unsigned long lon
break;
case 57: /* dimdecl: dimd '=' NC_UNLIMITED_K */
#line 486 "ncgen/ncgen.y"
#line 486 "ncgen.y"
{
(yyvsp[-2].sym)->dim.declsize = NC_UNLIMITED;
(yyvsp[-2].sym)->dim.isunlimited = 1;
@ -2190,7 +2190,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 58: /* dimd: ident */
#line 496 "ncgen/ncgen.y"
#line 496 "ncgen.y"
{
(yyvsp[0].sym)->objectclass=NC_DIM;
if(dupobjectcheck(NC_DIM,(yyvsp[0].sym)))
@ -2204,31 +2204,31 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 60: /* vasection: VARIABLES */
#line 508 "ncgen/ncgen.y"
#line 508 "ncgen.y"
{}
#line 2210 "ncgeny.c"
break;
case 61: /* vasection: VARIABLES vadecls */
#line 509 "ncgen/ncgen.y"
#line 509 "ncgen.y"
{}
#line 2216 "ncgeny.c"
break;
case 64: /* vadecl_or_attr: vardecl */
#line 516 "ncgen/ncgen.y"
#line 516 "ncgen.y"
{}
#line 2222 "ncgeny.c"
break;
case 65: /* vadecl_or_attr: attrdecl */
#line 516 "ncgen/ncgen.y"
#line 516 "ncgen.y"
{}
#line 2228 "ncgeny.c"
break;
case 66: /* vardecl: typeref varlist */
#line 519 "ncgen/ncgen.y"
#line 519 "ncgen.y"
{
size_t i;
stackbase=(yyvsp[0].mark);
@ -2252,7 +2252,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 67: /* varlist: varspec */
#line 541 "ncgen/ncgen.y"
#line 541 "ncgen.y"
{(yyval.mark)=listlength(stack);
listpush(stack,(void*)(yyvsp[0].sym));
}
@ -2260,13 +2260,13 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 68: /* varlist: varlist ',' varspec */
#line 545 "ncgen/ncgen.y"
#line 545 "ncgen.y"
{(yyval.mark)=(yyvsp[-2].mark); listpush(stack,(void*)(yyvsp[0].sym));}
#line 2266 "ncgeny.c"
break;
case 69: /* varspec: varident dimspec */
#line 549 "ncgen/ncgen.y"
#line 549 "ncgen.y"
{
size_t i;
Dimset dimset;
@ -2297,31 +2297,31 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 70: /* dimspec: %empty */
#line 577 "ncgen/ncgen.y"
#line 577 "ncgen.y"
{(yyval.mark)=listlength(stack);}
#line 2303 "ncgeny.c"
break;
case 71: /* dimspec: '(' dimlist ')' */
#line 578 "ncgen/ncgen.y"
#line 578 "ncgen.y"
{(yyval.mark)=(yyvsp[-1].mark);}
#line 2309 "ncgeny.c"
break;
case 72: /* dimlist: dimref */
#line 581 "ncgen/ncgen.y"
#line 581 "ncgen.y"
{(yyval.mark)=listlength(stack); listpush(stack,(void*)(yyvsp[0].sym));}
#line 2315 "ncgeny.c"
break;
case 73: /* dimlist: dimlist ',' dimref */
#line 583 "ncgen/ncgen.y"
#line 583 "ncgen.y"
{(yyval.mark)=(yyvsp[-2].mark); listpush(stack,(void*)(yyvsp[0].sym));}
#line 2321 "ncgeny.c"
break;
case 74: /* dimref: path */
#line 587 "ncgen/ncgen.y"
#line 587 "ncgen.y"
{Symbol* dimsym = (yyvsp[0].sym);
dimsym->objectclass = NC_DIM;
/* Find the actual dimension*/
@ -2336,7 +2336,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 75: /* fieldlist: fieldspec */
#line 601 "ncgen/ncgen.y"
#line 601 "ncgen.y"
{(yyval.mark)=listlength(stack);
listpush(stack,(void*)(yyvsp[0].sym));
}
@ -2344,13 +2344,13 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 76: /* fieldlist: fieldlist ',' fieldspec */
#line 605 "ncgen/ncgen.y"
#line 605 "ncgen.y"
{(yyval.mark)=(yyvsp[-2].mark); listpush(stack,(void*)(yyvsp[0].sym));}
#line 2350 "ncgeny.c"
break;
case 77: /* fieldspec: ident fielddimspec */
#line 610 "ncgen/ncgen.y"
#line 610 "ncgen.y"
{
size_t i;
Dimset dimset;
@ -2381,31 +2381,31 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 78: /* fielddimspec: %empty */
#line 638 "ncgen/ncgen.y"
#line 638 "ncgen.y"
{(yyval.mark)=listlength(stack);}
#line 2387 "ncgeny.c"
break;
case 79: /* fielddimspec: '(' fielddimlist ')' */
#line 639 "ncgen/ncgen.y"
#line 639 "ncgen.y"
{(yyval.mark)=(yyvsp[-1].mark);}
#line 2393 "ncgeny.c"
break;
case 80: /* fielddimlist: fielddim */
#line 643 "ncgen/ncgen.y"
#line 643 "ncgen.y"
{(yyval.mark)=listlength(stack); listpush(stack,(void*)(yyvsp[0].sym));}
#line 2399 "ncgeny.c"
break;
case 81: /* fielddimlist: fielddimlist ',' fielddim */
#line 645 "ncgen/ncgen.y"
#line 645 "ncgen.y"
{(yyval.mark)=(yyvsp[-2].mark); listpush(stack,(void*)(yyvsp[0].sym));}
#line 2405 "ncgeny.c"
break;
case 82: /* fielddim: UINT_CONST */
#line 650 "ncgen/ncgen.y"
#line 650 "ncgen.y"
{ /* Anonymous integer dimension.
Can only occur in type definitions*/
char anon[32];
@ -2419,7 +2419,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 83: /* fielddim: INT_CONST */
#line 660 "ncgen/ncgen.y"
#line 660 "ncgen.y"
{ /* Anonymous integer dimension.
Can only occur in type definitions*/
char anon[32];
@ -2437,7 +2437,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 84: /* varref: ambiguous_ref */
#line 680 "ncgen/ncgen.y"
#line 680 "ncgen.y"
{Symbol* vsym = (yyvsp[0].sym);
if(vsym->objectclass != NC_VAR) {
derror("Undefined or forward referenced variable: %s",vsym->name);
@ -2449,7 +2449,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 85: /* typeref: ambiguous_ref */
#line 691 "ncgen/ncgen.y"
#line 691 "ncgen.y"
{Symbol* tsym = (yyvsp[0].sym);
if(tsym->objectclass != NC_TYPE) {
derror("Undefined or forward referenced type: %s",tsym->name);
@ -2461,7 +2461,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 86: /* ambiguous_ref: path */
#line 702 "ncgen/ncgen.y"
#line 702 "ncgen.y"
{Symbol* tvsym = (yyvsp[0].sym); Symbol* sym;
/* disambiguate*/
tvsym->objectclass = NC_VAR;
@ -2484,49 +2484,49 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 87: /* ambiguous_ref: primtype */
#line 720 "ncgen/ncgen.y"
#line 720 "ncgen.y"
{(yyval.sym)=(yyvsp[0].sym);}
#line 2490 "ncgeny.c"
break;
case 88: /* attrdecllist: %empty */
#line 727 "ncgen/ncgen.y"
#line 727 "ncgen.y"
{}
#line 2496 "ncgeny.c"
break;
case 89: /* attrdecllist: attrdecl ';' attrdecllist */
#line 727 "ncgen/ncgen.y"
#line 727 "ncgen.y"
{}
#line 2502 "ncgeny.c"
break;
case 90: /* attrdecl: ':' _NCPROPS '=' conststring */
#line 731 "ncgen/ncgen.y"
#line 731 "ncgen.y"
{(yyval.sym) = makespecial(_NCPROPS_FLAG,NULL,NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2508 "ncgeny.c"
break;
case 91: /* attrdecl: ':' _ISNETCDF4 '=' constbool */
#line 733 "ncgen/ncgen.y"
#line 733 "ncgen.y"
{(yyval.sym) = makespecial(_ISNETCDF4_FLAG,NULL,NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2514 "ncgeny.c"
break;
case 92: /* attrdecl: ':' _SUPERBLOCK '=' constint */
#line 735 "ncgen/ncgen.y"
#line 735 "ncgen.y"
{(yyval.sym) = makespecial(_SUPERBLOCK_FLAG,NULL,NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2520 "ncgeny.c"
break;
case 93: /* attrdecl: ':' ident '=' datalist */
#line 737 "ncgen/ncgen.y"
#line 737 "ncgen.y"
{ (yyval.sym)=makeattribute((yyvsp[-2].sym),NULL,NULL,(yyvsp[0].datalist),ATTRGLOBAL);}
#line 2526 "ncgeny.c"
break;
case 94: /* attrdecl: typeref ambiguous_ref ':' ident '=' datalist */
#line 739 "ncgen/ncgen.y"
#line 739 "ncgen.y"
{Symbol* tsym = (yyvsp[-5].sym); Symbol* vsym = (yyvsp[-4].sym); Symbol* asym = (yyvsp[-2].sym);
if(vsym->objectclass == NC_VAR) {
(yyval.sym)=makeattribute(asym,vsym,tsym,(yyvsp[0].datalist),ATTRVAR);
@ -2539,7 +2539,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 95: /* attrdecl: ambiguous_ref ':' ident '=' datalist */
#line 748 "ncgen/ncgen.y"
#line 748 "ncgen.y"
{Symbol* sym = (yyvsp[-4].sym); Symbol* asym = (yyvsp[-2].sym);
if(sym->objectclass == NC_VAR) {
(yyval.sym)=makeattribute(asym,sym,NULL,(yyvsp[0].datalist),ATTRVAR);
@ -2554,97 +2554,97 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 96: /* attrdecl: ambiguous_ref ':' _FILLVALUE '=' datalist */
#line 759 "ncgen/ncgen.y"
#line 759 "ncgen.y"
{(yyval.sym) = makespecial(_FILLVALUE_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].datalist),ISLIST);}
#line 2560 "ncgeny.c"
break;
case 97: /* attrdecl: typeref ambiguous_ref ':' _FILLVALUE '=' datalist */
#line 761 "ncgen/ncgen.y"
#line 761 "ncgen.y"
{(yyval.sym) = makespecial(_FILLVALUE_FLAG,(yyvsp[-4].sym),(yyvsp[-5].sym),(void*)(yyvsp[0].datalist),ISLIST);}
#line 2566 "ncgeny.c"
break;
case 98: /* attrdecl: ambiguous_ref ':' _STORAGE '=' conststring */
#line 763 "ncgen/ncgen.y"
#line 763 "ncgen.y"
{(yyval.sym) = makespecial(_STORAGE_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2572 "ncgeny.c"
break;
case 99: /* attrdecl: ambiguous_ref ':' _CHUNKSIZES '=' intlist */
#line 765 "ncgen/ncgen.y"
#line 765 "ncgen.y"
{(yyval.sym) = makespecial(_CHUNKSIZES_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].datalist),ISLIST);}
#line 2578 "ncgeny.c"
break;
case 100: /* attrdecl: ambiguous_ref ':' _FLETCHER32 '=' constbool */
#line 767 "ncgen/ncgen.y"
#line 767 "ncgen.y"
{(yyval.sym) = makespecial(_FLETCHER32_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2584 "ncgeny.c"
break;
case 101: /* attrdecl: ambiguous_ref ':' _DEFLATELEVEL '=' constint */
#line 769 "ncgen/ncgen.y"
#line 769 "ncgen.y"
{(yyval.sym) = makespecial(_DEFLATE_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2590 "ncgeny.c"
break;
case 102: /* attrdecl: ambiguous_ref ':' _SHUFFLE '=' constbool */
#line 771 "ncgen/ncgen.y"
#line 771 "ncgen.y"
{(yyval.sym) = makespecial(_SHUFFLE_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2596 "ncgeny.c"
break;
case 103: /* attrdecl: ambiguous_ref ':' _ENDIANNESS '=' conststring */
#line 773 "ncgen/ncgen.y"
#line 773 "ncgen.y"
{(yyval.sym) = makespecial(_ENDIAN_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2602 "ncgeny.c"
break;
case 104: /* attrdecl: ambiguous_ref ':' _FILTER '=' conststring */
#line 775 "ncgen/ncgen.y"
#line 775 "ncgen.y"
{(yyval.sym) = makespecial(_FILTER_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2608 "ncgeny.c"
break;
case 105: /* attrdecl: ambiguous_ref ':' _CODECS '=' conststring */
#line 777 "ncgen/ncgen.y"
#line 777 "ncgen.y"
{(yyval.sym) = makespecial(_CODECS_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2614 "ncgeny.c"
break;
case 106: /* attrdecl: ambiguous_ref ':' _QUANTIZEBG '=' constint */
#line 779 "ncgen/ncgen.y"
#line 779 "ncgen.y"
{(yyval.sym) = makespecial(_QUANTIZEBG_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2620 "ncgeny.c"
break;
case 107: /* attrdecl: ambiguous_ref ':' _QUANTIZEGBR '=' constint */
#line 781 "ncgen/ncgen.y"
#line 781 "ncgen.y"
{(yyval.sym) = makespecial(_QUANTIZEGBR_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2626 "ncgeny.c"
break;
case 108: /* attrdecl: ambiguous_ref ':' _QUANTIZEBR '=' constint */
#line 783 "ncgen/ncgen.y"
#line 783 "ncgen.y"
{(yyval.sym) = makespecial(_QUANTIZEBR_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2632 "ncgeny.c"
break;
case 109: /* attrdecl: ambiguous_ref ':' _NOFILL '=' constbool */
#line 785 "ncgen/ncgen.y"
#line 785 "ncgen.y"
{(yyval.sym) = makespecial(_NOFILL_FLAG,(yyvsp[-4].sym),NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2638 "ncgeny.c"
break;
case 110: /* attrdecl: ':' _FORMAT '=' conststring */
#line 787 "ncgen/ncgen.y"
#line 787 "ncgen.y"
{(yyval.sym) = makespecial(_FORMAT_FLAG,NULL,NULL,(void*)(yyvsp[0].constant),ISCONST);}
#line 2644 "ncgeny.c"
break;
case 111: /* path: ident */
#line 792 "ncgen/ncgen.y"
#line 792 "ncgen.y"
{
(yyval.sym)=(yyvsp[0].sym);
(yyvsp[0].sym)->ref.is_ref=1;
@ -2655,7 +2655,7 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 112: /* path: PATH */
#line 799 "ncgen/ncgen.y"
#line 799 "ncgen.y"
{
(yyval.sym)=(yyvsp[0].sym);
(yyvsp[0].sym)->ref.is_ref=1;
@ -2666,259 +2666,259 @@ fprintf(stderr,"dimension: %s = UNLIMITED\n",(yyvsp[-2].sym)->name);
break;
case 114: /* datasection: DATA */
#line 808 "ncgen/ncgen.y"
#line 808 "ncgen.y"
{}
#line 2672 "ncgeny.c"
break;
case 115: /* datasection: DATA datadecls */
#line 809 "ncgen/ncgen.y"
#line 809 "ncgen.y"
{}
#line 2678 "ncgeny.c"
break;
case 118: /* datadecl: varref '=' datalist */
#line 817 "ncgen/ncgen.y"
#line 817 "ncgen.y"
{(yyvsp[-2].sym)->data = (yyvsp[0].datalist);}
#line 2684 "ncgeny.c"
break;
case 119: /* datalist: datalist0 */
#line 820 "ncgen/ncgen.y"
#line 820 "ncgen.y"
{(yyval.datalist) = (yyvsp[0].datalist);}
#line 2690 "ncgeny.c"
break;
case 120: /* datalist: datalist1 */
#line 821 "ncgen/ncgen.y"
#line 821 "ncgen.y"
{(yyval.datalist) = (yyvsp[0].datalist);}
#line 2696 "ncgeny.c"
break;
case 121: /* datalist0: %empty */
#line 825 "ncgen/ncgen.y"
#line 825 "ncgen.y"
{(yyval.datalist) = builddatalist(0);}
#line 2702 "ncgeny.c"
break;
case 122: /* datalist1: dataitem */
#line 829 "ncgen/ncgen.y"
#line 829 "ncgen.y"
{(yyval.datalist) = const2list((yyvsp[0].constant));}
#line 2708 "ncgeny.c"
break;
case 123: /* datalist1: datalist ',' dataitem */
#line 831 "ncgen/ncgen.y"
#line 831 "ncgen.y"
{dlappend((yyvsp[-2].datalist),((yyvsp[0].constant))); (yyval.datalist)=(yyvsp[-2].datalist); }
#line 2714 "ncgeny.c"
break;
case 124: /* dataitem: constdata */
#line 835 "ncgen/ncgen.y"
#line 835 "ncgen.y"
{(yyval.constant)=(yyvsp[0].constant);}
#line 2720 "ncgeny.c"
break;
case 125: /* dataitem: '{' datalist '}' */
#line 836 "ncgen/ncgen.y"
#line 836 "ncgen.y"
{(yyval.constant)=builddatasublist((yyvsp[-1].datalist));}
#line 2726 "ncgeny.c"
break;
case 126: /* constdata: simpleconstant */
#line 840 "ncgen/ncgen.y"
#line 840 "ncgen.y"
{(yyval.constant)=(yyvsp[0].constant);}
#line 2732 "ncgeny.c"
break;
case 127: /* constdata: OPAQUESTRING */
#line 841 "ncgen/ncgen.y"
#line 841 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_OPAQUE);}
#line 2738 "ncgeny.c"
break;
case 128: /* constdata: FILLMARKER */
#line 842 "ncgen/ncgen.y"
#line 842 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_FILLVALUE);}
#line 2744 "ncgeny.c"
break;
case 129: /* constdata: NIL */
#line 843 "ncgen/ncgen.y"
#line 843 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_NIL);}
#line 2750 "ncgeny.c"
break;
case 130: /* constdata: econstref */
#line 844 "ncgen/ncgen.y"
#line 844 "ncgen.y"
{(yyval.constant)=(yyvsp[0].constant);}
#line 2756 "ncgeny.c"
break;
case 132: /* econstref: path */
#line 849 "ncgen/ncgen.y"
#line 849 "ncgen.y"
{(yyval.constant) = makeenumconstref((yyvsp[0].sym));}
#line 2762 "ncgeny.c"
break;
case 133: /* function: ident '(' arglist ')' */
#line 853 "ncgen/ncgen.y"
#line 853 "ncgen.y"
{(yyval.constant)=evaluate((yyvsp[-3].sym),(yyvsp[-1].datalist));}
#line 2768 "ncgeny.c"
break;
case 134: /* arglist: simpleconstant */
#line 858 "ncgen/ncgen.y"
#line 858 "ncgen.y"
{(yyval.datalist) = const2list((yyvsp[0].constant));}
#line 2774 "ncgeny.c"
break;
case 135: /* arglist: arglist ',' simpleconstant */
#line 860 "ncgen/ncgen.y"
#line 860 "ncgen.y"
{dlappend((yyvsp[-2].datalist),((yyvsp[0].constant))); (yyval.datalist)=(yyvsp[-2].datalist);}
#line 2780 "ncgeny.c"
break;
case 136: /* simpleconstant: CHAR_CONST */
#line 864 "ncgen/ncgen.y"
#line 864 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_CHAR);}
#line 2786 "ncgeny.c"
break;
case 137: /* simpleconstant: BYTE_CONST */
#line 865 "ncgen/ncgen.y"
#line 865 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_BYTE);}
#line 2792 "ncgeny.c"
break;
case 138: /* simpleconstant: SHORT_CONST */
#line 866 "ncgen/ncgen.y"
#line 866 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_SHORT);}
#line 2798 "ncgeny.c"
break;
case 139: /* simpleconstant: INT_CONST */
#line 867 "ncgen/ncgen.y"
#line 867 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_INT);}
#line 2804 "ncgeny.c"
break;
case 140: /* simpleconstant: INT64_CONST */
#line 868 "ncgen/ncgen.y"
#line 868 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_INT64);}
#line 2810 "ncgeny.c"
break;
case 141: /* simpleconstant: UBYTE_CONST */
#line 869 "ncgen/ncgen.y"
#line 869 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_UBYTE);}
#line 2816 "ncgeny.c"
break;
case 142: /* simpleconstant: USHORT_CONST */
#line 870 "ncgen/ncgen.y"
#line 870 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_USHORT);}
#line 2822 "ncgeny.c"
break;
case 143: /* simpleconstant: UINT_CONST */
#line 871 "ncgen/ncgen.y"
#line 871 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_UINT);}
#line 2828 "ncgeny.c"
break;
case 144: /* simpleconstant: UINT64_CONST */
#line 872 "ncgen/ncgen.y"
#line 872 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_UINT64);}
#line 2834 "ncgeny.c"
break;
case 145: /* simpleconstant: FLOAT_CONST */
#line 873 "ncgen/ncgen.y"
#line 873 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_FLOAT);}
#line 2840 "ncgeny.c"
break;
case 146: /* simpleconstant: DOUBLE_CONST */
#line 874 "ncgen/ncgen.y"
#line 874 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_DOUBLE);}
#line 2846 "ncgeny.c"
break;
case 147: /* simpleconstant: TERMSTRING */
#line 875 "ncgen/ncgen.y"
#line 875 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_STRING);}
#line 2852 "ncgeny.c"
break;
case 148: /* intlist: constint */
#line 879 "ncgen/ncgen.y"
#line 879 "ncgen.y"
{(yyval.datalist) = const2list((yyvsp[0].constant));}
#line 2858 "ncgeny.c"
break;
case 149: /* intlist: intlist ',' constint */
#line 880 "ncgen/ncgen.y"
#line 880 "ncgen.y"
{(yyval.datalist)=(yyvsp[-2].datalist); dlappend((yyvsp[-2].datalist),((yyvsp[0].constant)));}
#line 2864 "ncgeny.c"
break;
case 150: /* constint: INT_CONST */
#line 885 "ncgen/ncgen.y"
#line 885 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_INT);}
#line 2870 "ncgeny.c"
break;
case 151: /* constint: UINT_CONST */
#line 887 "ncgen/ncgen.y"
#line 887 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_UINT);}
#line 2876 "ncgeny.c"
break;
case 152: /* constint: INT64_CONST */
#line 889 "ncgen/ncgen.y"
#line 889 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_INT64);}
#line 2882 "ncgeny.c"
break;
case 153: /* constint: UINT64_CONST */
#line 891 "ncgen/ncgen.y"
#line 891 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_UINT64);}
#line 2888 "ncgeny.c"
break;
case 154: /* conststring: TERMSTRING */
#line 895 "ncgen/ncgen.y"
#line 895 "ncgen.y"
{(yyval.constant)=makeconstdata(NC_STRING);}
#line 2894 "ncgeny.c"
break;
case 155: /* constbool: conststring */
#line 899 "ncgen/ncgen.y"
#line 899 "ncgen.y"
{(yyval.constant)=(yyvsp[0].constant);}
#line 2900 "ncgeny.c"
break;
case 156: /* constbool: constint */
#line 900 "ncgen/ncgen.y"
#line 900 "ncgen.y"
{(yyval.constant)=(yyvsp[0].constant);}
#line 2906 "ncgeny.c"
break;
case 157: /* varident: IDENT */
#line 908 "ncgen/ncgen.y"
#line 908 "ncgen.y"
{(yyval.sym)=(yyvsp[0].sym);}
#line 2912 "ncgeny.c"
break;
case 158: /* varident: DATA */
#line 909 "ncgen/ncgen.y"
#line 909 "ncgen.y"
{(yyval.sym)=identkeyword((yyvsp[0].sym));}
#line 2918 "ncgeny.c"
break;
case 159: /* ident: IDENT */
#line 913 "ncgen/ncgen.y"
#line 913 "ncgen.y"
{(yyval.sym)=(yyvsp[0].sym);}
#line 2924 "ncgeny.c"
break;
@ -3148,7 +3148,7 @@ yyreturnlab:
return yyresult;
}
#line 916 "ncgen/ncgen.y"
#line 916 "ncgen.y"
#ifndef NO_STDARG
@ -3403,11 +3403,11 @@ truefalse(NCConstant* con, int tag)
{
if(con->nctype == NC_STRING) {
char* sdata = con->value.stringv.stringv;
if(strcmp(sdata,"false") == 0
|| strcmp(sdata,"0") == 0)
if(strncmp(sdata,"false",NC_MAX_NAME) == 0
|| strncmp(sdata,"0",NC_MAX_NAME) == 0)
return 0;
else if(strcmp(sdata,"true") == 0
|| strcmp(sdata,"1") == 0)
else if(strncmp(sdata,"true",NC_MAX_NAME) == 0
|| strncmp(sdata,"1",NC_MAX_NAME) == 0)
return 1;
else goto fail;
} else if(con->value.int32v < 0 || con->value.int32v > 1)
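
The last hunk above replaces strcmp with strncmp bounded by NC_MAX_NAME when truefalse() classifies boolean-like constant strings. A minimal, self-contained sketch of that bounded-comparison pattern (not the ncgen source itself; NC_MAX_NAME is netcdf.h's 256-character name bound):

    /* Sketch of the bounded string comparison used above, not library code. */
    #include <stdio.h>
    #include <string.h>

    #ifndef NC_MAX_NAME
    #define NC_MAX_NAME 256   /* matches netcdf.h's bound on name length */
    #endif

    /* Map a boolean-like constant string to 0/1, or -1 if unrecognized. */
    static int booltag(const char* s)
    {
        if (strncmp(s, "false", NC_MAX_NAME) == 0 || strncmp(s, "0", NC_MAX_NAME) == 0)
            return 0;
        if (strncmp(s, "true", NC_MAX_NAME) == 0 || strncmp(s, "1", NC_MAX_NAME) == 0)
            return 1;
        return -1;
    }

    int main(void)
    {
        printf("%d %d %d\n", booltag("true"), booltag("0"), booltag("maybe"));
        return 0;
    }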

View File

@ -119,7 +119,7 @@ extern int ncgdebug;
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
union YYSTYPE
{
#line 156 "ncgen/ncgen.y"
#line 156 "ncgen.y"
Symbol* sym;
unsigned long size; /* allow for zero size to indicate e.g. UNLIMITED*/
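
Most hunks in these generated parser and header files change only the path recorded in the #line directives emitted by Bison and Flex, from ncgen/ncgen.y (or ncgen3/ncgen.y) to ncgen.y, apparently so diagnostics and debuggers refer to the grammar file by its source-directory-relative name. A minimal sketch of what a #line directive does, independent of ncgen:

    /* Illustrative only: a #line directive redirects __FILE__/__LINE__ and
     * compiler diagnostics to the named source location. */
    #include <stdio.h>

    int main(void)
    {
        printf("before: %s:%d\n", __FILE__, __LINE__);
    #line 605 "ncgen.y"
        printf("after:  %s:%d\n", __FILE__, __LINE__);  /* reports ncgen.y:605 */
        return 0;
    }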

View File

@ -802,13 +802,13 @@ void equalatt(void)
/* shrink space down to what was really needed */
att_space = erealloc(att_space, valnum*nctypesize(valtype));
atts[natts].val = att_space;
if (STREQ(atts[natts].name, _FillValue) &&
if (STREQ(atts[natts].name, NC_FillValue) &&
atts[natts].var != NC_GLOBAL) {
nc_putfill(atts[natts].type,atts[natts].val,
&vars[atts[natts].var].fill_value);
if(atts[natts].type != vars[atts[natts].var].type) {
derror("variable %s: %s type mismatch",
vars[atts[natts].var].name, _FillValue);
vars[atts[natts].var].name, NC_FillValue);
}
}
natts++;
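
Here only the spelling of the fill-value attribute macro changes, from _FillValue to NC_FillValue; the attribute name stored in files stays "_FillValue". The old macro name sits in C's implementation-reserved identifier space (leading underscore followed by an uppercase letter), which is presumably why it is being retired. A minimal sketch, assuming (as in recent netcdf.h) that NC_FillValue expands to that same string:

    /* Sketch only: the attribute *name* is unchanged, only the macro spelling moves. */
    #include <stdio.h>
    #include <string.h>

    #ifndef NC_FillValue
    #define NC_FillValue "_FillValue"   /* assumed to match recent netcdf.h */
    #endif

    static int is_fill_attribute(const char* attname)
    {
        return strcmp(attname, NC_FillValue) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", is_fill_attribute("_FillValue"), is_fill_attribute("units"));
        return 0;
    }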

View File

@ -20,7 +20,7 @@
#define yy_flex_debug ncg_flex_debug
#define yyin ncgin
#define yyleng ncgleng
#define yyncgenllex
#define yylex ncglex
#define yylineno ncglineno
#define yyout ncgout
#define yyrestart ncgrestart
@ -113,7 +113,7 @@
#ifdef yylex
#define ncglex_ALREADY_DEFINED
#else
#define yyncgenllex
#define yylex ncglex
#endif
#ifdef yyrestart
@ -1053,8 +1053,8 @@ int yy_flex_debug = 0;
#define YY_MORE_ADJ 0
#define YY_RESTORE_YY_MORE_OFFSET
char *yytext;
#line 1 "ncgen3/ncgen.l"
#line 2 "ncgen3/ncgen.l"
#line 1 "ncgen.l"
#line 2 "ncgen.l"
/*********************************************************************
* Copyright 2018, UCAR/Unidata
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
@ -1359,7 +1359,7 @@ YY_DECL
}
{
#line 107 "ncgen3/ncgen.l"
#line 107 "ncgen.l"
#line 1364 "ncgenl.c"
@ -1420,7 +1420,7 @@ do_action: /* This label is used only to access EOF actions. */
case 1:
YY_RULE_SETUP
#line 108 "ncgen3/ncgen.l"
#line 108 "ncgen.l"
{ /* comment */
break;
}
@ -1428,7 +1428,7 @@ YY_RULE_SETUP
case 2:
/* rule 2 can match eol */
YY_RULE_SETUP
#line 112 "ncgen3/ncgen.l"
#line 112 "ncgen.l"
{
if(yyleng > MAXTRST) {
yyerror("string too long, truncated\n");
@ -1440,59 +1440,59 @@ YY_RULE_SETUP
YY_BREAK
case 3:
YY_RULE_SETUP
#line 121 "ncgen3/ncgen.l"
#line 121 "ncgen.l"
{return (FLOAT_K);}
YY_BREAK
case 4:
YY_RULE_SETUP
#line 122 "ncgen3/ncgen.l"
#line 122 "ncgen.l"
{return (CHAR_K);}
YY_BREAK
case 5:
YY_RULE_SETUP
#line 123 "ncgen3/ncgen.l"
#line 123 "ncgen.l"
{return (BYTE_K);}
YY_BREAK
case 6:
YY_RULE_SETUP
#line 124 "ncgen3/ncgen.l"
#line 124 "ncgen.l"
{return (SHORT_K);}
YY_BREAK
case 7:
YY_RULE_SETUP
#line 125 "ncgen3/ncgen.l"
#line 125 "ncgen.l"
{return (INT_K);}
YY_BREAK
case 8:
YY_RULE_SETUP
#line 126 "ncgen3/ncgen.l"
#line 126 "ncgen.l"
{return (DOUBLE_K);}
YY_BREAK
case 9:
YY_RULE_SETUP
#line 127 "ncgen3/ncgen.l"
#line 127 "ncgen.l"
{int_val = -1;
return (NC_UNLIMITED_K);}
YY_BREAK
case 10:
YY_RULE_SETUP
#line 130 "ncgen3/ncgen.l"
#line 130 "ncgen.l"
{return (DIMENSIONS);}
YY_BREAK
case 11:
YY_RULE_SETUP
#line 131 "ncgen3/ncgen.l"
#line 131 "ncgen.l"
{return (VARIABLES);}
YY_BREAK
case 12:
YY_RULE_SETUP
#line 132 "ncgen3/ncgen.l"
#line 132 "ncgen.l"
{return (DATA);}
YY_BREAK
case 13:
/* rule 13 can match eol */
YY_RULE_SETUP
#line 133 "ncgen3/ncgen.l"
#line 133 "ncgen.l"
{
char *s = (char*)yytext+strlen("netcdf");
char *t = (char*)yytext+yyleng-1;
@ -1514,7 +1514,7 @@ YY_RULE_SETUP
YY_BREAK
case 14:
YY_RULE_SETUP
#line 151 "ncgen3/ncgen.l"
#line 151 "ncgen.l"
{ /* missing value (pre-2.4 backward compatibility) */
if (yytext[0] == '-') {
double_val = -NC_FILL_DOUBLE;
@ -1526,7 +1526,7 @@ YY_RULE_SETUP
YY_BREAK
case 15:
YY_RULE_SETUP
#line 159 "ncgen3/ncgen.l"
#line 159 "ncgen.l"
{ /* missing value (pre-2.4 backward compatibility) */
if (yytext[0] == '-') {
float_val = -NC_FILL_FLOAT;
@ -1538,7 +1538,7 @@ YY_RULE_SETUP
YY_BREAK
case 16:
YY_RULE_SETUP
#line 167 "ncgen3/ncgen.l"
#line 167 "ncgen.l"
{
if (STREQ((char *)yytext, FILL_STRING))
return (FILLVALUE);
@ -1551,7 +1551,7 @@ YY_RULE_SETUP
case 17:
/* rule 17 can match eol */
YY_RULE_SETUP
#line 176 "ncgen3/ncgen.l"
#line 176 "ncgen.l"
{
lineno++ ;
break;
@ -1559,7 +1559,7 @@ YY_RULE_SETUP
YY_BREAK
case 18:
YY_RULE_SETUP
#line 181 "ncgen3/ncgen.l"
#line 181 "ncgen.l"
{
int ii;
if (sscanf((char*)yytext, "%d", &ii) != 1) {
@ -1576,7 +1576,7 @@ YY_RULE_SETUP
YY_BREAK
case 19:
YY_RULE_SETUP
#line 195 "ncgen3/ncgen.l"
#line 195 "ncgen.l"
{
if (sscanf((char*)yytext, "%le", &double_val) != 1) {
snprintf(errstr, sizeof(errstr),"bad long or double constant: %s",(char*)yytext);
@ -1587,7 +1587,7 @@ YY_RULE_SETUP
YY_BREAK
case 20:
YY_RULE_SETUP
#line 202 "ncgen3/ncgen.l"
#line 202 "ncgen.l"
{
if (sscanf((char*)yytext, "%e", &float_val) != 1) {
snprintf(errstr, sizeof(errstr),"bad float constant: %s",(char*)yytext);
@ -1598,7 +1598,7 @@ YY_RULE_SETUP
YY_BREAK
case 21:
YY_RULE_SETUP
#line 209 "ncgen3/ncgen.l"
#line 209 "ncgen.l"
{
int tmp = 0;
if (sscanf((char*)yytext, "%d", &tmp) != 1) {
@ -1611,7 +1611,7 @@ YY_RULE_SETUP
YY_BREAK
case 22:
YY_RULE_SETUP
#line 218 "ncgen3/ncgen.l"
#line 218 "ncgen.l"
{
char *ptr;
errno = 0;
@ -1630,7 +1630,7 @@ YY_RULE_SETUP
YY_BREAK
case 23:
YY_RULE_SETUP
#line 233 "ncgen3/ncgen.l"
#line 233 "ncgen.l"
{
char *ptr;
long long_val;
@ -1652,7 +1652,7 @@ YY_RULE_SETUP
case 24:
/* rule 24 can match eol */
YY_RULE_SETUP
#line 250 "ncgen3/ncgen.l"
#line 250 "ncgen.l"
{
(void) sscanf((char*)&yytext[1],"%c",&byte_val);
return (BYTE_CONST);
@ -1660,7 +1660,7 @@ YY_RULE_SETUP
YY_BREAK
case 25:
YY_RULE_SETUP
#line 254 "ncgen3/ncgen.l"
#line 254 "ncgen.l"
{
byte_val = (char) strtol((char*)&yytext[2], (char **) 0, 8);
return (BYTE_CONST);
@ -1668,7 +1668,7 @@ YY_RULE_SETUP
YY_BREAK
case 26:
YY_RULE_SETUP
#line 258 "ncgen3/ncgen.l"
#line 258 "ncgen.l"
{
byte_val = (char) strtol((char*)&yytext[3], (char **) 0, 16);
return (BYTE_CONST);
@ -1676,7 +1676,7 @@ YY_RULE_SETUP
YY_BREAK
case 27:
YY_RULE_SETUP
#line 262 "ncgen3/ncgen.l"
#line 262 "ncgen.l"
{
switch ((char)yytext[2]) {
case 'a': byte_val = '\007'; break; /* not everyone under-
@ -1697,21 +1697,21 @@ YY_RULE_SETUP
YY_BREAK
case 28:
YY_RULE_SETUP
#line 280 "ncgen3/ncgen.l"
#line 280 "ncgen.l"
{ /* whitespace */
break;
}
YY_BREAK
case 29:
YY_RULE_SETUP
#line 283 "ncgen3/ncgen.l"
#line 283 "ncgen.l"
{/* Note: this next rule will not work for UTF8 characters */
return (yytext[0]) ;
}
YY_BREAK
case 30:
YY_RULE_SETUP
#line 287 "ncgen3/ncgen.l"
#line 287 "ncgen.l"
ECHO;
YY_BREAK
#line 1717 "ncgenl.c"
@ -2719,7 +2719,7 @@ void yyfree (void * ptr )
#define YYTABLES_NAME "yytables"
#line 287 "ncgen3/ncgen.l"
#line 287 "ncgen.l"
/* Hack to keep compile quiet */
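
Besides the #line path normalization, the hunks near the top of this generated lexer restore the standard Flex prefix remapping, #define yylex ncglex, which an earlier rename had mangled into #define yyncgenllex. A minimal sketch of the prefix idiom, with a stand-in scanner rather than the generated ncglex:

    /* Sketch of the flex prefix idiom: public yy* names are remapped to ncg*
     * names so several generated scanners can be linked into one program. */
    #include <stdio.h>

    #define yylex ncglex                 /* the line the hunks restore */

    static int ncglex(void) { return 42; }   /* stand-in for the generated scanner */

    int main(void)
    {
        printf("token = %d\n", yylex()); /* expands to ncglex() */
        return 0;
    }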

View File

@ -74,7 +74,7 @@
#define yychar ncgchar
/* First part of user prologue. */
#line 9 "ncgen3/ncgen.y"
#line 9 "ncgen.y"
#ifdef sccs
static char SccsId[] = "$Id: ncgen.y,v 1.34 2010/03/31 18:18:41 dmh Exp $";
@ -1281,13 +1281,13 @@ yyreduce:
switch (yyn)
{
case 2: /* $@1: %empty */
#line 117 "ncgen3/ncgen.y"
#line 117 "ncgen.y"
{ init_netcdf(); }
#line 1287 "ncgeny.c"
break;
case 3: /* $@2: %empty */
#line 120 "ncgen3/ncgen.y"
#line 120 "ncgen.y"
{
if (derror_count == 0)
define_netcdf(netcdfname);
@ -1298,7 +1298,7 @@ yyreduce:
break;
case 4: /* ncdesc: NETCDF '{' $@1 dimsection vasection $@2 datasection '}' */
#line 128 "ncgen3/ncgen.y"
#line 128 "ncgen.y"
{
if (derror_count == 0)
close_netcdf();
@ -1307,7 +1307,7 @@ yyreduce:
break;
case 11: /* dimdecl: dimd '=' INT_CONST */
#line 143 "ncgen3/ncgen.y"
#line 143 "ncgen.y"
{ if (int_val <= 0)
derror("dimension length must be positive");
dims[ndims].size = (size_t)int_val;
@ -1317,7 +1317,7 @@ yyreduce:
break;
case 12: /* dimdecl: dimd '=' DOUBLE_CONST */
#line 149 "ncgen3/ncgen.y"
#line 149 "ncgen.y"
{ /* for rare case where 2^31 < dimsize < 2^32 */
if (double_val <= 0)
derror("dimension length must be positive");
@ -1332,7 +1332,7 @@ yyreduce:
break;
case 13: /* dimdecl: dimd '=' NC_UNLIMITED_K */
#line 160 "ncgen3/ncgen.y"
#line 160 "ncgen.y"
{ if (rec_dim != -1)
derror("only one NC_UNLIMITED dimension allowed");
rec_dim = ndims; /* the unlimited (record) dimension */
@ -1343,7 +1343,7 @@ yyreduce:
break;
case 14: /* dimd: dim */
#line 168 "ncgen3/ncgen.y"
#line 168 "ncgen.y"
{
if (yyvsp[0]->is_dim == 1) {
derror( "duplicate dimension declaration for %s",
@ -1363,43 +1363,43 @@ yyreduce:
break;
case 27: /* type: BYTE_K */
#line 200 "ncgen3/ncgen.y"
#line 200 "ncgen.y"
{ type_code = NC_BYTE; }
#line 1369 "ncgeny.c"
break;
case 28: /* type: CHAR_K */
#line 201 "ncgen3/ncgen.y"
#line 201 "ncgen.y"
{ type_code = NC_CHAR; }
#line 1375 "ncgeny.c"
break;
case 29: /* type: SHORT_K */
#line 202 "ncgen3/ncgen.y"
#line 202 "ncgen.y"
{ type_code = NC_SHORT; }
#line 1381 "ncgeny.c"
break;
case 30: /* type: INT_K */
#line 203 "ncgen3/ncgen.y"
#line 203 "ncgen.y"
{ type_code = NC_INT; }
#line 1387 "ncgeny.c"
break;
case 31: /* type: FLOAT_K */
#line 204 "ncgen3/ncgen.y"
#line 204 "ncgen.y"
{ type_code = NC_FLOAT; }
#line 1393 "ncgeny.c"
break;
case 32: /* type: DOUBLE_K */
#line 205 "ncgen3/ncgen.y"
#line 205 "ncgen.y"
{ type_code = NC_DOUBLE; }
#line 1399 "ncgeny.c"
break;
case 35: /* $@3: %empty */
#line 211 "ncgen3/ncgen.y"
#line 211 "ncgen.y"
{
static struct vars dummyvar;
@ -1436,7 +1436,7 @@ yyreduce:
break;
case 36: /* varspec: var $@3 dimspec */
#line 244 "ncgen3/ncgen.y"
#line 244 "ncgen.y"
{
vars[nvars].ndims = nvdims;
nvars++;
@ -1445,7 +1445,7 @@ yyreduce:
break;
case 42: /* vdim: dim */
#line 258 "ncgen3/ncgen.y"
#line 258 "ncgen.y"
{
if (nvdims >= NC_MAX_VAR_DIMS) {
derror("%s has too many dimensions",vars[nvars].name);
@ -1469,7 +1469,7 @@ yyreduce:
break;
case 43: /* $@4: %empty */
#line 279 "ncgen3/ncgen.y"
#line 279 "ncgen.y"
{
defatt();
}
@ -1477,7 +1477,7 @@ yyreduce:
break;
case 44: /* attdecl: att $@4 '=' attvallist */
#line 283 "ncgen3/ncgen.y"
#line 283 "ncgen.y"
{
equalatt();
}
@ -1485,7 +1485,7 @@ yyreduce:
break;
case 45: /* $@5: %empty */
#line 288 "ncgen3/ncgen.y"
#line 288 "ncgen.y"
{
defatt();
}
@ -1493,7 +1493,7 @@ yyreduce:
break;
case 46: /* gattdecl: gatt $@5 '=' attvallist */
#line 292 "ncgen3/ncgen.y"
#line 292 "ncgen.y"
{
equalatt();
}
@ -1501,7 +1501,7 @@ yyreduce:
break;
case 48: /* gatt: ':' attr */
#line 300 "ncgen3/ncgen.y"
#line 300 "ncgen.y"
{
varnum = NC_GLOBAL; /* handle of "global" attribute */
}
@ -1509,7 +1509,7 @@ yyreduce:
break;
case 49: /* avar: var */
#line 306 "ncgen3/ncgen.y"
#line 306 "ncgen.y"
{ if (yyvsp[0]->is_var == 1)
varnum = yyvsp[0]->vnum;
else {
@ -1522,7 +1522,7 @@ yyreduce:
break;
case 50: /* attr: IDENT */
#line 316 "ncgen3/ncgen.y"
#line 316 "ncgen.y"
{
/* make sure atts array will hold attributes */
grow_aarray(natts, /* must hold natts+1 atts */
@ -1536,7 +1536,7 @@ yyreduce:
break;
case 53: /* aconst: attconst */
#line 330 "ncgen3/ncgen.y"
#line 330 "ncgen.y"
{
if (valtype == NC_UNSPECIFIED)
valtype = atype_code;
@ -1547,7 +1547,7 @@ yyreduce:
break;
case 54: /* attconst: CHAR_CONST */
#line 339 "ncgen3/ncgen.y"
#line 339 "ncgen.y"
{
atype_code = NC_CHAR;
*char_valp++ = char_val;
@ -1557,7 +1557,7 @@ yyreduce:
break;
case 55: /* attconst: TERMSTRING */
#line 345 "ncgen3/ncgen.y"
#line 345 "ncgen.y"
{
atype_code = NC_CHAR;
{
@ -1574,7 +1574,7 @@ yyreduce:
break;
case 56: /* attconst: BYTE_CONST */
#line 358 "ncgen3/ncgen.y"
#line 358 "ncgen.y"
{
atype_code = NC_BYTE;
*byte_valp++ = byte_val;
@ -1584,7 +1584,7 @@ yyreduce:
break;
case 57: /* attconst: SHORT_CONST */
#line 364 "ncgen3/ncgen.y"
#line 364 "ncgen.y"
{
atype_code = NC_SHORT;
*short_valp++ = short_val;
@ -1594,7 +1594,7 @@ yyreduce:
break;
case 58: /* attconst: INT_CONST */
#line 370 "ncgen3/ncgen.y"
#line 370 "ncgen.y"
{
atype_code = NC_INT;
*int_valp++ = int_val;
@ -1604,7 +1604,7 @@ yyreduce:
break;
case 59: /* attconst: FLOAT_CONST */
#line 376 "ncgen3/ncgen.y"
#line 376 "ncgen.y"
{
atype_code = NC_FLOAT;
*float_valp++ = float_val;
@ -1614,7 +1614,7 @@ yyreduce:
break;
case 60: /* attconst: DOUBLE_CONST */
#line 382 "ncgen3/ncgen.y"
#line 382 "ncgen.y"
{
atype_code = NC_DOUBLE;
*double_valp++ = double_val;
@ -1624,7 +1624,7 @@ yyreduce:
break;
case 66: /* $@6: %empty */
#line 398 "ncgen3/ncgen.y"
#line 398 "ncgen.y"
{
valtype = vars[varnum].type; /* variable type */
valnum = 0; /* values accumulated for variable */
@ -1679,7 +1679,7 @@ yyreduce:
break;
case 67: /* datadecl: avar $@6 '=' constlist */
#line 449 "ncgen3/ncgen.y"
#line 449 "ncgen.y"
{
if (valnum < var_len) { /* leftovers */
nc_fill(valtype,
@ -1698,7 +1698,7 @@ yyreduce:
break;
case 70: /* $@7: %empty */
#line 468 "ncgen3/ncgen.y"
#line 468 "ncgen.y"
{
if(valnum >= var_len) {
if (vars[varnum].dims[0] != rec_dim) { /* not recvar */
@ -1727,7 +1727,7 @@ yyreduce:
break;
case 71: /* dconst: $@7 const */
#line 493 "ncgen3/ncgen.y"
#line 493 "ncgen.y"
{
if (not_a_string) {
switch (valtype) {
@ -1757,7 +1757,7 @@ yyreduce:
break;
case 72: /* const: CHAR_CONST */
#line 521 "ncgen3/ncgen.y"
#line 521 "ncgen.y"
{
atype_code = NC_CHAR;
switch (valtype) {
@ -1787,7 +1787,7 @@ yyreduce:
break;
case 73: /* const: TERMSTRING */
#line 547 "ncgen3/ncgen.y"
#line 547 "ncgen.y"
{
not_a_string = 0;
atype_code = NC_CHAR;
@ -1846,7 +1846,7 @@ yyreduce:
break;
case 74: /* const: BYTE_CONST */
#line 602 "ncgen3/ncgen.y"
#line 602 "ncgen.y"
{
atype_code = NC_BYTE;
switch (valtype) {
@ -1876,7 +1876,7 @@ yyreduce:
break;
case 75: /* const: SHORT_CONST */
#line 628 "ncgen3/ncgen.y"
#line 628 "ncgen.y"
{
atype_code = NC_SHORT;
switch (valtype) {
@ -1906,7 +1906,7 @@ yyreduce:
break;
case 76: /* const: INT_CONST */
#line 654 "ncgen3/ncgen.y"
#line 654 "ncgen.y"
{
atype_code = NC_INT;
switch (valtype) {
@ -1936,7 +1936,7 @@ yyreduce:
break;
case 77: /* const: FLOAT_CONST */
#line 680 "ncgen3/ncgen.y"
#line 680 "ncgen.y"
{
atype_code = NC_FLOAT;
switch (valtype) {
@ -1966,7 +1966,7 @@ yyreduce:
break;
case 78: /* const: DOUBLE_CONST */
#line 706 "ncgen3/ncgen.y"
#line 706 "ncgen.y"
{
atype_code = NC_DOUBLE;
switch (valtype) {
@ -1999,7 +1999,7 @@ yyreduce:
break;
case 79: /* const: FILLVALUE */
#line 735 "ncgen3/ncgen.y"
#line 735 "ncgen.y"
{
/* store fill_value */
switch (valtype) {
@ -2228,7 +2228,7 @@ yyreturnlab:
return yyresult;
}
#line 770 "ncgen3/ncgen.y"
#line 770 "ncgen.y"
/* HELPER PROGRAMS */

View File

@ -191,6 +191,9 @@ IF(NETCDF_ENABLE_TESTS)
add_sh_test(nczarr_test run_quantize)
add_sh_test(nczarr_test run_notzarr)
# Test back compatibility of old key format
add_sh_test(nczarr_test run_oldkeys)
# This has timeout under CMake
# if(NOT ISCMAKE)
add_sh_test(nczarr_test run_interop)
@ -226,7 +229,7 @@ IF(NETCDF_ENABLE_TESTS)
if(NETCDF_ENABLE_NCZARR_ZIP)
add_sh_test(nczarr_test run_newformat)
# Test various corrupted files
ADD_SH_TEST(nczarr_test run_corrupt.sh)
ADD_SH_TEST(nczarr_test run_corrupt)
endif()
IF(FALSE) # Obsolete tests

View File

@ -13,10 +13,10 @@ LDADD = ${top_builddir}/liblib/libnetcdf.la
TESTS_ENVIRONMENT =
TEST_EXTENSIONS = .sh
#SH_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
#sh_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
#LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
#TESTS_ENVIRONMENT += export SETX=1;
SH_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
sh_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
TESTS_ENVIRONMENT += export SETX=1;
#TESTS_ENVIRONMENT += export NCTRACING=1;
AM_CPPFLAGS += -I${top_srcdir} -I${top_srcdir}/libnczarr
@ -121,6 +121,9 @@ if USE_HDF5
TESTS += run_fillonlyz.sh
endif
# Test back compatibility of old key format
TESTS += run_oldkeys.sh
if BUILD_BENCHMARKS
UTILSRC = bm_utils.c timer_utils.c test_utils.c bm_utils.h bm_timer.h
@ -207,7 +210,7 @@ run_filter.sh \
run_newformat.sh run_nczarr_fill.sh run_quantize.sh \
run_jsonconvention.sh run_nczfilter.sh run_unknown.sh \
run_scalar.sh run_strings.sh run_nulls.sh run_notzarr.sh run_external.sh \
run_unlim_io.sh run_corrupt.sh
run_unlim_io.sh run_corrupt.sh run_oldkeys.sh
EXTRA_DIST += \
ref_ut_map_create.cdl ref_ut_map_writedata.cdl ref_ut_map_writemeta2.cdl ref_ut_map_writemeta.cdl \
@ -220,7 +223,7 @@ ref_perdimspecs.cdl ref_fillonly.cdl \
ref_whole.cdl ref_whole.txt \
ref_skip.cdl ref_skip.txt ref_skipw.cdl \
ref_rem.cdl ref_rem.dmp ref_ndims.cdl ref_ndims.dmp \
ref_misc1.cdl ref_misc1.dmp ref_misc2.cdl \
ref_misc1.cdl ref_misc1.dmp ref_misc2.cdl ref_zarr_test_data_meta.cdl \
ref_avail1.cdl ref_avail1.dmp ref_avail1.txt \
ref_xarray.cdl ref_purezarr.cdl ref_purezarr_base.cdl ref_nczarr2zarr.cdl \
ref_bzip2.cdl ref_filtered.cdl ref_multi.cdl \
@ -228,16 +231,15 @@ ref_any.cdl ref_oldformat.cdl ref_oldformat.zip ref_newformatpure.cdl \
ref_groups.h5 ref_byte.zarr.zip ref_byte_fill_value_null.zarr.zip \
ref_groups_regular.cdl ref_byte.cdl ref_byte_fill_value_null.cdl \
ref_jsonconvention.cdl ref_jsonconvention.zmap \
ref_string.cdl ref_string_nczarr.baseline ref_string_zarr.baseline ref_scalar.cdl \
ref_nulls_nczarr.baseline ref_nulls_zarr.baseline ref_nulls.cdl ref_notzarr.tar.gz
ref_string.cdl ref_string_nczarr.baseline ref_string_zarr.baseline ref_scalar.cdl ref_scalar_nczarr.cdl \
ref_nulls_nczarr.baseline ref_nulls_zarr.baseline ref_nulls.cdl ref_notzarr.tar.gz \
ref_oldkeys.cdl ref_oldkeys.file.zip ref_oldkeys.zmap \
ref_noshape.file.zip
# Interoperability files
# Interoperability files from external sources
EXTRA_DIST += ref_power_901_constants_orig.zip ref_power_901_constants.cdl ref_quotes_orig.zip ref_quotes.cdl \
ref_zarr_test_data.cdl.gz ref_zarr_test_data_2d.cdl.gz
# Additional Files
EXTRA_DIST += ref_noshape.file.zip
CLEANFILES = ut_*.txt ut*.cdl tmp*.nc tmp*.cdl tmp*.txt tmp*.dmp tmp*.zip tmp*.nc tmp*.dump tmp*.tmp tmp*.zmap tmp_ngc.c ref_zarr_test_data.cdl tst_*.nc.zip ref_quotes.zip ref_power_901_constants.zip
BUILT_SOURCES = test_quantize.c test_filter_vlen.c test_unlim_vars.c test_endians.c \

View File

@ -50,7 +50,7 @@ typedef struct Format {
int debug;
int linear;
int holevalue;
int rank;
size_t rank;
size_t dimlens[NC_MAX_VAR_DIMS];
size_t chunklens[NC_MAX_VAR_DIMS];
size_t chunkcounts[NC_MAX_VAR_DIMS];
@ -60,7 +60,7 @@ typedef struct Format {
} Format;
typedef struct Odometer {
int rank; /*rank */
size_t rank; /*rank */
size_t start[NC_MAX_VAR_DIMS];
size_t stop[NC_MAX_VAR_DIMS];
size_t max[NC_MAX_VAR_DIMS]; /* max size of ith index */
@ -71,11 +71,11 @@ typedef struct Odometer {
#define ceildiv(x,y) (((x) % (y)) == 0 ? ((x) / (y)) : (((x) / (y)) + 1))
static char* captured[4096];
static int ncap = 0;
static size_t ncap = 0;
extern int nc__testurl(const char*,char**);
Odometer* odom_new(int rank, const size_t* stop, const size_t* max);
Odometer* odom_new(size_t rank, const size_t* stop, const size_t* max);
void odom_free(Odometer* odom);
int odom_more(Odometer* odom);
int odom_next(Odometer* odom);
@ -120,9 +120,9 @@ cleanup(void)
}
Odometer*
odom_new(int rank, const size_t* stop, const size_t* max)
odom_new(size_t rank, const size_t* stop, const size_t* max)
{
int i;
size_t i;
Odometer* odom = NULL;
if((odom = calloc(1,sizeof(Odometer))) == NULL)
return NULL;
@ -339,12 +339,12 @@ dump(Format* format)
{
void* chunkdata = NULL; /*[CHUNKPROD];*/
Odometer* odom = NULL;
int r;
size_t r;
size_t offset[NC_MAX_VAR_DIMS];
int holechunk = 0;
char sindices[64];
#ifdef H5
int i;
size_t i;
hid_t fileid, grpid, datasetid;
hid_t dxpl_id = H5P_DEFAULT; /*data transfer property list */
unsigned int filter_mask = 0;
@ -388,7 +388,7 @@ dump(Format* format)
if((chunkdata = calloc(sizeof(int),format->chunkprod))==NULL) usage(NC_ENOMEM);
printf("rank=%d dims=(%s) chunks=(%s)\n",format->rank,printvector(format->rank,format->dimlens),
printf("rank=%zu dims=(%s) chunks=(%s)\n",format->rank,printvector(format->rank,format->dimlens),
printvector(format->rank,format->chunklens));
while(odom_more(odom)) {
@ -506,12 +506,14 @@ done:
int
main(int argc, char** argv)
{
int i,stat = NC_NOERR;
int stat = NC_NOERR;
size_t i;
Format format;
int ncid, varid, dimids[NC_MAX_VAR_DIMS];
int vtype, storage;
int mode;
int c;
int r;
memset(&format,0,sizeof(format));
@ -577,7 +579,8 @@ main(int argc, char** argv)
/* Get the info about the var */
if((stat=nc_inq_varid(ncid,format.var_name,&varid))) usage(stat);
if((stat=nc_inq_var(ncid,varid,NULL,&vtype,&format.rank,dimids,NULL))) usage(stat);
if((stat=nc_inq_var(ncid,varid,NULL,&vtype,&r,dimids,NULL))) usage(stat);
format.rank = (size_t)r;
if(format.rank == 0) usage(NC_EDIMSIZE);
if((stat=nc_inq_var_chunking(ncid,varid,&storage,format.chunklens))) usage(stat);
if(storage != NC_CHUNKED) usage(NC_EBADCHUNK);
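
In this chunk-dumping test utility the rank, loop indices, and capture count move from int to size_t, the printf format switches to %zu accordingly, and an int temporary remains only where nc_inq_var() still reports the dimension count through an int*. A minimal, self-contained sketch of the row-major odometer iteration that the utility's Odometer struct implements, using size_t throughout (hypothetical names, not the test code):

    /* Sketch: step a rank-dimensional index vector like an odometer. */
    #include <stdio.h>
    #include <stddef.h>

    #define MAXRANK 4

    typedef struct Odom {
        size_t rank;
        size_t index[MAXRANK];   /* current position */
        size_t stop[MAXRANK];    /* exclusive upper bounds */
    } Odom;

    static int odom_more(const Odom* o) { return o->index[0] < o->stop[0]; }

    static void odom_next(Odom* o)
    {
        size_t i = o->rank;
        while (i-- > 0) {                         /* increment rightmost digit, carry left */
            if (++o->index[i] < o->stop[i]) return;
            if (i == 0) { o->index[0] = o->stop[0]; return; }   /* fully exhausted */
            o->index[i] = 0;
        }
    }

    int main(void)
    {
        Odom o = { 2, {0, 0, 0, 0}, {2, 3, 0, 0} };
        while (odom_more(&o)) {
            printf("(%zu,%zu)\n", o.index[0], o.index[1]);      /* %zu for size_t */
            odom_next(&o);
        }
        return 0;
    }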

View File

@ -4,39 +4,21 @@ dimensions:
dim1 = 4 ;
dim2 = 4 ;
variables:
int ivar(dim0, dim1, dim2) ;
ivar:_FillValue = -2147483647 ;
ivar:_Storage = @chunked@ ;
ivar:_ChunkSizes = 4, 4, 4 ;
ivar:_Filter = @IH5@ ;
ivar:_Codecs = @ICX@ ;
float fvar(dim0, dim1, dim2) ;
fvar:_FillValue = 9.96921e+36f ;
fvar:_Storage = @chunked@ ;
fvar:_ChunkSizes = 4, 4, 4 ;
fvar:_Filter = @FH5@ ;
fvar:_Codecs = @FCX@ ;
int ivar(dim0, dim1, dim2) ;
ivar:_FillValue = -2147483647 ;
ivar:_Storage = @chunked@ ;
ivar:_ChunkSizes = 4, 4, 4 ;
ivar:_Filter = @IH5@ ;
ivar:_Codecs = @ICX@ ;
data:
ivar =
0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15,
16, 17, 18, 19,
20, 21, 22, 23,
24, 25, 26, 27,
28, 29, 30, 31,
32, 33, 34, 35,
36, 37, 38, 39,
40, 41, 42, 43,
44, 45, 46, 47,
48, 49, 50, 51,
52, 53, 54, 55,
56, 57, 58, 59,
60, 61, 62, 63 ;
fvar =
0.5, 1.5, 2.5, 3.5,
4.5, 5.5, 6.5, 7.5,
@ -54,4 +36,22 @@ data:
52.5, 53.5, 54.5, 55.5,
56.5, 57.5, 58.5, 59.5,
60.5, 61.5, 62.5, 63.5 ;
ivar =
0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15,
16, 17, 18, 19,
20, 21, 22, 23,
24, 25, 26, 27,
28, 29, 30, 31,
32, 33, 34, 35,
36, 37, 38, 39,
40, 41, 42, 43,
44, 45, 46, 47,
48, 49, 50, 51,
52, 53, 54, 55,
56, 57, 58, 59,
60, 61, 62, 63 ;
}

View File

@ -3,5 +3,5 @@
[2] /.nczgroup : (80) |{"dims": {"dim0": 4,"dim1": 4,"dim2": 4,"dim3": 4},"vars": ["var"],"groups": []}|
[3] /.zattrs : (68) |{"_NCProperties": "version=2,netcdf=4.8.1-development,nczarr=1.0.0"}|
[4] /.zgroup : (18) |{"zarr_format": 2}|
[6] /var/.nczarray : (67) |{"dimrefs": ["/dim0","/dim1","/dim2","/dim3"],"storage": "chunked"}|
[6] /var/.nczarray : (67) |{"dimension_references": ["/dim0","/dim1","/dim2","/dim3"],"storage": "chunked"}|
[7] /var/.zarray : (172) |{"zarr_format": 2,"shape": [4,4,4,4],"dtype": "<f4","chunks": [4,4,4,4],"order": "C","compressor":

View File

@ -1,8 +1,8 @@
netcdf ref_byte {
dimensions:
_zdim_20 = 20 ;
_Anonymous_Dim_20 = 20 ;
variables:
ubyte byte(_zdim_20, _zdim_20) ;
ubyte byte(_Anonymous_Dim_20, _Anonymous_Dim_20) ;
byte:_Storage = "chunked" ;
byte:_ChunkSizes = 20, 20 ;

View File

@ -1,8 +1,8 @@
netcdf ref_byte_fill_value_null {
dimensions:
_zdim_20 = 20 ;
_Anonymous_Dim_20 = 20 ;
variables:
ubyte byt(_zdim_20, _zdim_20) ;
ubyte byt(_Anonymous_Dim_20, _Anonymous_Dim_20) ;
byt:_Storage = "chunked" ;
byt:_ChunkSizes = 20, 20 ;
byt:_NoFill = "true" ;

Some files were not shown because too many files have changed in this diff.