Merge upstream into patch-4

This commit is contained in:
DWesl 2024-06-22 11:11:33 -04:00
commit aa4cdf8d2c
119 changed files with 3068 additions and 3372 deletions

View File

@ -2,6 +2,9 @@ name: NetCDF-C CMake CI - Windows
on: [pull_request, workflow_dispatch]
env:
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
@ -160,4 +163,4 @@ jobs:
run: |
cd build
PATH=~/tmp/bin:$PATH ctest . --rerun-failed --output-on-failure -VV
if: ${{ failure() }}
if: ${{ failure() }}

View File

@ -4,8 +4,11 @@
name: Run CDash Ubuntu/Linux netCDF Tests
on: workflow_dispatch
on: [workflow_dispatch]
env:
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true

View File

@ -300,7 +300,7 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --disable-xml2
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --disable-libxml2
if: ${{ success() }}
- name: Look at config.log if error

View File

@ -1,157 +0,0 @@
###
# Test S3 Support
# -- derived from run_tests_ubuntu.yml
###
###
# Build hdf5 dependencies and cache them in a combined directory.
###
name: Run S3 netCDF Tests (under Ubuntu Linux)
on: [workflow_dispatch]
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
build-deps-serial:
runs-on: ubuntu-latest
strategy:
matrix:
hdf5: [ 1.10.8, 1.12.2, 1.14.3 ]
steps:
- uses: actions/checkout@v4
- name: Install System dependencies
shell: bash -l {0}
run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev
###
# Installing libhdf5
###
- name: Cache libhdf5-${{ matrix.hdf5 }}
id: cache-hdf5
uses: actions/cache@v4
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}
- name: Build libhdf5-${{ matrix.hdf5 }}
if: steps.cache-hdf5.outputs.cache-hit != 'true'
run: |
set -x
wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-$(echo ${{ matrix.hdf5 }} | cut -d. -f 1,2)/hdf5-${{ matrix.hdf5 }}/src/hdf5-${{ matrix.hdf5 }}.tar.bz2
tar -jxf hdf5-${{ matrix.hdf5 }}.tar.bz2
pushd hdf5-${{ matrix.hdf5 }}
./configure --disable-static --enable-shared --prefix=${HOME}/environments/${{ matrix.hdf5 }} --enable-hl --with-szlib
make -j
make install -j
popd
#####
# S3 Autotools-based tests.
#####
##
# Serial
##
nc-ac-tests-s3-serial:
needs: build-deps-serial
runs-on: ubuntu-latest
strategy:
matrix:
hdf5: [ 1.14.3 ]
steps:
- uses: actions/checkout@v4
- name: Install System dependencies
shell: bash -l {0}
run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev openssl libssl-dev
###
# Set Environmental Variables
###
- run: echo "CFLAGS=-I${HOME}/environments/${{ matrix.hdf5 }}/include" >> $GITHUB_ENV
- run: echo "LDFLAGS=-L${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
- run: echo "LD_LIBRARY_PATH=${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
###
# Fetch Cache
###
- name: Fetch HDF Cache
id: cache-hdf
uses: actions/cache@v4
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}
- name: Check Cache
shell: bash -l {0}
run: ls ${HOME}/environments && ls ${HOME}/environments/${{ matrix.hdf5 }} && ls ${HOME}/environments/${{ matrix.hdf5}}/lib
###
# Configure and build
###
- name: Run autoconf
shell: bash -l {0}
run: autoreconf -if
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --disable-dap --enable-external-server-tests --enable-s3 --enable-s3-internal --with-s3-testing=public
if: ${{ success() }}
- name: Look at config.log if error
shell: bash -l {0}
run: cat config.log
if: ${{ failure() }}
- name: Print Summary
shell: bash -l {0}
run: cat libnetcdf.settings
- name: Build Library and Utilities
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make -j
if: ${{ success() }}
- name: Build Tests
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check TESTS="" -j
if: ${{ success() }}
- name: Run Tests
shell: bash -l {0}
env:
AWS_PROFILE: ${{ secrets.DEFAULT_PROFILE }}
run: |
mkdir -p ~/.aws
echo "" > ~/.aws/config
chmod go-x ~/.aws/config
echo "${AWS_PROFILE}" >> ~/.aws/config
LD_LIBRARY_PATH="/home/runner/work/netcdf-c/netcdf-c/liblib/.libs:${LD_LIBRARY_PATH}"
CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
if: ${{ success() }}
#####
# S3 CMake-based tests.
#####
##
# Serial
##
# T.B.D. nc-cmake-tests-s3-serial:

View File

@ -4,7 +4,10 @@
name: Run Ubuntu/Linux netCDF Tests
on: [pull_request, workflow_dispatch]
on: [pull_request,workflow_dispatch]
env:
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
@ -171,7 +174,7 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-xml2
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-libxml2
if: ${{ success() }}
- name: Look at config.log if error
@ -266,7 +269,7 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-xml2 --disable-shared --enable-static
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure --enable-hdf5 --enable-dap --disable-dap-remote-tests --enable-doxygen --enable-external-server-tests --disable-libxml2 --disable-shared --enable-static
if: ${{ success() }}
- name: Look at config.log if error

View File

@ -10,6 +10,7 @@ env:
SHELLOPTS: igncr
CHERE_INVOKING: 1
CYGWIN_NOWINPATH: 1
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
jobs:
build-and-test-autotools:

View File

@ -6,6 +6,10 @@
name: Run MSYS2, MinGW64-based Tests (Not Visual Studio)
env:
CPPFLAGS: "-D_BSD_SOURCE"
REMOTETESTDOWN: ${{ vars.REMOTETESTDOWN }}
on: [pull_request,workflow_dispatch]
concurrency:

View File

@ -85,6 +85,7 @@ if(UNAME)
getuname(osname -s)
getuname(osrel -r)
getuname(cpu -m)
getuname(host -n)
set(TMP_BUILDNAME "${osname}-${osrel}-${cpu}")
endif()
@ -225,6 +226,12 @@ include(GenerateExportHeader)
# Compiler and Linker Configuration
################################
# Set in support of https://github.com/Unidata/netcdf-c/issues/2700
if(${CMAKE_C_COMPILER_ID} MATCHES "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fhonor-infinities")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fhonor-infinities")
endif()
option(NETCDF_FIND_SHARED_LIBS "Find dynamically-built versions of dependent libraries" ${BUILD_SHARED_LIBS})
##
@ -382,13 +389,13 @@ if(MSVC)
endif()
# Option to build netCDF Version 2
OPTION (ENABLE_V2_API "Build netCDF Version 2." ON)
set(BUILD_V2 ${ENABLE_V2_API})
if(NOT ENABLE_V2_API)
OPTION (NETCDF_ENABLE_V2_API "Build netCDF Version 2." ON)
set(BUILD_V2 ${NETCDF_ENABLE_V2_API})
if(NOT NETCDF_ENABLE_V2_API)
set(NO_NETCDF_2 ON)
else(NOT ENABLE_V2_API)
else(NOT NETCDF_ENABLE_V2_API)
set(USE_NETCDF_2 TRUE)
endif(NOT ENABLE_V2_API)
endif(NOT NETCDF_ENABLE_V2_API)
# Option to build utilities
option(NETCDF_BUILD_UTILITIES "Build ncgen, ncgen3, ncdump." ON)
@ -431,40 +438,81 @@ endif()
# Format Option checks
################################
# We need to now treat enable-netcdf4 and enable-hdf5 as separate,
# but for back compatability, we need to treat enable-netcdf4
# as equivalent to enable-hdf5.
# We detect this using these rules:
# 1. if NETCDF_ENABLE_HDF5 is off then disable hdf5
# 2. if NETCDF_ENABLE_NETCDF_4 is off then disable hdf5
# 3. else enable hdf5
option(NETCDF_ENABLE_NETCDF_4 "Use HDF5." ON)
option(NETCDF_ENABLE_HDF5 "Use HDF5." ON)
if(NOT NETCDF_ENABLE_HDF5 OR NOT NETCDF_ENABLE_NETCDF_4)
set(NETCDF_ENABLE_HDF5 OFF CACHE BOOL "Use HDF5" FORCE)
# As a long term goal, and because it is now the case that
# -DNETCDF_ENABLE_NCZARR => USE_NETCDF4, so make the external options
# -DNETCDF_ENABLE-NETCDF-4 and _DNETCDF_ENABLE-NETCDF4 obsolete
# in favor of --NETCDF_ENABLE-HDF5.
# We will do the following for one more release cycle.
# 1. Make --NETCDF_ENABLE-NETCDF-4 be an alias for --NETCDF_ENABLE-NETCDF4.
# 2. Make --NETCDF_ENABLE-NETCDF4 an alias for --NETCDF_ENABLE-HDF5.
# 3. Internally, convert most (but not all) uses of USE_NETCDF_4 and USE_NETCDF4 to USE_HDF5.
# Collect the values of -DNETCDF_ENABLE-NETCDF-4, -DNETCDF_ENABLE-NETCDF4, and -DNETCDF_ENABLE-HDF5.
# Figure out which options are defined and process options
if(DEFINED NETCDF_ENABLE_NETCDF_4)
set(UNDEF_NETCDF_4 OFF CACHE BOOL "")
option(NETCDF_ENABLE_NETCDF_4 "" ON)
else()
set(UNDEF_NETCDF_4 ON CACHE BOOL "")
endif()
if(DEFINED NETCDF_ENABLE_NETCDF4)
set(UNDEF_NETCDF4 OFF CACHE BOOL "")
option(NETCDF_ENABLE_NETCDF4 "" ON)
else()
set(UNDEF_NETCDF4 ON CACHE BOOL "")
endif()
if(DEFINED NETCDF_ENABLE_HDF5)
set(UNDEF_HDF5 OFF CACHE BOOL "")
option(NETCDF_ENABLE_HDF5 "" ON)
else()
set(UNDEF_HDF5 ON CACHE BOOL "")
endif()
option(NETCDF_ENABLE_HDF4 "Build netCDF-4 with HDF4 read capability(HDF4, HDF5 and Zlib required)." OFF)
if(NOT UNDEF_NETCDF_4)
message(WARNING "NETCDF_ENABLE_NETCDF_4 is deprecated; please use NETCDF_ENABLE_HDF5")
endif()
if(NOT UNDEF_NETCDF4)
message(WARNING "NETCDF_ENABLE_NETCDF4 is deprecated; please use NETCDF_ENABLE_HDF5")
endif()
# NETCDF_ENABLE_NETCDF_4 overrides NETCDF_ENABLE_NETCDF4 if latter not defined.
if((NOT "${UNDEF_NETCDF_4}") AND UNDEF_NETCDF4)
set(NETCDF_ENABLE_NETCDF4 ${NETCDF_ENABLE_NETCDF_4} CACHE BOOL "" FORCE)
endif()
# NETCDF_ENABLE_NETCDF4 overrides NETCDF_ENABLE_HDF5 if latter not defined.
if((NOT "${UNDEF_NETCDF4}") AND UNDEF_HDF5)
set(NETCDF_ENABLE_HDF5 "${NETCDF_ENABLE_HDF5}" CACHE BOOL "" FORCE)
endif()
# Otherwise, use NETCDF_ENABLE_HDF5 default
if(UNDEF_HDF5)
set(NETCDF_ENABLE_HDF5 ON CACHE BOOL "" FORCE)
endif()
# Turn off enable_netcdf4 because it will be used
# as a shorthand for ENABLE_HDF5|ENABLE_HDF4|ENABLE_NCZARR
set(NETCDF_ENABLE_NETCDF4 OFF CACHE BOOL "" FORCE)
option(NETCDF_ENABLE_DAP "Enable DAP2 and DAP4 Client." ON)
option(NETCDF_ENABLE_NCZARR "Enable NCZarr Client." ON)
option(NETCDF_ENABLE_PNETCDF "Build with parallel I/O for CDF-1, 2, and 5 files using PnetCDF." OFF)
set(NETCDF_ENABLE_CDF5 AUTO CACHE STRING "AUTO")
option(NETCDF_ENABLE_CDF5 "Enable CDF5 support" ON)
if(DEFINED NETCDF_ENABLE_NETCDF4)
message(FATAL_ERROR "NETCDF_ENABLE_NETCDF4 is deprecated. Please use NETCDF_ENABLE_NETCDF_4 instead.")
option(NETCDF_ENABLE_HDF4 "Enable HDF4 Read Support" OFF)
option(NETCDF_ENABLE_HDF4_FILE_TESTS "Enable HDF4 File Tests" ${NETCDF_ENABLE_HDF4})
if(NETCDF_ENABLE_HDF4)
set(USE_HDF4 ON)
endif()
# Netcdf-4 support (i.e. libsrc4) is required by more than just HDF5 (e.g. NCZarr)
# So depending on what above formats are enabled, enable netcdf-4
if(NETCDF_ENABLE_HDF5 OR NETCDF_ENABLE_HDF4 OR NETCDF_ENABLE_NCZARR)
set(NETCDF_ENABLE_NETCDF_4 ON CACHE BOOL "Enable netCDF-4 API" FORCE)
set(NETCDF_ENABLE_NETCDF4 ON CACHE BOOL "Enable netCDF-4 API" FORCE)
endif()
# enable|disable all forms of network access
option(NETCDF_ENABLE_REMOTE_FUNCTIONALITY "Enable|disable all forms remote data access (DAP, S3, etc)" ON)
message(">>> NETCDF_ENABLE_REMOTE_FUNCTIONALITY=${NETCDF_ENABLE_REMOTE_FUNCTIONALITY}")
if(NOT NETCDF_ENABLE_REMOTE_FUNCTIONALITY)
message(WARNING "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => NETCDF_ENABLE_DAP[4]=NO")
set(NETCDF_ENABLE_DAP OFF CACHE BOOL "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => NETCDF_ENABLE_DAP=NO" FORCE)
@ -484,9 +532,9 @@ endif()
# Did the user specify a default minimum blocksize for posixio?
set(NCIO_MINBLOCKSIZE 256 CACHE STRING "Minimum I/O Blocksize for netCDF classic and 64-bit offset format files.")
if(NETCDF_ENABLE_NETCDF_4)
if(NETCDF_ENABLE_NETCDF4)
set(USE_NETCDF4 ON CACHE BOOL "")
set(NETCDF_ENABLE_NETCDF_4 ON CACHE BOOL "")
set(NETCDF_ENABLE_NETCDF4 ON CACHE BOOL "")
else()
set(USE_HDF4_FILE_TESTS OFF)
set(USE_HDF4 OFF)
@ -494,22 +542,15 @@ else()
set(NETCDF_ENABLE_HDF4 OFF)
endif()
# Option Logging, only valid for netcdf4.
# Option Logging, only valid for netcdf4 dispatchers.
option(NETCDF_ENABLE_LOGGING "Enable Logging." OFF)
if(NOT NETCDF_ENABLE_NETCDF_4)
set(NETCDF_ENABLE_LOGGING OFF)
endif()
if(NETCDF_ENABLE_LOGGING)
target_compile_definitions(netcdf PRIVATE LOGGING ENABLE_SET_LOG_LEVEL)
set(LOGGING ON)
set(ENABLE_SET_LOG_LEVEL ON)
endif()
option(NETCDF_ENABLE_SET_LOG_LEVEL_FUNC "Enable definition of nc_set_log_level()." ON)
if(NETCDF_ENABLE_NETCDF_4 AND NOT NETCDF_ENABLE_LOGGING AND NETCDF_ENABLE_SET_LOG_LEVEL_FUNC)
target_compile_definitions(netcdf PRIVATE -DENABLE_SET_LOG_LEVEL)
set(ENABLE_SET_LOG_LEVEL ON)
if(NOT NETCDF_ENABLE_NETCDF4)
set(NETCDF_ENABLE_LOGGING OFF)
endif()
set(LOGGING ${NETCDF_ENABLE_LOGGING})
set(NETCDF_ENABLE_SET_LOG_LEVEL ${NETCDF_ENABLE_LOGGING})
# Option to allow for strict null file padding.
# See https://github.com/Unidata/netcdf-c/issues/657 for more information
option(NETCDF_ENABLE_STRICT_NULL_BYTE_HEADER_PADDING "Enable strict null byte header padding." OFF)
@ -562,6 +603,12 @@ set(NETCDF_ENABLE_EXTERNAL_SERVER_TESTS OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_DAP_LONG_TESTS OFF CACHE BOOL "" FORCE)
endif()
# Provide a global control for remotetest.
if ("$ENV{REMOTETESTDOWN}" STREQUAL "yes")
message(WARNING "ENV(REMOTETESTDOWN) => NETCDF_ENABLE_DAP_REMOTE_TESTS == OFF")
set(NETCDF_ENABLE_DAP_REMOTE_TESTS OFF CACHE BOOL "" FORCE)
endif()
set(REMOTETESTSERVERS "remotetest.unidata.ucar.edu" CACHE STRING "test servers to use for remote test")
set(REMOTETESTSERVERS "remotetest.unidata.ucar.edu" CACHE STRING "test servers to use for remote test")
@ -760,13 +807,17 @@ if(NETCDF_ENABLE_TESTS)
set(NC_CTEST_DROP_SITE "cdash.unidata.ucar.edu:443" CACHE STRING "Dashboard location for CTest-based testing purposes.")
set(NC_CTEST_DROP_LOC_PREFIX "" CACHE STRING "Prefix for Dashboard location on remote server when using CTest-based testing.")
set(SUBMIT_URL "https://cdash.unidata.ucar.edu:443")
find_program(HOSTNAME_CMD NAMES hostname)
if(NOT WIN32)
set(HOSTNAME_ARG "-s")
endif()
if(HOSTNAME_CMD)
execute_process(COMMAND ${HOSTNAME_CMD} "${HOSTNAME_ARG}" OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE)
set(NC_CTEST_SITE "${HOSTNAME}" CACHE STRING "Hostname of test machine.")
if("${host}" STREQUAL "")
find_program(HOSTNAME_CMD NAMES hostname)
if(NOT WIN32)
set(HOSTNAME_ARG "-s")
endif()
if(HOSTNAME_CMD)
execute_process(COMMAND ${HOSTNAME_CMD} "${HOSTNAME_ARG}" OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE)
set(NC_CTEST_SITE "${HOSTNAME}" CACHE STRING "Hostname of test machine.")
endif()
else()
set(NC_CTEST_SITE "${host}" CACHE STRING "Hostname of test machine.")
endif()
if(NC_CTEST_SITE)
@ -835,12 +886,6 @@ if(NETCDF_ENABLE_FSYNC)
set(USE_FSYNC ON)
endif()
# Temporary
OPTION (ENABLE_JNA "Enable jna bug fix code." OFF)
if(ENABLE_JNA)
set(JNA ON)
endif()
# Linux specific large file support flags.
# Modelled after check in CMakeLists.txt for hdf5.
option(NETCDF_ENABLE_LARGE_FILE_SUPPORT "Enable large file support." ON)
@ -958,7 +1003,7 @@ endif()
option(NETCDF_ENABLE_PARALLEL_TESTS "Enable Parallel IO Tests. Requires HDF5/NetCDF4 with parallel I/O Support." "${HDF5_PARALLEL}")
if(NETCDF_ENABLE_PARALLEL_TESTS AND USE_PARALLEL)
set(TEST_PARALLEL ON CACHE BOOL "")
if(USE_NETCDF4)
if(USE_HDF5)
set(TEST_PARALLEL4 ON CACHE BOOL "")
endif()
endif()
@ -983,7 +1028,6 @@ if(NOT BUILD_SHARED_LIBS)
endif()
option(NETCDF_ENABLE_NCZARR_FILTERS "Enable NCZarr filters" ${NETCDF_ENABLE_PLUGINS})
option(NETCDF_ENABLE_NCZARR_FILTERS_TESTING "Enable NCZarr filter testing." ${NETCDF_ENABLE_NCZARR_FILTERS})
# Constraints
if (NOT NETCDF_ENABLE_PLUGINS AND NETCDF_ENABLE_NCZARR_FILTERS)
@ -992,16 +1036,10 @@ if (NOT NETCDF_ENABLE_PLUGINS AND NETCDF_ENABLE_NCZARR_FILTERS)
endif()
IF (NOT NETCDF_ENABLE_NCZARR)
message(WARNING "NETCDF_ENABLE_NCZARR==NO => NETCDF_ENABLE_NCZARR_FILTERS==NO AND NETCDF_ENABLE_NCZARR_FILTER_TESTING==NO")
message(WARNING "NETCDF_ENABLE_NCZARR==NO => NETCDF_ENABLE_NCZARR_FILTERS==NO")
set(NETCDF_ENABLE_NCZARR_FILTERS OFF CACHE BOOL "Disable NCZARR_FILTERS" FORCE)
endif()
IF (NOT NETCDF_ENABLE_NCZARR_FILTERS)
set(NETCDF_ENABLE_NCZARR_FILTER_TESTING OFF CACHE BOOL "Enable NCZarr Filter Testing" FORCE)
endif()
set(ENABLE_CLIENTSIDE_FILTERS OFF)
# Determine whether or not to generate documentation.
option(NETCDF_ENABLE_DOXYGEN "Enable generation of doxygen-based documentation." OFF)
if(NETCDF_ENABLE_DOXYGEN)
@ -1248,6 +1286,7 @@ CHECK_FUNCTION_EXISTS(strlcat HAVE_STRLCAT)
CHECK_FUNCTION_EXISTS(strlcpy HAVE_STRLCPY)
CHECK_FUNCTION_EXISTS(strdup HAVE_STRDUP)
CHECK_FUNCTION_EXISTS(strndup HAVE_STRNDUP)
CHECK_FUNCTION_EXISTS(strlen HAVE_STRLEN)
CHECK_FUNCTION_EXISTS(strtoll HAVE_STRTOLL)
CHECK_FUNCTION_EXISTS(strcasecmp HAVE_STRCASECMP)
CHECK_FUNCTION_EXISTS(strtoull HAVE_STRTOULL)
@ -1391,7 +1430,6 @@ endif(USE_HDF5)
if(USE_HDF4)
add_subdirectory(libhdf4)
add_subdirectory(hdf4_test)
endif(USE_HDF4)
if(NETCDF_ENABLE_DAP2)
@ -1430,6 +1468,8 @@ if(NETCDF_ENABLE_NCZARR)
DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/)
endif()
# Tests and files which depend on libnetcdf must be included
# *after* this line.
add_subdirectory(liblib)
if(NETCDF_ENABLE_PLUGINS)
@ -1446,10 +1486,13 @@ endif()
# Enable tests
if(NETCDF_ENABLE_TESTS)
if(ENABLE_V2_API)
if(NETCDF_ENABLE_V2_API)
add_subdirectory(nctest)
endif()
add_subdirectory(nc_test)
if(USE_HDF4)
add_subdirectory(hdf4_test)
endif()
if(USE_HDF5)
include_directories(h5_test)
add_subdirectory(nc_test4)
@ -1471,6 +1514,7 @@ if(NETCDF_ENABLE_TESTS)
add_subdirectory(unit_test)
endif(NETCDF_ENABLE_UNIT_TESTS)
if(NETCDF_ENABLE_NCZARR)
include_directories(nczarr_test)
add_subdirectory(nczarr_test)
endif()
endif()
@ -1494,12 +1538,16 @@ add_subdirectory(docs)
# in the libdir.
##
if(MSVC)
file(GLOB COPY_FILES ${CMAKE_PREFIX_PATH}/lib/*.lib)
foreach(CPP ${CMAKE_PREFIX_PATH})
file(GLOB COPY_FILES ${CPP}/lib/*.lib)
endforeach()
install(FILES ${COPY_FILES}
DESTINATION ${CMAKE_INSTALL_LIBDIR}
COMPONENT dependencies)
file(GLOB COPY_FILES ${CMAKE_PREFIX_PATH}/bin/*.dll)
foreach(CPP ${CMAKE_PREFIX_PATH})
file(GLOB COPY_FILES ${CPP}/bin/*.dll)
endforeach()
string(REGEX REPLACE "msv[.*].dll" "" COPY_FILES "${COPY_FILES}")
install(FILES ${COPY_FILES}
DESTINATION ${CMAKE_INSTALL_BINDIR}
@ -1520,6 +1568,9 @@ configure_file(
###
# Create pkgconfig files.
###
if(NOT DEFINED CMAKE_INSTALL_MANDIR)
set(CMAKE_INSTALL_MANDIR "${CMAKE_INSTALL_PREFIX}/share/man/")
endif()
if(NOT DEFINED CMAKE_INSTALL_LIBDIR)
set(CMAKE_INSTALL_LIBDIR lib)
@ -1604,8 +1655,6 @@ ENABLE_MAKEDIST(README.md COPYRIGHT RELEASE_NOTES.md INSTALL INSTALL.cmake test_
set(host_cpu "${cpu}")
set(host_vendor "${osname}")
set(host_os "${osrel}")
set(abs_top_builddir "${CMAKE_CURRENT_BINARY_DIR}")
set(abs_top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}")
string(RANDOM LENGTH 3 ALPHABET "0123456789" PLATFORMUID)
math(EXPR PLATFORMUID "${PLATFORMUID} + 1" OUTPUT_FORMAT DECIMAL)
@ -1619,9 +1668,10 @@ set(LDFLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS_${CMAKE_BU
is_disabled(BUILD_SHARED_LIBS enable_static)
is_enabled(BUILD_SHARED_LIBS enable_shared)
is_enabled(ENABLE_V2_API HAS_NC2)
is_enabled(NETCDF_ENABLE_NETCDF_4 HAS_NC4)
is_enabled(NETCDF_ENABLE_V2_API HAS_NC2)
is_enabled(NETCDF_ENABLE_NETCDF4 HAS_NC4)
is_enabled(NETCDF_ENABLE_HDF4 HAS_HDF4)
is_enabled(USE_HDF4 HAS_HDF4)
is_enabled(USE_HDF5 HAS_HDF5)
is_enabled(OFF HAS_BENCHMARKS)
is_enabled(STATUS_PNETCDF HAS_PNETCDF)
@ -1633,7 +1683,6 @@ is_enabled(NETCDF_ENABLE_DAP4 HAS_DAP4)
is_enabled(NETCDF_ENABLE_BYTERANGE HAS_BYTERANGE)
is_enabled(NETCDF_ENABLE_DISKLESS HAS_DISKLESS)
is_enabled(USE_MMAP HAS_MMAP)
is_enabled(JNA HAS_JNA)
is_enabled(ENABLE_ZERO_LENGTH_COORD_BOUND RELAX_COORD_BOUND)
is_enabled(USE_CDF5 HAS_CDF5)
is_enabled(NETCDF_ENABLE_ERANGE_FILL HAS_ERANGE_FILL)
@ -1644,7 +1693,6 @@ is_enabled(NETCDF_ENABLE_S3_INTERNAL HAS_S3_INTERNAL)
is_enabled(HAS_HDF5_ROS3 HAS_HDF5_ROS3)
is_enabled(NETCDF_ENABLE_NCZARR HAS_NCZARR)
is_enabled(NETCDF_ENABLE_NCZARR_ZIP HAS_NCZARR_ZIP)
is_enabled(NETCDF_ENABLE_NCZARR_ZIP DO_NCZARR_ZIP_TESTS)
is_enabled(NETCDF_ENABLE_QUANTIZE HAS_QUANTIZE)
is_enabled(NETCDF_ENABLE_LOGGING HAS_LOGGING)
is_enabled(NETCDF_ENABLE_FILTER_TESTING DO_FILTER_TESTS)
@ -1744,6 +1792,8 @@ configure_file(
set(EXTRA_DIST ${EXTRA_DIST} ${CMAKE_CURRENT_SOURCE_DIR}/test_common.in)
set(TOPSRCDIR "${CMAKE_CURRENT_SOURCE_DIR}")
set(TOPBUILDDIR "${CMAKE_CURRENT_BINARY_DIR}")
set(abs_top_builddir "${CMAKE_CURRENT_BINARY_DIR}")
set(abs_top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/test_common.in ${CMAKE_CURRENT_BINARY_DIR}/test_common.sh @ONLY NEWLINE_STYLE LF)
####
@ -1758,10 +1808,9 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/s3gc.in ${CMAKE_CURRENT_BINARY_DIR}/s
#####
# Build and copy nc_test4/findplugin.sh to various places
#####
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/nc_test4/findplugin.sh @ONLY NEWLINE_STYLE LF)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/nczarr_test/findplugin.sh @ONLY NEWLINE_STYLE LF)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/plugins/findplugin.sh @ONLY NEWLINE_STYLE LF)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nc_test4/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/examples/C/findplugin.sh @ONLY NEWLINE_STYLE LF)
foreach(CC nc_test4 nczarr_test v3_nczarr_test plugins examples/C)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/plugins/findplugin.in ${CMAKE_CURRENT_BINARY_DIR}/${CC}/findplugin.sh @ONLY NEWLINE_STYLE LF)
endforeach()
if(NETCDF_ENABLE_BENCHMARKS)
if(NETCDF_ENABLE_PARALLEL4)

View File

@ -103,9 +103,10 @@ libraries. (And, optionally, the szlib library). Versions required are
at least HDF5 1.8.9, zlib 1.2.5, and curl 7.18.0 or later.
(Optionally, if building with szlib, get szip 2.0 or later.)
HDF5 1.8.9 and zlib 1.2.7 packages are available from the <a
href="ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4">netCDF-4 ftp
site</a>. If you wish to use the remote data client code, then you
These packages are available at:
https://resources.unidata.ucar.edu/netcdf/netcdf-4/
If you wish to use the remote data client code, then you
will also need libcurl, which can be obtained from the <a
href="http://curl.haxx.se/download.html">curl website</a>.
@ -314,7 +315,7 @@ $ make check install
If parallel I/O access to netCDF classic, 64-bit offset, CDF-5 files is
also needed, the PnetCDF library should also be installed.
(Note: the previously recommended <a
href=ftp://ftp.unidata.ucar.edu/pub/netcdf/contrib/pnetcdf.h>replacement
href="https://resources.unidata.ucar.edu/netcdf/contrib/pnetcdf.h">replacement
pnetcdf.h</a> should no longer be used.) Then configure netCDF with the
"--enable-pnetcdf" option.
@ -361,7 +362,7 @@ Note: --disable prefix indicates that the option is normally enabled.
<tr><td>--enable-netcdf-4<td>build with netcdf-4<td>HDF5 and zlib
<tr><td>--enable-netcdf4<td>synonym for enable-netcdf-4
<tr><td>--enable-hdf4<td>build netcdf-4 with HDF4 read capability<td>HDF4, HDF5 and zlib
<tr><td>--enable-hdf4-file-tests<td>test ability to read HDF4 files<td>selected HDF4 files from Unidata ftp site
<tr><td>--enable-hdf4-file-tests<td>test ability to read HDF4 files<td>selected HDF4 files from Unidata resources site
<tr><td>--enable-pnetcdf<td>build netcdf-4 with parallel I/O for classic, 64-bit offset, and CDF-5 files using PnetCDF
<tr><td>--enable-extra-example-tests<td>Run extra example tests<td>--enable-netcdf-4,GNU sed
<tr><td>--enable-parallel-tests <td>run extra parallel IO tests<td>--enable-netcdf-4, parallel IO support
@ -384,7 +385,7 @@ Note: --disable prefix indicates that the option is normally enabled.
The benchmarks are a
bunch of extra tests, which are timed. We use these
tests to check netCDF performance.
<td>sample data files from the Unidata ftp site
<td>sample data files from the Unidata resources site
<tr><td>--disable-extreme-numbers
<td>don't use extreme numbers during testing, such as MAX_INT - 1<td>
<tr><td>--enable-dll<td>build a win32 DLL<td>mingw compiler

View File

@ -110,7 +110,7 @@ endif
# Build Cloud Storage if desired.
if NETCDF_ENABLE_NCZARR
ZARR_TEST_DIR = nczarr_test
ZARR_TEST_DIRS = nczarr_test
ZARR = libnczarr
endif
@ -129,7 +129,7 @@ if BUILD_TESTSETS
TESTDIRS = $(H5_TEST_DIR)
TESTDIRS += $(UNIT_TEST) $(V2_TEST) nc_test $(NC_TEST4)
TESTDIRS += $(BENCHMARKS_DIR) $(HDF4_TEST_DIR) $(NCDAP2TESTDIR) $(NCDAP4TESTDIR)
TESTDIRS += ${ZARR_TEST_DIR}
TESTDIRS += ${ZARR_TEST_DIRS}
endif
# This is the list of subdirs for which Makefiles will be constructed

View File

@ -7,12 +7,16 @@ This file contains a high-level description of this package's evolution. Release
## 4.9.3 - TBD
* Cleanup the option code for NETCDF_ENABLE_SET_LOG_LEVEL\[_FUNC\] See [Github #2931](https://github.com/Unidata/netcdf-c/issues/2931) for more information.
* Fix duplicate definition when using aws-sdk-cpp. See [Github #2928](https://github.com/Unidata/netcdf-c/issues/2928) for more information.
* Cleanup various obsolete options and do some code refactoring. See [Github #2926](https://github.com/Unidata/netcdf-c/issues/2926) for more information.
* Convert the Zarr-related ENABLE_XXX options to NETCDF_ENABLE_XXX options (part of the cmake overhaul). See [Github #2923](https://github.com/Unidata/netcdf-c/issues/2923) for more information.
* Refactor macro `_FillValue` to `NC_FillValue` to avoid conflict with libc++ headers. See [Github #2858](https://github.com/Unidata/netcdf-c/issues/2858) for more information.
* Changed `cmake` build options to be prefaced with `NETCDF`, to bring things in to line with best practices. This will permit a number of overall quality of life improvements to netCDF, in terms of allowing it to be more easily integrated with upstream projects via `FetchContent()`, `subdirectory()`, etc. Currently, the naming convention in use thus far will still work, but will result in warning messages about deprecation, and instructions on how to update your workflow. See [Github #2895](https://github.com/Unidata/netcdf-c/pull/2895) for more information.
* Fix some problems in handling S3 urls with missing regions. See [Github #2819](https://github.com/Unidata/netcdf-c/pull/2819).
* Incorporate a more modern look and feel to user documentation generated by Doxygen. See [Doxygen Awesome CSS](https://github.com/jothepro/doxygen-awesome-css) and [Github #2864](https://github.com/Unidata/netcdf-c/pull/2864) for more information.
* Added infrastructure to allow for `CMAKE_UNITY_BUILD`, (thanks \@jschueller). See [Github #2839](https://github.com/Unidata/netcdf-c/pull/2839) for more information.
* [cmake] Move dependency management out of the root-level `CMakeLists.txt` into two different files in the `cmake/` folder, `dependencies.cmake` and `netcdf_functions_macros.cmake`. See [Github #2838](https://github.com/Unidata/netcdf-c/pull/2838/) for more information.
* Fix some problems in handling S3 urls with missing regions. See [Github #2819](https://github.com/Unidata/netcdf-c/pull/2819).
* Obviate a number of irrelevant warnings. See [Github #2781](https://github.com/Unidata/netcdf-c/pull/2781).
* Improve the speed and data quantity for DAP4 queries. See [Github #2765](https://github.com/Unidata/netcdf-c/pull/2765).
* Remove the use of execinfo to programmatically dump the stack; it never worked. See [Github #2789](https://github.com/Unidata/netcdf-c/pull/2789).

View File

@ -14,8 +14,8 @@ find_package(MakeDist)
################################
# HDF4
################################
if(NETCDF_ENABLE_HDF4)
set(USE_HDF4 ON )
if(USE_HDF4)
set(NETCDF_USE_HDF4 ON )
# Check for include files, libraries.
find_path(MFHDF_H_INCLUDE_DIR mfhdf.h)
@ -65,11 +65,17 @@ if(NETCDF_ENABLE_HDF4)
if(NOT JPEG_LIB)
message(FATAL_ERROR "HDF4 Support enabled but cannot find libjpeg")
endif()
set(HDF4_LIBRARIES ${JPEG_LIB} ${HDF4_LIBRARIES} )
set(HDF4_LIBRARIES ${JPEG_LIB} ${HDF4_LIBRARIES} CACHE STRING "")
message(STATUS "Found JPEG libraries: ${JPEG_LIB}")
target_link_libraries(netcdf
PRIVATE
${HDF4_LIBRARIES}
)
# Option to enable HDF4 file tests.
option(NETCDF_ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata ftp site to test with (requires curl)." ON)
#option(NETCDF_ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata resources site to test with (requires curl)." ON)
if(NETCDF_ENABLE_HDF4_FILE_TESTS)
find_program(PROG_CURL NAMES curl)
if(PROG_CURL)
@ -77,10 +83,11 @@ if(NETCDF_ENABLE_HDF4)
else()
message(STATUS "Unable to locate 'curl'. Disabling hdf4 file tests.")
set(USE_HDF4_FILE_TESTS OFF )
set(NETCDF_ENABLE_HDF4_FILE_TESTS OFF)
endif()
set(USE_HDF4_FILE_TESTS ${USE_HDF4_FILE_TESTS} )
endif()
endif()
endif(USE_HDF4)
################################
# HDF5

View File

@ -25,13 +25,13 @@ message(STATUS "Checking for Deprecated Options")
list(APPEND opts BUILD_UTILITIES ENABLE_BENCHMARKS ENABLE_BYTERANGE ENABLE_CDF5 ENABLE_CONVERSION_WARNINGS)
list(APPEND opts ENABLE_DAP ENABLE_DAP2 ENABLE_DAP4 ENABLE_DISKLESS ENABLE_DOXYGEN ENABLE_ERANGE_FILL)
list(APPEND opts ENABLE_EXAMPLES ENABLE_EXAMPLES_TESTS ENABLE_EXTREME_NUMBERS ENABLE_FFIO ENABLE_FILTER_BLOSC)
list(APPEND opts ENABLEFILTER_BZ2 ENABLE_FILTER_SZIP ENABLE_FILTER_TESTING ENABLE_FILTER_ZSTD ENABLE_FSYNC)
list(APPEND opts ENABLE_FILTER_BZ2 ENABLE_FILTER_SZIP ENABLE_FILTER_TESTING ENABLE_FILTER_ZSTD ENABLE_FSYNC)
list(APPEND opts ENABLE_HDF4 ENABLE_HDF5 ENABLE_LARGE_FILE_SUPPORT ENABLE_LARGE_FILE_TESTS ENABLE_LIBXML2)
list(APPEND opts ENABLE_LOGGING ENABLE_METADATA_PERF_TESTS ENABLE_MMAP ENABLE_NCZARR ENABLE_NCZARR_FILTERS)
list(APPEND opts ENABLE_NCZARR_S3 ENABLE_NCZARR_ZIP ENABLE_NETCDF_4 ENABLE_PARALLEL4 ENABLE_PARALLEL_TESTS)
list(APPEND opts ENABLE_PLUGINS ENABLE_PNETCDF ENABLE_QUANTIZE ENABLE_REMOTE_FUNCTIONALITY ENABLE_S3 ENABLE_S3_AWS)
list(APPEND opts ENABLE_S3_INTERNAL ENABLE_STDIO ENABLE_STRICT_NULL_BYTE_HEADER_PADDING ENABLE_TESTS ENABLE_UNIT_TESTS)
list(APPEND opts FIND_SHARED_LIBS LIB_NAME)
list(APPEND opts FIND_SHARED_LIBS LIB_NAME ENABLE_HDF4_FILE_TESTS)
foreach(opt ${opts})
#MESSAGE(STATUS "Option: ${opt}")

View File

@ -148,6 +148,7 @@ macro(build_bin_test F)
add_executable(${F} "${CMAKE_CURRENT_BINARY_DIR}/${F}.c" ${ARGN})
endif()
target_link_libraries(${F} netcdf ${ALL_TLL_LIBS})
if(MSVC)
set_target_properties(${F}
PROPERTIES
@ -176,6 +177,8 @@ endmacro()
# Binary tests which are used by a script looking for a specific name.
macro(build_bin_test_no_prefix F)
build_bin_test(${F})
if(WIN32)
#SET_PROPERTY(TEST ${F} PROPERTY FOLDER "tests/")
set_target_properties(${F} PROPERTIES

View File

@ -130,8 +130,11 @@ are set when opening a binary file on Windows. */
/* if true, enable CDF5 Support */
#cmakedefine NETCDF_ENABLE_CDF5 1
/* if true, enable client side filters */
#cmakedefine ENABLE_CLIENT_FILTERS 1
/* if true, enable filter testing */
#cmakedefine NETCDF_ENABLE_FILTER_TESTING 1
/* if true, enable filter testing */
#cmakedefine NETCDF_ENABLE_FILTER_TESTING 1
/* if true, enable strict null byte header padding. */
#cmakedefine USE_STRICT_NULL_BYTE_HEADER_PADDING 1
@ -167,7 +170,7 @@ are set when opening a binary file on Windows. */
#cmakedefine NETCDF_ENABLE_S3_INTERNAL 1
/* if true, enable S3 testing*/
#cmakedefine WITH_S3_TESTING "PUBLIC"
#cmakedefine WITH_S3_TESTING "${WITH_S3_TESTING}"
/* S3 Test Bucket */
#define S3TESTBUCKET "${S3TESTBUCKET}"
@ -199,6 +202,9 @@ are set when opening a binary file on Windows. */
/* Define to 1 if blosc library available. */
#cmakedefine HAVE_BLOSC 1
/* if true enable tests that access external servers */
#cmakedefine NETCDF_ENABLE_EXTERNAL_SERVER_TESTS 1
/* Define to 1 if you have hdf5_coll_metadata_ops */
#cmakedefine HDF5_HAS_COLL_METADATA_OPS 1
@ -397,6 +403,9 @@ are set when opening a binary file on Windows. */
/* Define to 1 if you have the `strlcpy' function. */
#cmakedefine HAVE_STRLCPY 1
/* Define to 1 if you have the `strlen' function. */
#cmakedefine HAVE_STRLEN 1
/* Define to 1 if you have the `strtoll' function. */
#cmakedefine HAVE_STRTOLL 1
@ -459,9 +468,6 @@ with zip */
/* if true, HDF5 is at least version 1.10.5 and supports UTF8 paths */
#cmakedefine HDF5_UTF8_PATHS 1
/* if true, include JNA bug fix */
#cmakedefine JNA 1
/* do large file tests */
#cmakedefine LARGE_FILE_TESTS 1
@ -469,7 +475,8 @@ with zip */
#cmakedefine LOGGING 1
/* If true, define nc_set_log_level. */
#cmakedefine ENABLE_SET_LOG_LEVEL 1
#cmakedefine NETCDF_ENABLE_LOGGING 1
#cmakedefine NETCDF_ENABLE_SET_LOG_LEVEL 1
/* min blocksize for posixio. */
#cmakedefine NCIO_MINBLOCKSIZE ${NCIO_MINBLOCKSIZE}
@ -487,7 +494,7 @@ with zip */
#cmakedefine NETCDF_ENABLE_DOXYGEN 1
#cmakedefine NETCDF_ENABLE_INTERNAL_DOCS 1
#cmakedefine VALGRIND_TESTS 1
#cmakedefine ENABLE_CDMREMOTE 1
#cmakedefine NETCDF_ENABLE_CDMREMOTE 1
#cmakedefine USE_HDF5 1
#cmakedefine ENABLE_FILEINFO 1
#cmakedefine TEST_PARALLEL ${TEST_PARALLEL}
@ -570,6 +577,9 @@ with zip */
/* The size of `size_t', as computed by sizeof. */
#cmakedefine SIZEOF_SIZE_T ${SIZEOF_SIZE_T}
/* The size of `ssize_t', as computed by sizeof. */
#cmakedefine SIZEOF_SSIZE_T ${SIZEOF_SSIZE_T}
/* The size of `uint', as computed by sizeof. */
#cmakedefine SIZEOF_UINT ${SIZEOF_UINT}
@ -635,10 +645,13 @@ with zip */
#cmakedefine USE_STDIO 1
/* if true, multi-filters enabled*/
#cmakedefine ENABLE_MULTIFILTERS 1
#cmakedefine NETCDF_ENABLE_MULTIFILTERS 1
/* if true, enable nczarr blosc support */
#cmakedefine ENABLE_BLOSC 1
#cmakedefine NETCDF_ENABLE_BLOSC 1
/* if true enable tests that access external servers */
#cmakedefine NETCDF_ENABLE_EXTERNAL_SERVER_TESTS 1
/* Version number of package */
#cmakedefine VERSION "${netCDF_VERSION}"

View File

@ -132,30 +132,48 @@ AC_DEFINE_UNQUOTED([WINVERBUILD], [$WINVERBUILD], [windows version build])
AC_MSG_NOTICE([checking supported formats])
# An explicit disable of netcdf-4 | netcdf4 is treated as if it was disable-hdf5
AC_MSG_CHECKING([whether we should build with netcdf4 (alias for HDF5)])
AC_ARG_ENABLE([netcdf4], [AS_HELP_STRING([--disable-netcdf4],
[(Deprecated) Synonym for --enable-hdf5)])])
test "x$enable_netcdf4" = xno || enable_netcdf4=yes
AC_MSG_RESULT([$enable_netcdf4 (Deprecated) Please use --disable-hdf5)])
AC_MSG_CHECKING([whether we should build with netcdf-4 (alias for HDF5)])
AC_ARG_ENABLE([netcdf-4], [AS_HELP_STRING([--disable-netcdf-4],
[(synonym for --disable-netcdf4)])])
test "x$enable_netcdf_4" = xno || enable_netcdf_4=yes
AC_MSG_RESULT([$enable_netcdf_4])
# Propagate the alias
if test "x$enable_netcdf_4" = xno ; then enable_netcdf4=no; fi
if test "x$enable_netcdf4" = xno ; then enable_netcdf_4=no; fi
# As a long term goal, and because it is now the case that --enable-nczarr
# => USE_NETCDF4, make the external options --enable-netcdf-4 and
# --enable-netcdf4 obsolete in favor of --enable-hdf5
# We will do the following for one more release cycle.
# 1. Make --enable-netcdf-4 be an alias for --enable-netcdf4.
# 2. Make --enable-netcdf4 an alias for --enable-hdf5.
# 3. Internally, convert most uses of USE_NETCDF_4 ad USE_NETCDF4 to USE_HDF5
# Does the user want to use HDF5?
# Collect the values of --enable-netcdf-4, --enable-netcdf4, and --enable-hdf5.
# Also determine which have been explicitly set on the command line.
AC_ARG_ENABLE([netcdf-4], [AS_HELP_STRING([--enable-netcdf-4],
[(Deprecated) Synonym for --enable-hdf5; default yes])])
AC_ARG_ENABLE([netcdf4], [AS_HELP_STRING([--enable-netcdf4],
[(Deprecated) Synonym for --enable-hdf5; default yes])])
AC_ARG_ENABLE([hdf5], [AS_HELP_STRING([--enable-hdf5],[default yes])])
# Complain about the use of --enable-netcdf-4/--enable-netcdf4
if test "x$enable_netcdf_4" != x ; then
AC_MSG_WARN([--enable-netcdf-4 is deprecated; please use --enable-hdf5])
fi
if test "x$enable_netcdf4" != x ; then
AC_MSG_WARN([--enable-netcdf4 is deprecated; please use --enable-hdf5])
fi
# --enable-netcdf-4 overrides --enable-netcdf4 if latter not defined
if test "x$enable_netcdf_4" != x && test "x$enable_netcdf4" == x ; then
enable_netcdf4="$enable_netcdf_4"
fi
# --enable-netcdf4 overrides --enable-hdf5 if latter not defined
if test "x$enable_netcdf4" != x && test "x$enable_hdf5" == x ; then
enable_hdf5="$enable_netcdf4"
fi
# Otherwise, use --enable-hdf5
AC_MSG_CHECKING([whether we should build with HDF5])
AC_ARG_ENABLE([hdf5], [AS_HELP_STRING([--disable-hdf5],
[do not build with HDF5])])
test "x$enable_hdf5" = xno || enable_hdf5=yes
if test "x$enable_netcdf4" = xno ; then enable_hdf5=no ; fi
# disable-netcdf4 is synonym for disable-hdf5
AC_MSG_RESULT([$enable_hdf5])
# Turn off enable_netcdf4 because it will be used
# as a shorthand for enable_hdf5|enable_hdf4|enable_nczarr
enable_netcdf4=no
# Check whether we want to enable CDF5 support.
AC_MSG_CHECKING([whether CDF5 support should be disabled])
AC_ARG_ENABLE([cdf5],
@ -217,12 +235,8 @@ AC_MSG_RESULT($enable_nczarr)
# HDF5 | HDF4 | NCZarr => netcdf-4
if test "x$enable_hdf5" = xyes || test "x$enable_hdf4" = xyes || test "x$enable_nczarr" = xyes ; then
enable_netcdf_4=yes
enable_netcdf4=yes
fi
AC_MSG_CHECKING([whether netcdf-4 should be forcibly enabled])
AC_MSG_RESULT([$enable_netcdf_4])
# Synonym
enable_netcdf4=${enable_netcdf_4}
AC_MSG_NOTICE([checking user options])
@ -280,8 +294,8 @@ AC_SUBST([DOXYGEN_SERVER_BASED_SEARCH], ["NO"])
AC_ARG_ENABLE([doxygen-pdf-output],
[AS_HELP_STRING([--enable-doxygen-pdf-output],
[Build netCDF library documentation in PDF format. Experimental.])])
AM_CONDITIONAL([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [test "x$enable_doxygen_pdf_output" = xyes])
AC_SUBST([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [$enable_doxygen_pdf_output])
AM_CONDITIONAL([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [test "x$enable_doxygen_pdf_output" = xyes])
AC_ARG_ENABLE([dot],
[AS_HELP_STRING([--enable-dot],
@ -316,24 +330,6 @@ if test "x$enable_fsync" = xyes ; then
AC_DEFINE([USE_FSYNC], [1], [if true, include experimental fsync code])
fi
# Temporary until JNA bug is fixed (which is probably never).
# The problem being solved is this:
# > On Windows using the microsoft runtime, it is an error
# > for one library to free memory allocated by a different library.
# This is probably only an issue when using the netcdf-c library
# via JNA under Java.
AC_MSG_CHECKING([if jna bug workaround is enabled])
AC_ARG_ENABLE([jna],
[AS_HELP_STRING([--enable-jna],
[enable jna bug workaround])],
[],
[enable_jna=no])
test "x$enable_jna" = xno || enable_jna=yes
AC_MSG_RESULT($enable_jna)
if test "x$enable_jna" = xyes ; then
AC_DEFINE([JNA], [1], [if true, include jna bug workaround code])
fi
# Does the user want to turn off unit tests (useful for test coverage
# analysis).
AC_MSG_CHECKING([if unit tests should be enabled])
@ -354,9 +350,9 @@ AC_MSG_RESULT([$enable_dynamic_loading])
# Does the user want to turn on extra HDF4 file tests?
AC_MSG_CHECKING([whether to fetch some sample HDF4 files from Unidata ftp site to test HDF4 reading (requires wget)])
AC_MSG_CHECKING([whether to fetch some sample HDF4 files from Unidata resources site to test HDF4 reading (requires wget)])
AC_ARG_ENABLE([hdf4-file-tests], [AS_HELP_STRING([--enable-hdf4-file-tests],
[get some HDF4 files from Unidata ftp site and test that they can be read])])
[get some HDF4 files from Unidata resources site and test that they can be read])])
test "x$enable_hdf4" = xyes -a "x$enable_hdf4_file_tests" = xyes || enable_hdf4_file_tests=no
if test "x$enable_hdf4_file_tests" = xyes; then
AC_DEFINE([USE_HDF4_FILE_TESTS], 1, [If true, use use wget to fetch some sample HDF4 data, and then test against it.])
@ -454,17 +450,14 @@ AC_ARG_ENABLE([logging],
Ignored if netCDF-4 is not enabled.])])
test "x$enable_logging" = xyes || enable_logging=no
AC_MSG_RESULT([$enable_logging])
# Does the user want to turn off nc_set_log_level() function? (It will
# always be defined if --enable-logging is used.)
AC_MSG_CHECKING([whether nc_set_log_level() function is included (will do nothing unless enable-logging is also used)])
AC_ARG_ENABLE([set_log_level_func], [AS_HELP_STRING([--disable-set-log-level-func],
[disable the nc_set_log_level function])])
test "x$enable_set_log_level_func" = xno -a "x$enable_logging" = xno || enable_set_log_level_func=yes
if test "x$enable_set_log_level_func" = xyes -a "x$enable_netcdf_4" = xyes; then
AC_DEFINE([ENABLE_SET_LOG_LEVEL], 1, [If true, define nc_set_log_level.])
if test "x$enable_logging" = xyes; then
enable_set_log_level_func=yes
enable_set_log_level=yes
AC_DEFINE([NETCDF_ENABLE_SET_LOG_LEVEL], 1, [If true, enable nc_set_log_level function.])
else
enable_set_log_level_func=no
enable_set_log_level=no
fi
AC_MSG_RESULT($enable_set_log_level_func)
# CURLOPT_USERNAME is not defined until curl version 7.19.1
@ -633,6 +626,12 @@ if test "x$enable_dap" = "xno" ; then
fi
AC_MSG_RESULT($enable_dap_remote_tests)
# Provide a global control for remotetest.
if test "xREMOTETESTDOWN" = xyes ; then
AC_MSG_WARN("ENV(REMOTETESTDOWN) => netcdf_enable_dap_remote_tests == no")
enable_dap_remote_tests=no
fi
AC_MSG_CHECKING([whether use of external (non-unidata) servers should be enabled])
AC_ARG_ENABLE([external-server-tests],
[AS_HELP_STRING([--enable-external-server-tests (default off)],
@ -703,14 +702,6 @@ if test "x$enable_dap_remote_tests" = "xno" || test "x$enable_external_server_te
enable_dap_long_tests=no
fi
# Control zarr storage
if test "x$enable_nczarr" = xyes ; then
if test "x$enable_netcdf_4" = xno ; then
AC_MSG_WARN([netCDF-4 disabled, so you must not enable nczarr])
enable_nczarr=no
fi
fi
if test "x$enable_nczarr" = xyes; then
AC_DEFINE([NETCDF_ENABLE_NCZARR], [1], [if true, build NCZarr Client])
AC_SUBST(NETCDF_ENABLE_NCZARR)
@ -1096,7 +1087,7 @@ fi
AC_MSG_CHECKING([whether benchmarks should be run])
AC_ARG_ENABLE([benchmarks],
[AS_HELP_STRING([--enable-benchmarks],
[Run benchmarks. This will cause sample data files from the Unidata ftp
[Run benchmarks. This will cause sample data files from the Unidata resources
site to be fetched. The benchmarks are a bunch of extra tests, which
are timed. We use these tests to check netCDF performance.])])
test "x$enable_benchmarks" = xyes || enable_benchmarks=no
@ -1325,7 +1316,7 @@ AC_CHECK_TYPES([struct timespec])
# disable dap4 if hdf5 is disabled
if test "x$enable_hdf5" = "xno" ; then
AC_MSG_WARN([netcdf-4 not enabled; disabling DAP4])
AC_MSG_WARN([hdf5 not enabled; disabling DAP4])
enable_dap4=no
fi
@ -1585,7 +1576,7 @@ fi
AC_CHECK_LIB([m], [floor], [],
[AC_MSG_ERROR([Can't find or link to the math library.])])
if test "x$enable_netcdf_4" = xyes; then
if test "x$enable_netcdf4" = xyes; then
AC_DEFINE([USE_NETCDF4], [1], [if true, build netCDF-4])
fi
@ -1765,7 +1756,7 @@ if test "x$enable_pnetcdf" = xyes -o "x$enable_parallel4" = xyes; then
else
enable_parallel=no
fi
AM_CONDITIONAL(ENABLE_PARALLEL, [test x$enable_parallel = xyes ])
AM_CONDITIONAL(NETCDF_ENABLE_PARALLEL, [test x$enable_parallel = xyes ])
if test "x$hdf5_parallel" = xyes; then
# Provide more precise parallel control
@ -1841,7 +1832,7 @@ fi
AC_SUBST(M4FLAGS)
# No logging for netcdf-3.
if test "x$enable_netcdf_4" = xno; then
if test "x$enable_netcdf4" = xno; then
enable_logging=no
fi
if test "x$enable_logging" = xyes; then
@ -1875,25 +1866,15 @@ AC_MSG_WARN([--disable-plugins => --disable-filter-testing])
enable_filter_testing=no
fi
if test "x$enable_filter_testing" = xno; then
AC_MSG_WARN([--disable-filter-testing => --disable-nczarr-filter-testing])
enable_nczarr_filter_testing=no
fi
if test "x$enable_nczarr" = xno; then
AC_MSG_WARN([--disable-nczarr => --disable-nczarr-filters])
enable_nczarr_filters=no
enable_nczarr_filter_testing=no
fi
if test "x$enable_nczarr_filters" = xyes; then
AC_DEFINE([NETCDF_ENABLE_NCZARR_FILTERS], [1], [if true, enable NCZarr filters])
fi
# Client side filter registration is permanently disabled
enable_clientside_filters=no
AM_CONDITIONAL(ENABLE_CLIENTSIDE_FILTERS, [test x$enable_clientside_filters = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_FILTER_TESTING, [test x$enable_filter_testing = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR_FILTERS, [test x$enable_nczarr_filters = xyes])
@ -1915,20 +1896,21 @@ AM_CONDITIONAL(NETCDF_ENABLE_DAP_LONG_TESTS, [test "x$enable_dap_long_tests" = x
AM_CONDITIONAL(USE_PNETCDF_DIR, [test ! "x$PNETCDFDIR" = x])
AM_CONDITIONAL(USE_LOGGING, [test "x$enable_logging" = xyes])
AM_CONDITIONAL(CROSS_COMPILING, [test "x$cross_compiling" = xyes])
AM_CONDITIONAL(USE_NETCDF4, [test x$enable_netcdf_4 = xyes])
AM_CONDITIONAL(USE_NETCDF4, [test x$enable_netcdf4 = xyes])
AM_CONDITIONAL(USE_HDF5, [test x$enable_hdf5 = xyes])
AM_CONDITIONAL(USE_HDF4, [test x$enable_hdf4 = xyes])
AM_CONDITIONAL(USE_HDF4_FILE_TESTS, [test x$enable_hdf4_file_tests = xyes])
AM_CONDITIONAL(USE_RENAMEV3, [test x$enable_netcdf_4 = xyes -o x$enable_dap = xyes])
AM_CONDITIONAL(USE_RENAMEV3, [test x$enable_netcdf4 = xyes -o x$enable_dap = xyes])
AM_CONDITIONAL(USE_PNETCDF, [test x$enable_pnetcdf = xyes])
AM_CONDITIONAL(USE_DISPATCH, [test x$enable_dispatch = xyes])
AM_CONDITIONAL(BUILD_MMAP, [test x$enable_mmap = xyes])
AM_CONDITIONAL(BUILD_DOCS, [test x$enable_doxygen = xyes])
AM_CONDITIONAL(SHOW_DOXYGEN_TAG_LIST, [test x$enable_doxygen_tasks = xyes])
AM_CONDITIONAL(ENABLE_METADATA_PERF, [test x$enable_metadata_perf = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_METADATA_PERF, [test x$enable_metadata_perf = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_BYTERANGE, [test "x$enable_byterange" = xyes])
AM_CONDITIONAL(RELAX_COORD_BOUND, [test "xyes" = xyes])
AM_CONDITIONAL(HAS_PAR_FILTERS, [test x$hdf5_supports_par_filters = xyes ])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR_FILTERS, [test x$enable_nczarr_filters = xyes])
# We need to simplify the set of S3 and Zarr flag combinations
AM_CONDITIONAL(NETCDF_ENABLE_S3, [test "x$enable_s3" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_AWS, [test "x$enable_s3_aws" = xyes])
@ -1946,7 +1928,7 @@ AM_CONDITIONAL(HAVE_ZSTD, [test "x$have_zstd" = xyes])
# If the machine doesn't have a long long, and we want netCDF-4, then
# we've got problems!
if test "x$enable_netcdf_4" = xyes; then
if test "x$enable_netcdf4" = xyes; then
AC_TYPE_LONG_LONG_INT
AC_TYPE_UNSIGNED_LONG_LONG_INT
dnl if test ! "x$ac_cv_type_long_long_int" = xyes -o ! "x$ac_cv_type_unsigned_long_long_int" = xyes; then
@ -1963,7 +1945,7 @@ fi
# meteor strike.
AC_MSG_CHECKING([what to call the output of the ftpbin target])
BINFILE_NAME=binary-netcdf-$PACKAGE_VERSION
test "x$enable_netcdf_4" = xno && BINFILE_NAME=${BINFILE_NAME}_nc3
test "x$enable_netcdf4" = xno && BINFILE_NAME=${BINFILE_NAME}_nc3
BINFILE_NAME=${BINFILE_NAME}.tar
AC_SUBST(BINFILE_NAME)
AC_MSG_RESULT([$BINFILE_NAME $FC $CXX])
@ -2024,7 +2006,6 @@ AC_SUBST(HAS_DAP,[$enable_dap])
AC_SUBST(HAS_DAP2,[$enable_dap])
AC_SUBST(HAS_DAP4,[$enable_dap4])
AC_SUBST(HAS_NC2,[$nc_build_v2])
AC_SUBST(HAS_NC4,[$enable_netcdf_4])
AC_SUBST(HAS_CDF5,[$enable_cdf5])
AC_SUBST(HAS_HDF4,[$enable_hdf4])
AC_SUBST(HAS_BENCHMARKS,[$enable_benchmarks])
@ -2035,7 +2016,6 @@ AC_SUBST(HAS_PARALLEL,[$enable_parallel])
AC_SUBST(HAS_PARALLEL4,[$enable_parallel4])
AC_SUBST(HAS_DISKLESS,[yes])
AC_SUBST(HAS_MMAP,[$enable_mmap])
AC_SUBST(HAS_JNA,[$enable_jna])
AC_SUBST(HAS_ERANGE_FILL,[$enable_erange_fill])
AC_SUBST(HAS_BYTERANGE,[$enable_byterange])
AC_SUBST(RELAX_COORD_BOUND,[yes])
@ -2047,7 +2027,6 @@ AC_SUBST(HAS_HDF5_ROS3,[$has_hdf5_ros3])
AC_SUBST(HAS_NCZARR,[$enable_nczarr])
AC_SUBST(NETCDF_ENABLE_S3_TESTING,[$with_s3_testing])
AC_SUBST(HAS_NCZARR_ZIP,[$enable_nczarr_zip])
AC_SUBST(DO_NCZARR_ZIP_TESTS,[$enable_nczarr_zip])
AC_SUBST(HAS_QUANTIZE,[$enable_quantize])
AC_SUBST(HAS_LOGGING,[$enable_logging])
AC_SUBST(DO_FILTER_TESTS,[$enable_filter_testing])
@ -2190,7 +2169,6 @@ AC_DEFUN([AX_SET_META],[
#####
AC_SUBST([NC_VERSION]) NC_VERSION=$VERSION
AX_SET_META([NC_HAS_NC2],[$nc_build_v2],[yes])
AX_SET_META([NC_HAS_NC4],[$enable_netcdf_4],[yes])
AX_SET_META([NC_HAS_HDF4],[$enable_hdf4],[yes])
AX_SET_META([NC_HAS_BENCHMARKS],[$enable_benchmarks],[yes])
AX_SET_META([NC_HAS_HDF5],[$enable_hdf5],[yes])
@ -2198,7 +2176,6 @@ AX_SET_META([NC_HAS_DAP2],[$enable_dap],[yes])
AX_SET_META([NC_HAS_DAP4],[$enable_dap4],[yes])
AX_SET_META([NC_HAS_DISKLESS],[yes],[yes])
AX_SET_META([NC_HAS_MMAP],[$enable_mmap],[yes])
AX_SET_META([NC_HAS_JNA],[$enable_jna],[yes])
AX_SET_META([NC_HAS_PNETCDF],[$enable_pnetcdf],[yes])
AX_SET_META([NC_HAS_PARALLEL],[$enable_parallel],[yes])
AX_SET_META([NC_HAS_PARALLEL4],[$enable_parallel4],[yes])
@ -2243,10 +2220,9 @@ AC_MSG_NOTICE([generating header files and makefiles])
AC_CONFIG_FILES(test_common.sh:test_common.in)
AC_CONFIG_FILES(s3cleanup.sh:s3cleanup.in, [chmod ugo+x s3cleanup.sh])
AC_CONFIG_FILES(s3gc.sh:s3gc.in, [chmod ugo+x s3gc.sh])
AC_CONFIG_FILES(nc_test4/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x nc_test4/findplugin.sh])
AC_CONFIG_FILES(nczarr_test/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x nczarr_test/findplugin.sh])
AC_CONFIG_FILES(plugins/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x plugins/findplugin.sh])
AC_CONFIG_FILES(examples/C/findplugin.sh:nc_test4/findplugin.in, [chmod ugo+x examples/C/findplugin.sh])
for FP in plugins nc_test4 nczarr_test examples/C ; do
AC_CONFIG_FILES(${FP}/findplugin.sh:plugins/findplugin.in, [chmod ugo+x ${FP}/findplugin.sh])
done
AC_CONFIG_FILES(ncdap_test/findtestserver.c:ncdap_test/findtestserver.c.in, [chmod ugo+x ncdap_test/findtestserver.c])
AC_CONFIG_FILES([nc_test/run_pnetcdf_tests.sh:nc_test/run_pnetcdf_tests.sh.in],[chmod ugo+x nc_test/run_pnetcdf_tests.sh])
AC_CONFIG_FILES(dap4_test/findtestserver4.c:ncdap_test/findtestserver.c.in)

View File

@ -93,8 +93,8 @@ clean-local: clean-local-check
.PHONY: clean-local-check
clean-local-check:
-rm -rf results results_*
-rm -f .dodsrc .daprc .ncrc .netrc
rm -rf results results_*
rm -f .dodsrc .daprc .ncrc .netrc
# The shell file maketests.sh is used to build the testdata
# for dap4 testing. It creates and fills the directories

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
data:

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
}

View File

@ -5,12 +5,12 @@
xmlns="http://xml.opendap.org/ns/DAP/4.0#"
xmlns:dap="http://xml.opendap.org/ns/DAP/4.0#">
<Dimensions>
<Dimension name="_AnonymousDim2" size="2"/>
<Dimension name="_Anonymous_Dim_2" size="2"/>
</Dimensions>
<Types>
<Structure name="v1">
<Seq name="f1" type="/v1_f1_t">
<Dim name="/_AnonymousDim2"/>
<Dim name="/_Anonymous_Dim_2"/>
</Sequence>
</Structure>
<Structure name="v1_f1_base">

View File

@ -1,9 +1,9 @@
netcdf amsre_20060131v5 {
dimensions:
_AnonymousDim3 = 3 ;
_AnonymousDim6 = 6 ;
_Anonymous_Dim_3 = 3 ;
_Anonymous_Dim_6 = 6 ;
variables:
byte time_a(_AnonymousDim3, _AnonymousDim6) ;
byte time_a(_Anonymous_Dim_3, _Anonymous_Dim_6) ;
string time_a:Equator_Crossing_Time = "1:30 PM" ;
data:

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
data:

View File

@ -2,13 +2,13 @@ netcdf test_atomic_array {
types:
opaque(16) opaque16_t ;
dimensions:
_AnonymousDim1 = 1 ;
_AnonymousDim2 = 2 ;
_Anonymous_Dim_1 = 1 ;
_Anonymous_Dim_2 = 2 ;
variables:
ubyte vu8(_AnonymousDim1, _AnonymousDim2) ;
double vd(_AnonymousDim1) ;
string vs(_AnonymousDim1, _AnonymousDim1) ;
opaque16_t vo(_AnonymousDim1, _AnonymousDim1) ;
ubyte vu8(_Anonymous_Dim_1, _Anonymous_Dim_2) ;
double vd(_Anonymous_Dim_1) ;
string vs(_Anonymous_Dim_1, _Anonymous_Dim_1) ;
opaque16_t vo(_Anonymous_Dim_1, _Anonymous_Dim_1) ;
// global attributes:
string :_dap4.ce = "/vu8[1][0:2:2];/vd[1];/vs[1][0];/vo[0][1]" ;

View File

@ -1,8 +1,8 @@
netcdf test_atomic_array {
dimensions:
_AnonymousDim3 = 3 ;
_Anonymous_Dim_3 = 3 ;
variables:
short v16(_AnonymousDim3) ;
short v16(_Anonymous_Dim_3) ;
// global attributes:
string :_dap4.ce = "/v16[0:1,3]" ;

View File

@ -1,8 +1,8 @@
netcdf test_atomic_array {
dimensions:
_AnonymousDim3 = 3 ;
_Anonymous_Dim_3 = 3 ;
variables:
short v16(_AnonymousDim3) ;
short v16(_Anonymous_Dim_3) ;
// global attributes:
string :_dap4.ce = "/v16[3,0:1]" ;

View File

@ -5,9 +5,9 @@ types:
Altocumulus = 7, Cirrostratus = 8, Cirrocumulus = 9, Cirrus = 10,
Missing = 127} ;
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
cloud_class_t primary_cloud(_AnonymousDim2) ;
cloud_class_t primary_cloud(_Anonymous_Dim_2) ;
cloud_class_t primary_cloud:_FillValue = Missing ;
// global attributes:

View File

@ -1,8 +1,8 @@
netcdf test_one_vararray {
dimensions:
_AnonymousDim1 = 1 ;
_Anonymous_Dim_1 = 1 ;
variables:
int t(_AnonymousDim1) ;
int t(_Anonymous_Dim_1) ;
// global attributes:
string :_dap4.ce = "/t[1]" ;

View File

@ -1,8 +1,8 @@
netcdf test_one_vararray {
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
int t(_AnonymousDim2) ;
int t(_Anonymous_Dim_2) ;
// global attributes:
string :_dap4.ce = "/t[0:1]" ;

View File

@ -2,10 +2,10 @@ netcdf test_opaque_array {
types:
opaque(16) opaque16_t ;
dimensions:
_AnonymousDim1 = 1 ;
_AnonymousDim2 = 2 ;
_Anonymous_Dim_1 = 1 ;
_Anonymous_Dim_2 = 2 ;
variables:
opaque16_t vo2(_AnonymousDim1, _AnonymousDim2) ;
opaque16_t vo2(_Anonymous_Dim_1, _Anonymous_Dim_2) ;
// global attributes:
string :_dap4.ce = "/vo2[1][0:1]" ;

View File

@ -5,9 +5,9 @@ types:
int y ;
}; // s_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
s_t s(_AnonymousDim2, _AnonymousDim2) ;
s_t s(_Anonymous_Dim_2, _Anonymous_Dim_2) ;
// global attributes:
string :_dap4.ce = "/s[0:2:3][0:1]" ;

View File

@ -8,7 +8,7 @@ types:
v1_f1_t f1(2) ;
}; // v1_t
dimensions:
_AnonymousDim2 = 2 ;
_Anonymous_Dim_2 = 2 ;
variables:
v1_t v1 ;
data:

View File

@ -1,9 +1,9 @@
netcdf \2004050300_eta_211 {
dimensions:
record = UNLIMITED ; // (1 currently)
_AnonymousDim1 = 1 ;
_AnonymousDim4 = 4 ;
_AnonymousDim15 = 15 ;
_Anonymous_Dim_1 = 1 ;
_Anonymous_Dim_4 = 4 ;
_Anonymous_Dim_15 = 15 ;
variables:
double reftime(record) ;
string reftime:units = "hours since 1992-1-1" ;
@ -11,7 +11,7 @@ variables:
double valtime(record) ;
string valtime:units = "hours since 1992-1-1" ;
string valtime:long_name = "valid time" ;
float Z_sfc(_AnonymousDim1, _AnonymousDim4, _AnonymousDim15) ;
float Z_sfc(_Anonymous_Dim_1, _Anonymous_Dim_4, _Anonymous_Dim_15) ;
string Z_sfc:navigation = "nav" ;
float Z_sfc:_FillValue = -9999.006f ;
string Z_sfc:units = "gp m" ;

View File

@ -115,7 +115,7 @@ It is strongly recommended that applicable conventions be followed unless there
`Conventions`
> If present, 'Conventions' is a global attribute that is a character array for the name of the conventions followed by the dataset. Originally, these conventions were named by a string that was interpreted as a directory name relative to the directory /pub/netcdf/Conventions/ on the now defunct host ftp.unidata.ucar.edu. The web page https://www.unidata.ucar.edu/netcdf/conventions.html is now the preferred and authoritative location for registering a URI reference to a set of conventions maintained elsewhere. Authors of new conventions should submit a request to support-netcdf@unidata.ucar.edu for listing on the Unidata conventions web page.
> If present, 'Conventions' is a global attribute that is a character array for the name of the conventions followed by the dataset. Originally, these conventions were named by a string that was interpreted as a directory name relative to the directory /pub/netcdf/Conventions/ on the now defunct ftp host. The web page https://www.unidata.ucar.edu/netcdf/conventions.html is now the preferred and authoritative location for registering a URI reference to a set of conventions maintained elsewhere. Authors of new conventions should submit a request to support-netcdf@unidata.ucar.edu for listing on the Unidata conventions web page.
<p>

View File

@ -28,7 +28,7 @@
recently made some use of netCDF, based on
<ol>
<li>
downloads from the Unidata site (ftp and http)
downloads from the Unidata downloads site
</li>
<li>
subscribers and posters to netCDF mailing lists

View File

@ -6,18 +6,20 @@
# See netcdf-c/COPYRIGHT file for more info.
# Copy some test files from current source dir to out-of-tree build dir.
FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh ${CMAKE_CURRENT_SOURCE_DIR}/*.hdf4)
FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
IF(WIN32)
FILE(COPY ${COPY_FILES} DESTINATION ${RUNTIME_OUTPUT_DIRECTORY}/)
ENDIF()
file(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh ${CMAKE_CURRENT_SOURCE_DIR}/*.hdf4)
file(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
if(WIN32)
file(COPY ${COPY_FILES} DESTINATION ${RUNTIME_OUTPUT_DIRECTORY}/)
endif()
IF(USE_HDF4_FILE_TESTS AND NOT WIN32)
if(USE_HDF4_FILE_TESTS AND NOT WIN32)
build_bin_test_no_prefix(tst_interops2)
target_link_libraries(tst_interops2 netcdf ${ALL_TLL_LIBS})
build_bin_test_no_prefix(tst_interops3)
add_bin_test(hdf4_test tst_chunk_hdf4)
add_bin_test(hdf4_test tst_h4_lendian)
add_bin_test(hdf4_test tst_hdf4_extra)
add_sh_test(hdf4_test run_get_hdf4_files)
add_sh_test(hdf4_test run_formatx_hdf4)
ENDIF()
endif()

View File

@ -14,7 +14,7 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi
# Get a file from the resources site; retry several times
getfile() {
DATAFILE="https://resources.unidata.ucar.edu/sample_data/hdf4/$1.gz"
DATAFILE="https://resources.unidata.ucar.edu/netcdf/sample_data/hdf4/$1.gz"
for try in 1 2 3 4 ; do # try 4 times
@ -30,7 +30,7 @@ getfile() {
set -e
echo ""
echo "Getting HDF4 sample files from Unidata FTP site..."
echo "Getting HDF4 sample files from Unidata resources site..."
file_list="AMSR_E_L2_Rain_V10_200905312326_A.hdf AMSR_E_L3_DailyLand_V06_20020619.hdf \
MYD29.A2009152.0000.005.2009153124331.hdf MYD29.A2002185.0000.005.2007160150627.hdf \

View File

@ -7,7 +7,10 @@
# Ed Hartnett, Dennis Heimbigner, Ward Fisher
include_HEADERS = netcdf.h netcdf_meta.h netcdf_mem.h netcdf_aux.h \
netcdf_filter.h netcdf_filter_build.h netcdf_filter_hdf5_build.h netcdf_dispatch.h netcdf_json.h
netcdf_filter.h netcdf_filter_build.h netcdf_filter_hdf5_build.h netcdf_dispatch.h
# Built source
include_HEADERS += netcdf_json.h
if BUILD_PARALLEL
include_HEADERS += netcdf_par.h
@ -32,6 +35,8 @@ endif
EXTRA_DIST = CMakeLists.txt XGetopt.h netcdf_meta.h.in netcdf_dispatch.h.in
BUILT_SOURCES = netcdf_json.h
# netcdf_json.h is constructed as a header-only file for use by
# nczarr code wrappers in the plugin directory. It is
# constructed by joining libdispatch/ncjson.c with
@ -40,7 +45,9 @@ EXTRA_DIST = CMakeLists.txt XGetopt.h netcdf_meta.h.in netcdf_dispatch.h.in
# static inside netcdf_json.h. This is an ugly hack to avoid
# having to reference libnetcdf in the nczarr code wrappers.
# Give the recipe for rebuilding netcdf_json.h
makepluginjson::
sed -e 's/NCJSON_H/NETCDF_JSON_H/' -e '/ncjson.h/d' <${srcdir}/ncjson.h > netcdf_json.h
# Give the recipe for building netcdf_json.h
netcdf_json.h: ${top_srcdir}/libdispatch/ncjson.c ${top_srcdir}/include/ncjson.h
sed -e 's/NCJSON_H/NETCDF_JSON_H/' -e '/ncjson.h/d' -e '/#endif[^!]*!NETCDF_JSON_H!/d' <${srcdir}/ncjson.h > netcdf_json.h
echo '#ifdef NETCDF_JSON_H' >> netcdf_json.h
sed -e '/ncjson.h/d' < ${srcdir}/../libdispatch/ncjson.c >> netcdf_json.h
echo '#endif /*NETCDF_JSON_H*/' >> netcdf_json.h

View File

@ -220,4 +220,9 @@ EXTERNL hid_t nc4_H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id,
int hdf5set_format_compatibility(hid_t fapl_id);
/* HDF5 initialization/finalization */
extern int nc4_hdf5_initialized;
extern void nc4_hdf5_initialize(void);
extern void nc4_hdf5_finalize(void);
#endif /* _HDF5INTERNAL_ */

View File

@ -137,4 +137,12 @@ EXTERNL int NC_copy_data_all(NC* nc, nc_type xtypeid, const void* memory, size_t
#define USED2INFO(nc) ((1<<(nc->dispatch->model)) & (1<<NC_FORMATX_DAP2))
#define USED4INFO(nc) ((1<<(nc->dispatch->model)) & (1<<NC_FORMATX_DAP4))
/* In DAP4 and Zarr (and maybe other places in the future)
we may have dimensions with a size, but no name.
In this case we need to create a name based on the size.
As a rule, the dimension name is NCDIMANON_<n> where n is the size
and NCDIMANON is a prefix defined here.
*/
#define NCDIMANON "_Anonymous_Dim"
#endif /* _NC_H_ */

View File

@ -15,7 +15,6 @@
#else
# include <stdint.h>
#endif /* HAVE_STDINT_H */
#include <sys/types.h> /* off_t */
#include "netcdf.h"
#ifdef USE_PARALLEL
#include "netcdf_par.h"

View File

@ -13,7 +13,6 @@
#include "netcdf.h"
#include "config.h"
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>
@ -55,9 +54,6 @@ typedef enum {NCNAT, NCVAR, NCDIM, NCATT, NCTYP, NCFLD, NCGRP, NCFIL} NC_SORT;
/** One mega-byte. */
#define MEGABYTE 1048576
/** The HDF5 ID for the szip filter. */
#define HDF5_FILTER_SZIP 4
#define X_SCHAR_MIN (-128) /**< Minimum signed char value. */
#define X_SCHAR_MAX 127 /**< Maximum signed char value. */
#define X_UCHAR_MAX 255U /**< Maximum unsigned char value. */
@ -84,9 +80,6 @@ typedef enum {NCNAT, NCVAR, NCDIM, NCATT, NCTYP, NCFLD, NCGRP, NCFIL} NC_SORT;
#define X_DOUBLE_MAX 1.7976931348623157e+308 /**< Maximum double value. */
#define X_DOUBLE_MIN (-X_DOUBLE_MAX) /**< Minimum double value. */
/** This is the number of netCDF atomic types. */
#define NUM_ATOMIC_TYPES (NC_MAX_ATOMIC_TYPE + 1)
/** Number of parameters needed for ZLIB filter. */
#define CD_NELEMS_ZLIB 1
@ -97,17 +90,16 @@ typedef enum {NCNAT, NCVAR, NCDIM, NCATT, NCTYP, NCFLD, NCGRP, NCFIL} NC_SORT;
#define NC4_DATA_SET(nc,data) ((nc)->dispatchdata = (void *)(data))
/* Reserved attribute flags: must be powers of 2. */
/** Hidden attributes; immutable and unreadable thru API. */
#define HIDDENATTRFLAG 1
/** Readonly attributes; readable, but immutable thru the API. */
#define READONLYFLAG 2
/** Subset of readonly flags; readable by name only thru the API. */
#define NAMEONLYFLAG 4
/** Per-variable attribute, as opposed to global */
#define VARFLAG 16
/** Hidden attributes; immutable and unreadable thru API. */
# define HIDDENATTRFLAG 1
/** Readonly attributes; readable, but immutable thru the API. */
# define READONLYFLAG 2
/** Subset of readonly flags; readable by name only thru the API. */
# define NAMEONLYFLAG 4
/** Mark reserved attributes that are constructed on the fly when accessed */
# define VIRTUALFLAG 8
/** Per-variable attribute, as opposed to global */
# define VARFLAG 16
/** Boolean type, to make the code easier to read. */
typedef enum {NC_FALSE = 0, NC_TRUE = 1} nc_bool_t;
@ -238,7 +230,7 @@ typedef struct NC_TYPE_INFO
size_t size; /**< Size of the type in memory, in bytes */
nc_bool_t committed; /**< True when datatype is committed in the file */
nc_type nc_type_class; /**< NC_VLEN, NC_COMPOUND, NC_OPAQUE, NC_ENUM, NC_INT, NC_FLOAT, or NC_STRING. */
void *format_type_info; /**< HDF5-specific type info. */
void *format_type_info; /**< dispatcher-specific type info. */
int varsized; /**< <! 1 if this type is (recursively) variable sized; 0 if fixed size */
/** Information for each type or class */
@ -274,7 +266,7 @@ typedef struct NC_GRP_INFO
} NC_GRP_INFO_T;
/* These constants apply to the flags field in the
* HDF5_FILE_INFO_T defined below. */
* NC_FILE_INFO_T defined below. */
#define NC_INDEF 0x01 /**< in define mode, cleared by ncendef */
/** This is the metadata we need to keep track of for each
@ -322,31 +314,6 @@ typedef struct NC_FILE_INFO
} mem;
} NC_FILE_INFO_T;
/* Collect global state info in one place */
typedef struct NCglobalstate {
int initialized;
char* tempdir; /* track a usable temp dir */
char* home; /* track $HOME */
char* cwd; /* track getcwd */
struct NCRCinfo* rcinfo; /* Currently only one rc file per session */
struct GlobalZarr { /* Zarr specific parameters */
char dimension_separator;
} zarr;
struct GlobalAWS { /* AWS S3 specific parameters/defaults */
char* default_region;
char* config_file;
char* profile;
char* access_key_id;
char* secret_access_key;
} aws;
struct Alignment { /* H5Pset_alignment parameters */
int defined; /* 1 => threshold and alignment explicitly set */
int threshold;
int alignment;
} alignment;
struct ChunkCache chunkcache;
} NCglobalstate;
/** Variable Length Datatype struct in memory. Must be identical to
* HDF5 hvl_t. (This is only used for VL sequences, not VL strings,
* which are stored in char *'s) */
@ -356,18 +323,14 @@ typedef struct
void *p; /**< Pointer to VL data */
} nc_hvl_t;
/* Misc functions */
extern int NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size);
extern int NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep);
/* These functions convert between netcdf and HDF5 types. */
/* These functions convert between different netcdf types. */
extern int nc4_get_typelen_mem(NC_FILE_INFO_T *h5, nc_type xtype, size_t *len);
extern int nc4_convert_type(const void *src, void *dest, const nc_type src_type,
const nc_type dest_type, const size_t len, int *range_error,
const void *fill_value, int strict_nc3, int quantize_mode,
int nsd);
/* These functions do HDF5 things. */
/* These functions do netcdf-4 things. */
extern int nc4_reopen_dataset(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var);
extern int nc4_read_atts(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var);
@ -467,26 +430,69 @@ extern int nc4_close_netcdf4_file(NC_FILE_INFO_T *h5, int abort, NC_memio *memio
extern int nc4_find_default_chunksizes2(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var);
extern int nc4_check_chunksizes(NC_GRP_INFO_T* grp, NC_VAR_INFO_T* var, const size_t* chunksizes);
/* HDF5 initialization/finalization */
extern int nc4_hdf5_initialized;
extern void nc4_hdf5_initialize(void);
extern void nc4_hdf5_finalize(void);
/* This is only included if --enable-logging is used for configure; it
prints info about the metadata to stderr. */
#ifdef LOGGING
extern int log_metadata_nc(NC_FILE_INFO_T *h5);
#endif
/**************************************************/
/* Atomic types constants and functions */
/** This is the number of netCDF atomic types (as opposed to max) . */
#define NUM_ATOMIC_TYPES (NC_MAX_ATOMIC_TYPE + 1)
/** @internal Names of atomic types. */
extern const char* nc4_atomic_name[NUM_ATOMIC_TYPES];
/* Misc functions */
extern int NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size);
extern int NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep);
extern int NC4_inq_atomic_typeid(int ncid, const char *name, nc_type *typeidp);
extern int NC4_get_atomic_typeclass(nc_type xtype, int *type_class);
/**************************************************/
/* Type alignment related functions */
extern int nc_set_alignment(int threshold, int alignment);
extern int nc_get_alignment(int* thresholdp, int* alignmentp);
/**************************************************/
/* Begin to collect global state info in one place (more to do) */
typedef struct NCglobalstate {
int initialized;
char* tempdir; /* track a usable temp dir */
char* home; /* track $HOME */
char* cwd; /* track getcwd */
struct NCRCinfo* rcinfo; /* Currently only one rc file per session */
struct GlobalZarr { /* Zarr specific parameters */
char dimension_separator;
int default_zarrformat;
} zarr;
struct GlobalAWS { /* AWS S3 specific parameters/defaults */
char* default_region;
char* config_file;
char* profile;
char* access_key_id;
char* secret_access_key;
} aws;
struct Alignment { /* H5Pset_alignment parameters */
int defined; /* 1 => threshold and alignment explicitly set */
int threshold;
int alignment;
} alignment;
struct ChunkCache chunkcache;
} NCglobalstate;
extern struct NCglobalstate* NC_getglobalstate(void);
extern void NC_freeglobalstate(void);
/**************************************************/
/* Binary searcher for reserved attributes */
extern const NC_reservedatt* NC_findreserved(const char* name);
/* Global State Management */
extern NCglobalstate* NC_getglobalstate(void);
extern void NC_freeglobalstate(void);
/* reserved attribute initializer */
extern void NC_initialize_reserved(void);
/* Generic reserved Attributes */
#define NC_ATT_REFERENCE_LIST "REFERENCE_LIST"
@ -500,9 +506,12 @@ extern void NC_freeglobalstate(void);
#define NC_ATT_NC3_STRICT_NAME "_nc3_strict"
#define NC_XARRAY_DIMS "_ARRAY_DIMENSIONS"
#define NC_ATT_CODECS "_Codecs"
/* Must match values in libnczarr/zinternal.h */
#define NC_NCZARR_SUPERBLOCK "_nczarr_superblock"
#define NC_NCZARR_GROUP "_nczarr_group"
#define NC_NCZARR_ARRAY "_nczarr_array"
#define NC_NCZARR_ATTR "_nczarr_attr"
#define NC_NCZARR_ATTR_UC "_NCZARR_ATTR"
#define NC_NCZARR_MAXSTRLEN_ATTR "_nczarr_maxstrlen"
#define NC_NCZARR_DEFAULT_MAXSTRLEN_ATTR "_nczarr_default_maxstrlen"
#define NC_NCZARR_ATTR_UC "_NCZARR_ATTR" /* deprecated */
#endif /* _NC4INTERNAL_ */

View File

@ -52,12 +52,10 @@ void nc_log(int severity, const char *fmt, ...);
#define BAIL_QUIET BAIL
#ifdef USE_NETCDF4
#ifndef ENABLE_SET_LOG_LEVEL
#ifndef NETCDF_ENABLE_SET_LOG_LEVEL
/* Define away any calls to nc_set_log_level(), if its not enabled. */
#define nc_set_log_level(e)
#endif /* ENABLE_SET_LOG_LEVEL */
#endif
#endif /* NETCDF_ENABLE_SET_LOG_LEVEL */
#endif /* LOGGING */

View File

@ -17,9 +17,9 @@ affect the operation of the system.
*/
typedef unsigned int NCFLAGS;
# define SETFLAG(controls,flag) (((controls).flags) |= (flag))
# define CLRFLAG(controls,flag) (((controls).flags) &= ~(flag))
# define FLAGSET(controls,flag) (((controls.flags) & (flag)) != 0)
#define SETFLAG(controls,flag) (((controls).flags) |= (NCFLAGS)(flag))
#define CLRFLAG(controls,flag) (((controls).flags) &= ~(NCFLAGS)(flag))
#define FLAGSET(controls,flag) ((((controls).flags) & (NCFLAGS)(flag)) != 0)
/* Defined flags */
#define NCF_NC3 (0x0001) /* DAP->netcdf-3 */

View File

@ -31,7 +31,7 @@ and do the command:
/* Override for plugins */
#ifdef NETCDF_JSON_H
#define OPTEXPORT static
#else
#else /*NETCDF_JSON_H*/
#define OPTEXPORT MSC_EXTRA
#endif /*NETCDF_JSON_H*/
@ -121,7 +121,7 @@ OPTEXPORT int NCJclone(const NCjson* json, NCjson** clonep);
OPTEXPORT void NCJdump(const NCjson* json, unsigned flags, FILE*);
/* convert NCjson* object to output string */
OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif
#endif /*NETCDF_JSON_H*/
#if defined(__cplusplus)
}
@ -147,3 +147,4 @@ OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif /*NCJSON_H*/

View File

@ -10,7 +10,7 @@
#include <stdarg.h>
#include "ncexternl.h"
#define NCCATCH
#undef NCCATCH
#define NCENVLOGGING "NCLOGGING"
#define NCENVTRACING "NCTRACING"

View File

@ -45,6 +45,7 @@ struct NCglobalstate;
extern "C" {
#endif
/* API for ncs3sdk_XXX.[c|cpp] */
EXTERNL int NC_s3sdkinitialize(void);
EXTERNL int NC_s3sdkfinalize(void);
EXTERNL void* NC_s3sdkcreateclient(NCS3INFO* context);
@ -60,8 +61,7 @@ EXTERNL int NC_s3sdksearch(void* s3client0, const char* bucket, const char* pref
EXTERNL int NC_s3sdkdeletekey(void* client0, const char* bucket, const char* pathkey, char** errmsgp);
/* From ds3util.c */
EXTERNL int NC_s3sdkinitialize(void);
EXTERNL int NC_s3sdkfinalize(void);
EXTERNL void NC_s3sdkenvironment(void);
EXTERNL int NC_getdefaults3region(NCURI* uri, const char** regionp);
EXTERNL int NC_s3urlprocess(NCURI* url, NCS3INFO* s3, NCURI** newurlp);

View File

@ -119,6 +119,7 @@ extern "C" {
0x0002
All upper 16 bits are unused except
0x20000
0x40000
*/
/* Lower 16 bits */

View File

@ -31,7 +31,7 @@ and do the command:
/* Override for plugins */
#ifdef NETCDF_JSON_H
#define OPTEXPORT static
#else
#else /*NETCDF_JSON_H*/
#define OPTEXPORT MSC_EXTRA
#endif /*NETCDF_JSON_H*/
@ -121,7 +121,7 @@ OPTEXPORT int NCJclone(const NCjson* json, NCjson** clonep);
OPTEXPORT void NCJdump(const NCjson* json, unsigned flags, FILE*);
/* convert NCjson* object to output string */
OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif
#endif /*NETCDF_JSON_H*/
#if defined(__cplusplus)
}
@ -147,6 +147,8 @@ OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif /*NETCDF_JSON_H*/
#ifdef NETCDF_JSON_H
/* Copyright 2018, UCAR/Unidata.
See the COPYRIGHT file for more information.
*/
@ -219,7 +221,7 @@ typedef struct NCJparser {
} NCJparser;
typedef struct NCJbuf {
int len; /* |text|; does not include nul terminator */
size_t len; /* |text|; does not include nul terminator */
char* text; /* NULL || nul terminated */
} NCJbuf;
@ -257,7 +259,7 @@ static int NCJyytext(NCJparser*, char* start, size_t pdlen);
static void NCJreclaimArray(struct NCjlist*);
static void NCJreclaimDict(struct NCjlist*);
static int NCJunescape(NCJparser* parser);
static int unescape1(int c);
static char unescape1(char c);
static int listappend(struct NCjlist* list, NCjson* element);
static int NCJcloneArray(const NCjson* array, NCjson** clonep);
@ -265,7 +267,7 @@ static int NCJcloneDict(const NCjson* dict, NCjson** clonep);
static int NCJunparseR(const NCjson* json, NCJbuf* buf, unsigned flags);
static int bytesappendquoted(NCJbuf* buf, const char* s);
static int bytesappend(NCJbuf* buf, const char* s);
static int bytesappendc(NCJbuf* bufp, const char c);
static int bytesappendc(NCJbuf* bufp, char c);
/* Hide everything for plugins */
#ifdef NETCDF_JSON_H
@ -516,13 +518,12 @@ done:
static int
NCJlex(NCJparser* parser)
{
int c;
int token = NCJ_UNDEF;
char* start;
size_t count;
while(token == 0) { /* avoid need to goto when retrying */
c = *parser->pos;
char c = *parser->pos;
if(c == '\0') {
token = NCJ_EOF;
} else if(c <= ' ' || c == '\177') {/* ignore whitespace */
@ -541,7 +542,7 @@ NCJlex(NCJparser* parser)
}
/* Pushback c */
parser->pos--;
count = ((parser->pos) - start);
count = (size_t)((parser->pos) - start);
if(NCJyytext(parser,start,count)) goto done;
/* Discriminate the word string to get the proper sort */
if(testbool(parser->yytext) == NCJ_OK)
@ -568,7 +569,7 @@ NCJlex(NCJparser* parser)
token = NCJ_UNDEF;
goto done;
}
count = ((parser->pos) - start) - 1; /* -1 for trailing quote */
count = (size_t)((parser->pos) - start) - 1; /* -1 for trailing quote */
if(NCJyytext(parser,start,count)==NCJ_ERR) goto done;
if(NCJunescape(parser)==NCJ_ERR) goto done;
token = NCJ_STRING;
@ -789,7 +790,7 @@ NCJunescape(NCJparser* parser)
{
char* p = parser->yytext;
char* q = p;
int c;
char c;
for(;(c=*p++);) {
if(c == NCJ_ESCAPE) {
c = *p++;
@ -799,9 +800,9 @@ NCJunescape(NCJparser* parser)
case 'n': c = '\n'; break;
case 'r': c = '\r'; break;
case 't': c = '\t'; break;
case NCJ_QUOTE: c = c; break;
case NCJ_ESCAPE: c = c; break;
default: c = c; break;/* technically not Json conformant */
case NCJ_QUOTE: break;
case NCJ_ESCAPE: break;
default: break;/* technically not Json conformant */
}
}
*q++ = c;
@ -811,8 +812,8 @@ NCJunescape(NCJparser* parser)
}
/* Unescape a single character */
static int
unescape1(int c)
static char
unescape1(char c)
{
switch (c) {
case 'b': c = '\b'; break;
@ -820,7 +821,7 @@ unescape1(int c)
case 'n': c = '\n'; break;
case 'r': c = '\r'; break;
case 't': c = '\t'; break;
default: c = c; break;/* technically not Json conformant */
default: break;/* technically not Json conformant */
}
return c;
}
@ -1155,7 +1156,7 @@ static int
escape(const char* text, NCJbuf* buf)
{
const char* p = text;
int c;
char c;
for(;(c=*p++);) {
char replace = 0;
switch (c) {
@ -1265,3 +1266,4 @@ netcdf_supresswarnings(void)
ignore = (void*)NCJtotext;
ignore = ignore;
}
#endif /*NETCDF_JSON_H*/

View File

@ -40,7 +40,6 @@
#define NC_VERSION "@NC_VERSION@"
#define NC_HAS_NC2 @NC_HAS_NC2@ /*!< API version 2 support. */
#define NC_HAS_NC4 @NC_HAS_NC4@ /*!< API version 4 support. */
#define NC_HAS_HDF4 @NC_HAS_HDF4@ /*!< HDF4 support. */
#define NC_HAS_HDF5 @NC_HAS_HDF5@ /*!< HDF5 support. */
#define NC_HAS_SZIP @NC_HAS_SZIP@ /*!< szip support */
@ -50,7 +49,6 @@
#define NC_HAS_BYTERANGE @NC_HAS_BYTERANGE@ /*!< Byterange support. */
#define NC_HAS_DISKLESS @NC_HAS_DISKLESS@ /*!< diskless support. */
#define NC_HAS_MMAP @NC_HAS_MMAP@ /*!< mmap support. */
#define NC_HAS_JNA @NC_HAS_JNA@ /*!< jna support. */
#define NC_HAS_PNETCDF @NC_HAS_PNETCDF@ /*!< PnetCDF support. */
#define NC_HAS_PARALLEL4 @NC_HAS_PARALLEL4@ /*!< parallel IO support via HDF5 */
#define NC_HAS_PARALLEL @NC_HAS_PARALLEL@ /*!< parallel IO support via HDF5 and/or PnetCDF. */

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,9 @@
/* A Bison parser, made by GNU Bison 3.0.4. */
/* A Bison parser, made by GNU Bison 3.8.2. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2021 Free Software Foundation,
Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -15,7 +16,7 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
along with this program. If not, see <https://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
@ -30,6 +31,10 @@
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
/* DO NOT RELY ON FEATURES THAT ARE NOT DOCUMENTED in the manual,
especially those whose name start with YY_ or yy_. They are
private implementation details that can be changed or removed. */
#ifndef YY_DCE_DCE_TAB_H_INCLUDED
# define YY_DCE_DCE_TAB_H_INCLUDED
/* Debug traces. */
@ -40,15 +45,20 @@
extern int dcedebug;
#endif
/* Token type. */
/* Token kinds. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
enum yytokentype
{
SCAN_WORD = 258,
SCAN_STRINGCONST = 259,
SCAN_NUMBERCONST = 260
YYEMPTY = -2,
YYEOF = 0, /* "end of file" */
YYerror = 256, /* error */
YYUNDEF = 257, /* "invalid token" */
SCAN_WORD = 258, /* SCAN_WORD */
SCAN_STRINGCONST = 259, /* SCAN_STRINGCONST */
SCAN_NUMBERCONST = 260 /* SCAN_NUMBERCONST */
};
typedef enum yytokentype yytoken_kind_t;
#endif
/* Value type. */
@ -60,6 +70,8 @@ typedef int YYSTYPE;
int dceparse (DCEparsestate* parsestate);
#endif /* !YY_DCE_DCE_TAB_H_INCLUDED */

View File

@ -363,8 +363,8 @@ NCD4_get_rcproperties(NCD4INFO* state)
unsigned long interval=0;
if(sscanf(option,"%lu/%lu",&idle,&interval) != 2)
fprintf(stderr,"Illegal KEEPALIVE VALUE: %s\n",option);
state->curl->keepalive.idle = idle;
state->curl->keepalive.interval = interval;
state->curl->keepalive.idle = (long)idle;
state->curl->keepalive.interval = (long)interval;
state->curl->keepalive.active = 1;
}
}

View File

@ -36,7 +36,7 @@ static unsigned NCD4_computeChecksum(NCD4meta* meta, NCD4node* topvar);
/* Macro define procedures */
#ifdef D4DUMPCSUM
static unsigned int debugcrc32(unsigned int crc, const void *buf, size_t size)
static unsigned int debugcrc32(unsigned int crc, const void *buf, unsigned int size)
{
int i;
fprintf(stderr,"crc32: ");
@ -309,7 +309,7 @@ fillopfixed(NCD4meta* meta, d4size_t opaquesize, NCD4offset* offset, void** dstp
SKIPCOUNTER(offset);
/* verify that it is the correct size */
actual = count;
delta = actual - opaquesize;
delta = (int)actual - (int)opaquesize;
if(delta != 0) {
#ifdef FIXEDOPAQUE
nclog(NCLOGWARN,"opaque changed from %lu to %lu",actual,opaquesize);
@ -443,7 +443,7 @@ NCD4_computeChecksum(NCD4meta* meta, NCD4node* topvar)
ASSERT((ISTOPLEVEL(topvar)));
#ifndef HYRAXCHECKSUM
csum = CRC32(csum,topvar->data.dap4data.memory,topvar->data.dap4data.size);
csum = CRC32(csum,topvar->data.dap4data.memory, (unsigned int)topvar->data.dap4data.size);
#else
if(topvar->basetype->subsort != NC_STRING) {
csum = CRC32(csum,topvar->data.dap4data.memory,topvar->data.dap4data.size);

View File

@ -3,6 +3,7 @@
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
*********************************************************************/
#include "config.h"
#include "ncdispatch.h"
#include "ncd4dispatch.h"
#include "d4includes.h"
@ -160,7 +161,7 @@ NCD4_open(const char * path, int mode,
{ret = NC_ENOMEM; goto done;}
dmrresp->controller = d4info;
if((ret=NCD4_readDMR(d4info, d4info->controls.flags.flags, d4info->dmruri, dmrresp))) goto done;
if((ret=NCD4_readDMR(d4info, d4info->dmruri, dmrresp))) goto done;
/* set serial.rawdata */
len = ncbyteslength(d4info->curl->packet);

View File

@ -231,7 +231,6 @@ static int
delimitAtomicVar(NCD4meta* compiler, NCD4node* var, NCD4offset* offset)
{
int ret = NC_NOERR;
int typesize;
d4size_t i;
d4size_t dimproduct;
nc_type tid;
@ -249,7 +248,7 @@ delimitAtomicVar(NCD4meta* compiler, NCD4node* var, NCD4offset* offset)
truetype = basetype->basetype;
tid = truetype->subsort;
typesize = NCD4_typesize(tid);
size_t typesize = NCD4_typesize(tid);
if(tid != NC_STRING) {
INCR(offset,(typesize*dimproduct));
} else if(tid == NC_STRING) { /* walk the counts */
@ -448,13 +447,13 @@ skipAtomicInstance(NCD4meta* compiler, NCD4node* type, NCD4offset* offset)
{
int ret = NC_NOERR;
d4size_t count;
int typesize;
size_t typesize;
switch (type->subsort) {
default: /* fixed size atomic type */
typesize = NCD4_typesize(type->meta.id);
INCR(offset,typesize);
break;
INCR(offset,typesize);
break;
case NC_STRING:
/* Get string count */
count = GETCOUNTER(offset);

View File

@ -70,7 +70,7 @@ NCD4_fetchurl(CURL* curl, const char* url, NCbytes* buf, long* filetime, int* ht
if(cstat != CURLE_OK) goto done;
httpcode = NCD4_fetchhttpcode(curl);
if(httpcodep) *httpcodep = httpcode;
if(httpcodep) *httpcodep = (int)httpcode;
/* Get the last modified time */
if(filetime != NULL)

View File

@ -39,7 +39,7 @@ static int convertString(union ATOMICS* converter, NCD4node* type, const char* s
static void* copyAtomic(union ATOMICS* converter, nc_type type, size_t len, void* dst, NClist* blobs);
static int decodeEconst(NCD4meta* builder, NCD4node* enumtype, const char* nameorval, union ATOMICS* converter);
static int downConvert(union ATOMICS* converter, NCD4node* type);
static void freeStringMemory(char** mem, int count);
static void freeStringMemory(char** mem, size_t count);
static size_t getDimrefs(NCD4node* var, int* dimids);
static size_t getDimsizes(NCD4node* var, int* dimsizes);
static d4size_t getpadding(d4size_t offset, size_t alignment);
@ -255,7 +255,7 @@ buildDimension(NCD4meta* builder, NCD4node* dim)
if(dim->dim.isunlimited) {
NCCHECK((nc_def_dim(group->meta.id,dim->name,NC_UNLIMITED,&dim->meta.id)));
} else {
NCCHECK((nc_def_dim(group->meta.id,dim->name,(size_t)dim->dim.size,&dim->meta.id)));
NCCHECK((nc_def_dim(group->meta.id,dim->name,dim->dim.size,&dim->meta.id)));
}
done:
return THROW(ret);
@ -535,7 +535,7 @@ buildAtomicVar(NCD4meta* builder, NCD4node* var)
#endif
rank = getDimrefs(var,dimids);
NCCHECK((nc_def_var(group->meta.id,var->name,var->basetype->meta.id,rank,dimids,&var->meta.id)));
NCCHECK((nc_def_var(group->meta.id,var->name,var->basetype->meta.id,(int)rank,dimids,&var->meta.id)));
/* Tag the var */
savevarbyid(group,var);
@ -705,11 +705,10 @@ getDimsizes(NCD4node* var, int* dimsizes)
/* Utilities */
static void
freeStringMemory(char** mem, int count)
freeStringMemory(char** mem, size_t count)
{
int i;
if(mem == NULL) return;
for(i=0;i<count;i++) {
for(size_t i=0;i<count;i++) {
char* p = mem[i];
if(p) free(p);
}

View File

@ -46,7 +46,7 @@ d4odom_new(size_t rank,
assert(odom->rank <= NC_MAX_VAR_DIMS);
for(i=0;i<odom->rank;i++) {
size_t istart,icount,istop,ideclsize;
ptrdiff_t istride;
size_t istride;
istart = (start != NULL ? start[i] : 0);
icount = (count != NULL ? count[i] : (size != NULL ? size[i] : 1));
istride = (size_t)(stride != NULL ? stride[i] : 1);
@ -98,14 +98,13 @@ d4odom_more(D4odometer* odom)
d4size_t
d4odom_next(D4odometer* odom)
{
int i; /* do not make unsigned */
d4size_t count;
if(odom->rank == 0) { /*scalar*/
odom->index[0]++;
return 0;
}
count = d4odom_offset(odom); /* convenience */
for(i=odom->rank-1;i>=0;i--) {
for(size_t i=odom->rank; i-- >0;) {
odom->index[i] += odom->stride[i];
if(odom->index[i] < odom->stop[i]) break;
if(i == 0) break; /* leave the 0th entry if it overflows*/

View File

@ -7,7 +7,7 @@
#define D4ODOM_H 1
typedef struct D4odometer {
int rank;
size_t rank;
size_t index[NC_MAX_VAR_DIMS];
size_t start[NC_MAX_VAR_DIMS];
#if 0

View File

@ -288,7 +288,7 @@ parseDimensions(NCD4parser* parser, NCD4node* group, ncxml_t xml)
if((ret = parseULL(sizestr,&size))) goto done;
nullfree(sizestr);
if((ret=makeNode(parser,group,x,NCD4_DIM,NC_NULL,&dimnode))) goto done;
dimnode->dim.size = (long long)size;
dimnode->dim.size = size;
dimnode->dim.isunlimited = (unlimstr != NULL);
nullfree(unlimstr);
/* Process attributes */
@ -881,7 +881,7 @@ getOpaque(NCD4parser* parser, ncxml_t varxml, NCD4node* group)
{
size_t i;
int ret = NC_NOERR;
long long len;
size_t len;
NCD4node* opaquetype = NULL;
char* xattr;
@ -894,11 +894,11 @@ getOpaque(NCD4parser* parser, ncxml_t varxml, NCD4node* group)
/* See if this var has UCARTAGOPAQUE attribute */
xattr = ncxml_attr(varxml,UCARTAGOPAQUE);
if(xattr != NULL) {
long long tmp = 0;
long long tmp = 0;
if((ret = parseLL(xattr,&tmp)) || (tmp < 0))
FAIL(NC_EINVAL,"Illegal opaque len: %s",xattr);
len = tmp;
nullfree(xattr);
FAIL(NC_EINVAL,"Illegal opaque len: %s",xattr);
len = (size_t)tmp;
nullfree(xattr);
}
}
#ifndef FIXEDOPAQUE
@ -920,7 +920,7 @@ getOpaque(NCD4parser* parser, ncxml_t varxml, NCD4node* group)
if(opaquetype == NULL) {/* create it */
char name[NC_MAX_NAME+1];
/* Make name be "opaqueN" */
snprintf(name,NC_MAX_NAME,"opaque%lld_t",len);
snprintf(name,NC_MAX_NAME,"opaque%zu_t",len);
/* Opaque types are always created in the current group */
if((ret=makeNode(parser,group,NULL,NCD4_TYPE,NC_OPAQUE,&opaquetype)))
goto done;
@ -1098,14 +1098,13 @@ lookupFQNList(NCD4parser* parser, NClist* fqn, NCD4sort sort, NCD4node** result)
{
int ret = NC_NOERR;
size_t i;
int nsteps;
NCD4node* current;
char* name = NULL;
NCD4node* node = NULL;
/* Step 1: walk thru groups until can go no further */
current = parser->metadata->root;
nsteps = nclistlength(fqn);
size_t nsteps = nclistlength(fqn);
for(i=1;i<nsteps;i++) { /* start at 1 to side-step root name */
assert(ISGROUP(current->sort));
name = (char*)nclistget(fqn,i);
@ -1282,17 +1281,17 @@ done:
static NCD4node*
lookupAtomicType(NClist* atomictypes, const char* name)
{
int n = nclistlength(atomictypes);
int L = 0;
int R = (n - 1);
int m, cmp;
size_t n = nclistlength(atomictypes);
if (n == 0) return NULL;
size_t L = 0;
size_t R = n - 1;
NCD4node* p;
for(;;) {
if(L > R) break;
m = (L + R) / 2;
size_t m = (L + R) / 2;
p = (NCD4node*)nclistget(atomictypes,m);
cmp = strcasecmp(p->name,name);
int cmp = strcasecmp(p->name,name);
if(cmp == 0)
return p;
if(cmp < 0)
@ -1364,13 +1363,13 @@ makeAnonDim(NCD4parser* parser, const char* sizestr)
ret = parseLL(sizestr,&size);
if(ret) return NULL;
snprintf(name,NC_MAX_NAME,"/_AnonymousDim%lld",size);
snprintf(name,NC_MAX_NAME,"/%s_%lld",NCDIMANON,size);
/* See if it exists already */
dim = lookupFQN(parser,name,NCD4_DIM);
if(dim == NULL) {/* create it */
if((ret=makeNode(parser,root,NULL,NCD4_DIM,NC_NULL,&dim))) goto done;
SETNAME(dim,name+1); /* leave out the '/' separator */
dim->dim.size = (long long)size;
dim->dim.size = (size_t)size;
dim->dim.isanonymous = 1;
classify(root,dim);
}

View File

@ -37,7 +37,7 @@ deltatime(struct timeval time0,struct timeval time1)
#endif
int
NCD4_readDMR(NCD4INFO* state, int flags, NCURI* url, NCD4response* resp)
NCD4_readDMR(NCD4INFO* state, NCURI* url, NCD4response* resp)
{
int stat = NC_NOERR;
ncbytesclear(state->curl->packet);
@ -46,7 +46,7 @@ NCD4_readDMR(NCD4INFO* state, int flags, NCURI* url, NCD4response* resp)
}
int
NCD4_readDAP(NCD4INFO* state, int flags, NCURI* url, NCD4response* resp)
NCD4_readDAP(NCD4INFO* state, NCURI* url, NCD4response* resp)
{
int stat = NC_NOERR;

View File

@ -7,6 +7,7 @@
#include <stdarg.h>
#include <stddef.h>
#include "d4includes.h"
#include "d4util.h"
/*
The primary purpose of this code is to recursively traverse
@ -51,7 +52,7 @@ NCD4_swapdata(NCD4response* resp, NCD4node* var, int doswap)
if((ret=walkSeqArray(resp,var,var,offset,doswap))) goto done;
break;
}
var->data.dap4data.size = DELTA(offset,var->data.dap4data.memory);
var->data.dap4data.size = (d4size_t)DELTA(offset,var->data.dap4data.memory);
/* skip checksum, if there is one */
if(resp->inferredchecksumming)
INCR(offset,CHECKSUMSIZE);
@ -77,8 +78,8 @@ walkAtomicVar(NCD4response* resp, NCD4node* topvar, NCD4node* var, NCD4offset* o
subsort = var->basetype->basetype->subsort;
/* Only need to swap multi-byte integers and floats */
if(subsort != NC_STRING) {
int typesize = NCD4_typesize(subsort);
d4size_t totalsize = typesize*dimproduct;
size_t typesize = NCD4_typesize(subsort);
d4size_t totalsize = typesize*dimproduct;
if(typesize == 1) {
INCR(offset,totalsize);
} else { /*(typesize > 1)*/

View File

@ -23,7 +23,7 @@ typedef struct D4blob {d4size_t size; void* memory;} D4blob;
/* Empty blob constant */
#define NULLBLOB(blob) {blob.size = 0; blob.memory = NULL;}
#define OFFSET2BLOB(blob,offset) do{(blob).size = ((offset)->limit - (offset)->base); (blob).memory = (offset)->base; }while(0)
#define OFFSET2BLOB(blob,offset) do{(blob).size = (d4size_t)((offset)->limit - (offset)->base); (blob).memory = (offset)->base; }while(0)
#define BLOB2OFFSET(offset,blob) do{\
(offset)->base = (blob).memory; \
(offset)->limit = ((char*)(blob).memory) + (blob).size; \

View File

@ -69,7 +69,7 @@ NCD4_get_vars(int gid, int varid,
/* build size vector */
for(i=0;i<rank;i++) {
NCD4node* dim = nclistget(ncvar->dims,i);
dimsizes[i] = (size_t)dim->dim.size;
dimsizes[i] = dim->dim.size;
}
/* Extract and desired subset of data */
@ -195,7 +195,7 @@ getvarx(int gid, int varid, NCD4INFO** infop, NCD4node** varp,
if((ret=NCD4_newResponse(info,&dapresp))) goto done;
dapresp->mode = NCD4_DAP;
nclistpush(info->responses,dapresp);
if((ret=NCD4_readDAP(info, info->controls.flags.flags, ceuri, dapresp))) goto done;
if((ret=NCD4_readDAP(info, ceuri, dapresp))) goto done;
/* Extract DMR and dechunk the data part */
if((ret=NCD4_dechunk(dapresp))) goto done;

View File

@ -84,8 +84,8 @@ EXTERNL int NCD4_fetchlastmodified(CURL* curl, char* url, long* filetime);
EXTERNL int NCD4_ping(const char* url);
/* From d4read.c */
EXTERNL int NCD4_readDMR(NCD4INFO* state, int flags, NCURI* url, NCD4response*);
EXTERNL int NCD4_readDAP(NCD4INFO* state, int flags, NCURI* ceuri, NCD4response*);
EXTERNL int NCD4_readDMR(NCD4INFO* state, NCURI* url, NCD4response*);
EXTERNL int NCD4_readDAP(NCD4INFO* state, NCURI* ceuri, NCD4response*);
EXTERNL int NCD4_seterrormessage(NCD4response*, size_t len, char* msg);
/* From d4parser.c */

View File

@ -13,6 +13,7 @@ are defined here.
#undef COMPILEBYDEFAULT
#include "ncdap.h"
#include "ncrc.h"
#include "ncauth.h"
@ -206,10 +207,10 @@ struct NCD4node {
NClist* values;
} attr;
struct { /* sort == NCD4_OPAQUE */
long long size; /* 0 => var length */
size_t size; /* 0 => var length */
} opaque;
struct { /* sort == NCD4_DIMENSION */
long long size;
size_t size;
int isunlimited;
int isanonymous;
} dim;

View File

@ -56,6 +56,8 @@ static int computefieldinfo(struct NCAUX_CMPD* cmpd);
static int filterspec_cvt(const char* txt, size_t* nparamsp, unsigned int* params);
EXTERNL int nc_dump_data(int ncid, nc_type xtype, void* memory, size_t count, char** bufp);
/**************************************************/
/*
This code is a variant of the H5detect.c code from HDF5.
@ -922,6 +924,7 @@ ncaux_inq_any_type(int ncid, nc_type typeid, char *name, size_t *sizep, nc_type
return NC_inq_any_type(ncid, typeid, name, sizep, basetypep, nfieldsp, classp);
}
#ifdef USE_NETCDF4
/**
@param ncid - only needed for a compound type
@param xtype - type for which alignment is requested
@ -932,6 +935,7 @@ ncaux_type_alignment(int xtype, int ncid, size_t* alignp)
/* Defer to the internal version */
return NC_type_alignment(ncid, xtype, alignp);
}
#endif
/**
Dump the output tree of data from a call
@ -948,6 +952,6 @@ This function is just a wrapper around nc_dump__data.
EXTERNL int
ncaux_dump_data(int ncid, int xtype, void* memory, size_t count, char** bufp)
{
EXTERNL int nc_dump_data(int ncid, nc_type xtype, void* memory, size_t count, char** bufp);
return nc_dump_data(ncid, xtype, memory, count, bufp);
}

View File

@ -12,11 +12,17 @@
#include "nc_logging.h"
#include "nclist.h"
static int NC_find_equal_type(int ncid1, nc_type xtype1, int ncid2, nc_type *xtype2);
#ifdef USE_NETCDF4
static int searchgroup(int ncid1, int tid1, int grp, int* tid2);
static int searchgrouptree(int ncid1, int tid1, int grp, int* tid2);
#endif /*USE_NETCDF4*/
#ifdef USE_NETCDF4
/**
* @internal Compare two netcdf types for equality. Must have the
* ncids as well, to find user-defined types.
@ -203,6 +209,8 @@ done:
return ret;
}
#endif /* USE_NETCDF4 */
/**
* @internal Given a type in one file, find its equal (if any) in
* another file. It sounds so simple, but it's a real pain!
@ -232,15 +240,15 @@ NC_find_equal_type(int ncid1, nc_type xtype1, int ncid2, nc_type *xtype2)
return NC_NOERR;
}
#ifdef USE_NETCDF4
/* Recursively search group ncid2 and its children
to find a type that is equal (using compare_type)
to xtype1. */
ret = NC_rec_find_nc_type(ncid1, xtype1 , ncid2, xtype2);
#endif /* USE_NETCDF4 */
return ret;
}
#endif /* USE_NETCDF4 */
/**
* This will copy a variable that is an array of primitive type and
* its attributes from one file to another, assuming dimensions in the
@ -726,4 +734,5 @@ done:
return ret;
}
#endif
#endif /* USE_NETCDF4 */

View File

@ -34,8 +34,6 @@ See LICENSE.txt for license information.
#define MAXPATH 1024
/* Define vectors of zeros and ones for use with various nc_get_varX functions */
/* Note, this form of initialization fails under Cygwin */
size_t NC_coord_zero[NC_MAX_VAR_DIMS] = {0};
@ -143,3 +141,335 @@ NCDISPATCH_finalize(void)
NC_freeglobalstate(); /* should be one of the last things done */
return status;
}
/**************************************************/
/* Global State constants and state */
/* The singleton global state object */
static NCglobalstate* nc_globalstate = NULL;
/* Forward */
static int NC_createglobalstate(void);
/** \defgroup global_state Global state functions. */
/** \{
\ingroup global state
*/
/* NCglobal state management */
static int
NC_createglobalstate(void)
{
int stat = NC_NOERR;
const char* tmp = NULL;
if(nc_globalstate == NULL) {
nc_globalstate = calloc(1,sizeof(NCglobalstate));
}
/* Initialize struct pointers */
if((nc_globalstate->rcinfo = calloc(1,sizeof(struct NCRCinfo)))==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->entries = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->s3profiles = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
/* Get environment variables */
if(getenv(NCRCENVIGNORE) != NULL)
nc_globalstate->rcinfo->ignore = 1;
tmp = getenv(NCRCENVRC);
if(tmp != NULL && strlen(tmp) > 0)
nc_globalstate->rcinfo->rcfile = strdup(tmp);
/* Initialize chunk cache defaults */
nc_globalstate->chunkcache.size = DEFAULT_CHUNK_CACHE_SIZE; /**< Default chunk cache size. */
nc_globalstate->chunkcache.nelems = DEFAULT_CHUNKS_IN_CACHE; /**< Default chunk cache number of elements. */
nc_globalstate->chunkcache.preemption = DEFAULT_CHUNK_CACHE_PREEMPTION; /**< Default chunk cache preemption. */
done:
return stat;
}
/* Get global state */
NCglobalstate*
NC_getglobalstate(void)
{
if(nc_globalstate == NULL)
NC_createglobalstate();
return nc_globalstate;
}
void
NC_freeglobalstate(void)
{
if(nc_globalstate != NULL) {
nullfree(nc_globalstate->tempdir);
nullfree(nc_globalstate->home);
nullfree(nc_globalstate->cwd);
nullfree(nc_globalstate->aws.default_region);
nullfree(nc_globalstate->aws.config_file);
nullfree(nc_globalstate->aws.profile);
nullfree(nc_globalstate->aws.access_key_id);
nullfree(nc_globalstate->aws.secret_access_key);
if(nc_globalstate->rcinfo) {
NC_rcclear(nc_globalstate->rcinfo);
free(nc_globalstate->rcinfo);
}
free(nc_globalstate);
nc_globalstate = NULL;
}
}
/** \} */
/**************************************************/
/** \defgroup atomic_types Atomic Type functions */
/** \{
\ingroup atomic_types
*/
/* The sizes of types may vary from platform to platform, but within
 * netCDF files, type sizes are fixed. */
#define NC_CHAR_LEN sizeof(char) /**< @internal Size of char. */
#define NC_STRING_LEN sizeof(char *) /**< @internal Size of char *. */
#define NC_BYTE_LEN 1 /**< @internal Size of byte. */
#define NC_SHORT_LEN 2 /**< @internal Size of short. */
#define NC_INT_LEN 4 /**< @internal Size of int. */
#define NC_FLOAT_LEN 4 /**< @internal Size of float. */
#define NC_DOUBLE_LEN 8 /**< @internal Size of double. */
#define NC_INT64_LEN 8 /**< @internal Size of int64. */
/** @internal Names of atomic types.
 * Indexed by nc_type value (NC_NAT=0 .. NC_STRING=12). */
const char* nc4_atomic_name[NUM_ATOMIC_TYPES] = {"none", "byte", "char",
"short", "int", "float",
"double", "ubyte",
"ushort", "uint",
"int64", "uint64", "string"};
/** @internal Per-element sizes of atomic types, in the same index
 * order as nc4_atomic_name; entry 0 (NC_NAT) has size 0. */
static const size_t nc4_atomic_size[NUM_ATOMIC_TYPES] = {0, NC_BYTE_LEN, NC_CHAR_LEN, NC_SHORT_LEN,
NC_INT_LEN, NC_FLOAT_LEN, NC_DOUBLE_LEN,
NC_BYTE_LEN, NC_SHORT_LEN, NC_INT_LEN, NC_INT64_LEN,
NC_INT64_LEN, NC_STRING_LEN};
/**
 * @internal Get the name and size of an atomic type. For strings, the
 * size of a (char *) pointer is returned (see nc4_atomic_size).
 *
 * @param typeid1 Type ID.
 * @param name Gets the name of the type.
 * @param size Gets the size of one element of the type in bytes.
 *
 * @return ::NC_NOERR No error.
 * @return ::NC_EBADID Bad ncid.
 * @return ::NC_EBADTYPE Type not found.
 * @author Dennis Heimbigner
 */
int
NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size)
{
    /* Fixed: also reject negative ids; nc_type is a signed int and a
       negative value would index before the start of the tables. */
    if (typeid1 < 0 || typeid1 >= NUM_ATOMIC_TYPES)
	return NC_EBADTYPE;
    if (name)
	strcpy(name, nc4_atomic_name[typeid1]);
    if (size)
	*size = nc4_atomic_size[typeid1];
    return NC_NOERR;
}
/**
 * @internal Look up an atomic type by name (case-insensitive) and
 * report its type id and per-element size.
 *
 * @param name [in] the name of the type.
 * @param idp [out] the type index of the type.
 * @param sizep [out] the size of one element of the type in bytes.
 *
 * @return ::NC_NOERR No error.
 * @return ::NC_EBADID Bad ncid.
 * @return ::NC_EBADTYPE Type not found.
 * @author Dennis Heimbigner
 */
int
NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep)
{
    int tid;
    if (name == NULL || strlen(name) == 0)
	return NC_EBADTYPE;
    for (tid = 0; tid < NUM_ATOMIC_TYPES; tid++) {
	if (strcasecmp(name, nc4_atomic_name[tid]) != 0)
	    continue;
	if (idp != NULL)
	    *idp = tid;
	if (sizep != NULL)
	    *sizep = nc4_atomic_size[tid];
	return NC_NOERR;
    }
    return NC_EBADTYPE;
}
/**
 * @internal Get the id of an atomic type from the name
 * (case-sensitive match, unlike NC4_lookup_atomic_type).
 *
 * @param ncid File and group ID (unused; kept for dispatch signature).
 * @param name Name of type
 * @param typeidp Pointer that will get the type ID.
 *
 * @return ::NC_NOERR No error.
 * @return ::NC_EBADTYPE Type not found.
 * @author Ed Hartnett
 */
int
NC4_inq_atomic_typeid(int ncid, const char *name, nc_type *typeidp)
{
    int i;
    NC_UNUSED(ncid);
    /* Fixed: guard against a NULL name, consistent with
       NC4_lookup_atomic_type; previously this crashed in strcmp. */
    if (name == NULL)
	return NC_EBADTYPE;
    /* Handle atomic types. */
    for (i = 0; i < NUM_ATOMIC_TYPES; i++) {
	if (!strcmp(name, nc4_atomic_name[i]))
	{
	    if (typeidp)
		*typeidp = i;
	    return NC_NOERR;
	}
    }
    return NC_EBADTYPE;
}
/**
 * @internal Get the class of an atomic type.
 *
 * @param xtype NetCDF type ID.
 * @param type_class Pointer that gets class of type: NC_INT for all
 * integral types, NC_FLOAT for all floating-point types, NC_CHAR, or
 * NC_STRING.
 *
 * @return ::NC_NOERR No error.
 * @return ::NC_EBADTYPE Not an atomic type handled here.
 * @author Ed Hartnett, Dennis Heimbigner
 */
int
NC4_get_atomic_typeclass(nc_type xtype, int *type_class)
{
    assert(type_class);
    if (xtype == NC_CHAR) {
	*type_class = NC_CHAR;
    } else if (xtype == NC_STRING) {
	*type_class = NC_STRING;
    } else if (xtype == NC_FLOAT || xtype == NC_DOUBLE) {
	/* NC_FLOAT is class used for all floating-point types */
	*type_class = NC_FLOAT;
    } else if (xtype == NC_BYTE || xtype == NC_UBYTE
	       || xtype == NC_SHORT || xtype == NC_USHORT
	       || xtype == NC_INT || xtype == NC_UINT
	       || xtype == NC_INT64 || xtype == NC_UINT64) {
	/* NC_INT is class used for all integral types */
	*type_class = NC_INT;
    } else {
	return NC_EBADTYPE;
    }
    return NC_NOERR;
}
/** \} */
/**************************************************/
/** \defgroup alignment Alignment functions. */
/** \{
\ingroup alignment
*/
/**
Provide a function to store global data alignment
information.
Repeated calls to nc_set_alignment will overwrite any existing values.
If defined, then for every file created or opened after the call to
nc_set_alignment, and for every new variable added to the file, the
most recently set threshold and alignment values will be applied
to that variable.
The nc_set_alignment function causes new data written to a
netCDF-4 file to be aligned on disk to a specified block
size. To be effective, alignment should be the system disk block
size, or a multiple of it. This setting is effective with MPI
I/O and other parallel systems.
This is a trade-off of write speed versus file size. Alignment
leaves holes between file objects. The default of no alignment
writes file objects contiguously, without holes. Alignment has
no impact on file readability.
Alignment settings apply only indirectly, through the file open
functions. Call nc_set_alignment first, then nc_create or
nc_open for one or more files. Current alignment settings are
locked in when each file is opened, then forgotten when the same
file is closed. For illustration, it is possible to write
different files at the same time with different alignments, by
interleaving nc_set_alignment and nc_open calls.
Alignment applies to all newly written low-level file objects at
or above the threshold size, including chunks of variables,
attributes, and internal infrastructure. Alignment is not locked
in to a data variable. It can change between data chunks of the
same variable, based on a file's history.
Refer to H5Pset_alignment in HDF5 documentation for more
specific details, interactions, and additional rules.
@param threshold The minimum size to which alignment is applied.
@param alignment The alignment value.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_set_alignment(int threshold, int alignment)
{
NCglobalstate* gs = NC_getglobalstate();
gs->alignment.threshold = threshold;
gs->alignment.alignment = alignment;
gs->alignment.defined = 1;
return NC_NOERR;
}
/**
Provide get function to retrieve global data alignment
information.
The nc_get_alignment function return the last values set by
nc_set_alignment. If nc_set_alignment has not been called, then
it returns the value 0 for both threshold and alignment.
@param thresholdp Return the current minimum size to which alignment is applied or zero.
@param alignmentp Return the current alignment value or zero.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_get_alignment(int* thresholdp, int* alignmentp)
{
NCglobalstate* gs = NC_getglobalstate();
if(thresholdp) *thresholdp = gs->alignment.threshold;
if(alignmentp) *alignmentp = gs->alignment.alignment;
return NC_NOERR;
}
/** \} */

View File

@ -218,98 +218,6 @@ done:
return stat;
}
/**************************************************/
/* Support direct user defined filters */
#ifdef ENABLE_CLIENTSIDE_FILTERS
/** Register filer client.
* @note Use void* to avoid having to include hdf.h
*
* @param id Filter ID
* @param info Pointer that gets info.
*
* @return NC_NOERR if the filter is available
* @return NC_EBADID if ncid is invalid
* @return NC_ENOFILTER if filter is not available.
* @author Dennis Heimbigner
*/
EXTERNL int
nc_filter_client_register(unsigned int id, void* info)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
NC_FILTER_OBJ_HDF5 client;
if(id == 0 ||info == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTER_FORMAT_HDF5;
client.sort = NC_FILTER_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = info;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filter_action(NCFILTER_CLIENT_REG, id, &client);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
/** Unregister filer client.
* @note Use void* to avoid having to include hdf.h
*
* @param id Filter ID
*
* @return NC_NOERR if the filter is available
* @author Dennis Heimbigner
*/
EXTERNL int
nc_filter_client_unregister(unsigned int id)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
stat = nc4_global_filter_action(NCFILTER_CLIENT_UNREG, id, NULL);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
/** Inquire about filer client.
* @note Use void* to avoid having to include hdf.h
*
* @param id Filter ID
* @param infop Pointer that gets info.
*
* @return NC_NOERR if the filter is available
* @author Dennis Heimbigner
*/
EXTERNL int
nc_filter_client_inq(unsigned int id, void* infop)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
H5Z_class2_t* hct = (H5Z_class2_t*)infop;
NC_FILTER_OBJ_HDF5 client;
if(id == 0 ||infop == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTER_FORMAT_HDF5;
client.sort = NC_FILTER_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = hct;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filter_action(NCFILTER_CLIENT_INQ, id, &client);
if(stat == NC_NOERR) {
*hct = *(H5Z_class2_t*)client.u.client.info;
}
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
#endif /*ENABLE_CLIENTSIDE_FILTERS*/
/**************************************************/
/* Functions for accessing standardized filters */

View File

@ -1,243 +0,0 @@
/*
* Copyright 2018, University Corporation for Atmospheric Research
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
*/
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef _MSC_VER
#include <io.h>
#endif
#include "netcdf.h"
#include "netcdf_filter.h"
#include "ncdispatch.h"
#include "nc4internal.h"
#include "ncjson.h"
/*
NCZarr filter API
*/
/**************************************************/
/* Per-variable filters */
/**
Find the set of filters (if any) associated with a variable.
Assumes NCZarr format using json
\param ncid NetCDF or group ID, from a previous call to nc_open(),
\param varid Variable ID
\param jsonp a JSON formatted string is returned in this argument
\returns ::NC_NOERR No error.
\returns ::NC_ENOTNC4 Not a netCDF-4 file.
\returns ::NC_EBADID Bad ncid
\returns ::NC_ENOTVAR Invalid variable ID.
\returns ::NC_EINVAL Invalid arguments
\ingroup variables
\author Dennis Heimbigner
*/
EXTERNL int
nc_inq_var_filterx_ids(int ncid, int varid, char** textp)
{
NC* ncp;
int stat = NC_NOERR;
TRACE(nc_inq_var_filterx_ids);
if((stat = NC_check_id(ncid,&ncp))) return stat;
if((stat = ncp->dispatch->inq_var_filterx_ids(ncid,varid,textp))) goto done;
done:
return stat;
}
/**
Find the the param info about filter (if any)
associated with a variable and with specified id.
Assumes HDF5 format using unsigned ints.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param varid Variable ID
\param id The filter id of interest
\param nparamsp (Out) Storage which will get the number of parameters to the filter
\param params (Out) Storage which will get associated parameters.
Note: the caller must allocate and free.
\returns ::NC_NOERR No error.
\returns ::NC_ENOTNC4 Not a netCDF-4 file.
\returns ::NC_EBADID Bad ncid.
\returns ::NC_ENOTVAR Invalid variable ID.
\returns ::NC_ENOFILTER Specified filter not defined for this variable.
\ingroup variables
\author Dennis Heimbigner
*/
EXTERNL int
nc_inq_var_filterx_info(int ncid, int varid, const char* id, char** textp)
{
NC* ncp;
int stat = NC_check_id(ncid,&ncp);
TRACE(nc_inq_var_filterx_info);
if(stat != NC_NOERR) return stat;
if((stat = ncp->dispatch->inq_var_filterx_info(ncid,varid,id,textp))) goto done;
done:
return stat;
}
/**
Define a new variable filter
Assumes HDF5 format using unsigned ints.
Only variables with chunked storage can use filters.
@param ncid File and group ID.
@param varid Variable ID.
@param id Filter ID.
@param nparams Number of filter parameters.
@param parms Filter parameters.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Variable must be chunked.
@return ::NC_EBADID Bad ID.
@author Dennis Heimbigner
*/
EXTERNL int
nc_def_var_filterx(int ncid, int varid, const char* json)
{
NC* ncp;
int stat = NC_check_id(ncid,&ncp);
TRACE(nc_def_var_filterx);
if(stat != NC_NOERR) return stat;
if((stat = ncp->dispatch->def_var_filterx(ncid,varid,json))) goto done;
done:
return stat;
}
/**
Find the first filter (if any) associated with a variable.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param varid Variable ID
\param textp Storage which will get the filter info (id + parameters) in json format
This is redundant over the multi-filter API, so
it can be implemented in terms of those functions.
\returns ::NC_NOERR No error.
\returns ::NC_ENOTNC4 Not a netCDF-4 file.
\returns ::NC_EBADID Bad ncid.
\returns ::NC_ENOTVAR Invalid variable ID.
\ingroup variables
\author Dennis Heimbigner
*/
EXTERNL int
nc_inq_var_filterx(int ncid, int varid, char** textp)
{
    NC* ncp;
    int stat = NC_NOERR;
    char* text = NULL;
    NCjson* json = NULL;
    NCjson* jid = NULL;
    TRACE(nc_inq_var_filterx);
    if((stat = NC_check_id(ncid,&ncp))) goto done;
    /* Get the filters on this variable */
    if((stat = nc_inq_var_filterx_ids(ncid,varid,&text))) goto done;
    /* Parse it */
    if((stat = NCJparse(text,0,&json))) goto done;
    if(json->sort != NCJ_ARRAY)
	{stat = NC_EFILTER; goto done;}
    if(NCJlength(json) == 0 || NCJcontents(json) == NULL)
	{stat = NC_ENOFILTER; goto done;}
    jid = NCJith(json,0);
    if(jid->sort == NCJ_DICT || jid->sort == NCJ_ARRAY)
	{stat = NC_EFILTER; goto done;}
    /* Get info about the first filter */
    if((stat = nc_inq_var_filterx_info(ncid,varid,NCJstring(jid),textp)))
	{stat = NC_ENOFILTER; goto done;}
done:
    /* Fixed: the ids string returned by nc_inq_var_filterx_ids was
       never freed, leaking on every call. */
    nullfree(text);
    NCJreclaim(json);
    return stat;
}
/**************************************************/
/* Support direct user defined filters */
#ifdef ENABLE_CLIENTSIDE_FILTERS
/* Use void* to avoid having to include hdf.h*/
EXTERNL int
nc_filterx_client_register(unsigned int id, void* info)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
NC_FILTERX_OBJ_HDF5 client;
if(id == 0 ||info == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTERX_FORMAT_HDF5;
client.sort = NC_FILTERX_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = info;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filterx_action(NCFILTER_CLIENT_REG, id, &client);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
EXTERNL int
nc_filterx_client_unregister(unsigned int id)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
stat = nc4_global_filterx_action(NCFILTER_CLIENT_UNREG, id, NULL);
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
/* Use void* to avoid having to include hdf.h*/
EXTERNL int
nc_filterx_client_inq(unsigned int id, void* infop)
{
int stat = NC_NOERR;
#ifdef USE_HDF5
H5Z_class2_t* hct = (H5Z_class2_t*)infop;
NC_FILTERX_OBJ_HDF5 client;
if(id == 0 ||infop == NULL)
return NC_EINVAL;
memset(&client,0,sizeof(client));
client.hdr.format = NC_FILTERX_FORMAT_HDF5;
client.sort = NC_FILTERX_SORT_CLIENT;
client.u.client.id = id;
client.u.client.info = hct;
/* Note use of a global function, not part of the dispatch table */
stat = nc4_global_filterx_action(NCFILTER_CLIENT_INQ, id, &client);
if(stat == NC_NOERR) {
*hct = *(H5Z_class2_t*)client.u.client.info;
}
#else
stat = NC_ENOTBUILT;
#endif
return stat;
}
#endif /*ENABLE_CLIENTSIDE_FILTERS*/

View File

@ -52,8 +52,8 @@ DAPSUBSTRATE(NC* nc)
typedef struct Position{char* memory; ptrdiff_t offset;} Position;
/* Forward */
#ifdef USE_NETCDF4
static int dump_datar(int ncid, nc_type xtype, Position*, NCbytes* buf);
#ifdef USE_NETCDF4
static int dump_compound(int ncid, nc_type xtype, size_t size, size_t nfields, Position* offset, NCbytes* buf);
static int dump_vlen(int ncid, nc_type xtype, nc_type basetype, Position* offset, NCbytes* buf);
static int dump_enum(int ncid, nc_type xtype, nc_type basetype, Position* offset, NCbytes* buf);

View File

@ -43,9 +43,6 @@ enum URLFORMAT {UF_NONE=0, UF_VIRTUAL=1, UF_PATH=2, UF_S3=3, UF_OTHER=4};
static const char* awsconfigfiles[] = {".aws/config",".aws/credentials",NULL};
#define NCONFIGFILES (sizeof(awsconfigfiles)/sizeof(char*))
static int ncs3_initialized = 0;
static int ncs3_finalized = 0;
/**************************************************/
/* Forward */
@ -56,38 +53,21 @@ static int awsparse(const char* text, NClist* profiles);
/**************************************************/
/* Capture environmental Info */
EXTERNL int
NC_s3sdkinitialize(void)
EXTERNL void
NC_s3sdkenvironment(void)
{
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
}
{
/* Get various environment variables as defined by the AWS sdk */
NCglobalstate* gs = NC_getglobalstate();
if(getenv("AWS_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_REGION"));
else if(getenv("AWS_DEFAULT_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_DEFAULT_REGION"));
else if(gs->aws.default_region == NULL)
gs->aws.default_region = nulldup(AWS_GLOBAL_DEFAULT_REGION);
gs->aws.access_key_id = nulldup(getenv("AWS_ACCESS_KEY_ID"));
gs->aws.config_file = nulldup(getenv("AWS_CONFIG_FILE"));
gs->aws.profile = nulldup(getenv("AWS_PROFILE"));
gs->aws.secret_access_key = nulldup(getenv("AWS_SECRET_ACCESS_KEY"));
}
return NC_NOERR;
}
EXTERNL int
NC_s3sdkfinalize(void)
{
if(!ncs3_finalized) {
ncs3_initialized = 0;
ncs3_finalized = 1;
}
return NC_NOERR;
/* Get various environment variables as defined by the AWS sdk */
NCglobalstate* gs = NC_getglobalstate();
if(getenv("AWS_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_REGION"));
else if(getenv("AWS_DEFAULT_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_DEFAULT_REGION"));
else if(gs->aws.default_region == NULL)
gs->aws.default_region = nulldup(AWS_GLOBAL_DEFAULT_REGION);
gs->aws.access_key_id = nulldup(getenv("AWS_ACCESS_KEY_ID"));
gs->aws.config_file = nulldup(getenv("AWS_CONFIG_FILE"));
gs->aws.profile = nulldup(getenv("AWS_PROFILE"));
gs->aws.secret_access_key = nulldup(getenv("AWS_SECRET_ACCESS_KEY"));
}
/**************************************************/

View File

@ -868,7 +868,7 @@ nc_def_var_szip(int ncid, int varid, int options_mask, int pixels_per_block)
/* This will cause H5Pset_szip to be called when the var is
* created. */
unsigned int params[2] = {(unsigned int)options_mask, (unsigned int)pixels_per_block};
if ((ret = nc_def_var_filter(ncid, varid, HDF5_FILTER_SZIP, 2, params)))
if ((ret = nc_def_var_filter(ncid, varid, H5Z_FILTER_SZIP, 2, params)))
return ret;
return NC_NOERR;
@ -1312,7 +1312,7 @@ NC_check_nulls(int ncid, int varid, const size_t *start, size_t **count,
int
nc_free_string(size_t len, char **data)
{
int i;
size_t i;
for (i = 0; i < len; i++)
free(data[i]);
return NC_NOERR;

View File

@ -13,13 +13,16 @@
#include <stdio.h>
#endif
#if defined(_WIN32) || defined(_MSC_VER)
extern errno_t rand_s(unsigned int *randomValue);
#endif
/* Support platform independent generation of 32-bit unsigned int random numbers */
int
main() {
unsigned int urnd = 0; /* range 0..2147483647 */
#if defined(WIN32) || defined(_MSC_VER)
#if defined(_WIN32) || defined(_MSC_VER)
(void)rand_s(&urnd);
#else
long rnd;

View File

@ -133,10 +133,9 @@ NC_s3sdkinitialize(void)
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
#ifdef DEBUG
//ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
//ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Trace;
ncs3options.httpOptions.installSigPipeHandler = true;
ncs3options.loggingOptions.logger_create_fn = [] { return std::make_shared<Aws::Utils::Logging::ConsoleLogSystem>(Aws::Utils::Logging::LogLevel::Trace); };
@ -144,6 +143,9 @@ NC_s3sdkinitialize(void)
#endif
Aws::InitAPI(ncs3options);
/* Get environment information */
NC_s3sdkenvironment();
}
return NCUNTRACE(NC_NOERR);
}
@ -500,7 +502,6 @@ NC_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey, s
int stat = NC_NOERR;
const char* key = NULL;
const char* mcontent = (char*)content;
NCTRACE(11,"bucket=%s pathkey=%s count=%lld content=%p",bucket,pathkey,count,content);
AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0;
@ -535,7 +536,7 @@ NC_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey, s
put_request.SetContentLength((long long)count);
std::shared_ptr<Aws::IOStream> data = std::shared_ptr<Aws::IOStream>(new Aws::StringStream());
data->rdbuf()->pubsetbuf((char*)content,count);
data->rdbuf()->pubsetbuf((char*)content,(std::streamsize)count);
put_request.SetBody(data);
auto put_result = AWSS3GET(s3client)->PutObject(put_request);
if(!put_result.IsSuccess()) {

View File

@ -108,6 +108,37 @@ static int queryinsert(NClist* list, char* ekey, char* evalue);
#define NT(x) ((x)==NULL?"null":x)
/**************************************************/
static int ncs3_initialized = 0;
static int ncs3_finalized = 0;
/* Initialize the (stub) S3 SDK layer and capture the AWS-related
 * environment variables into the global state.
 * Idempotent: repeated calls after the first are no-ops for the flags. */
EXTERNL int
NC_s3sdkinitialize(void)
{
    if(!ncs3_initialized) {
	ncs3_initialized = 1;
	ncs3_finalized = 0;
    }
    /* Get environment information */
    /* Fixed: was "NC_s3sdkenvironment(void);" — passing the keyword
       'void' as an argument is a syntax error, not a call. */
    NC_s3sdkenvironment();
    return NC_NOERR;
}
EXTERNL int
NC_s3sdkfinalize(void)
{
if(!ncs3_finalized) {
ncs3_initialized = 0;
ncs3_finalized = 1;
}
return NC_NOERR;
}
/**************************************************/
#if 0
static void
dumps3info(NCS3INFO* s3info, const char* tag)

View File

@ -22,5 +22,7 @@ if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdfhdf4 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()
target_link_libraries(netcdfhdf4 PUBLIC ${HDF4_LIBRARIES})
# Remember to package this file for CMake builds.
ADD_EXTRA_DIST(${libhdf4_SOURCES} CMakeLists.txt)

View File

@ -11,7 +11,6 @@
#include "config.h"
#include "netcdf.h"
#include "ncpathmgr.h"
#include "ncpathmgr.h"
#include "hdf5internal.h"
/** @internal These flags may not be set for create. */

View File

@ -20,7 +20,7 @@
#include "netcdf.h"
#include "netcdf_filter.h"
#ifdef ENABLE_BLOSC
#ifdef NETCDF_ENABLE_BLOSC
#include <blosc.h>
#endif
@ -29,135 +29,6 @@
/* Forward */
static int NC4_hdf5_filter_free(struct NC_HDF5_Filter* spec);
/**************************************************/
/* Filter registration support */
#ifdef ENABLE_CLIENTSIDE_FILTERS
/* Mnemonic */
#define FILTERACTIVE 1
/* WARNING: GLOBAL VARIABLE */
/* Define list of registered filters */
static NClist* NC4_registeredfilters = NULL; /** List<NC_FILTER_CLIENT_HDF5*> */
/**************************************************/
/* Filter registration support */
static int
clientfilterlookup(unsigned int id)
{
int i;
if(NC4_registeredfilters == NULL)
NC4_registeredfilters = nclistnew();
for(i=0;i<nclistlength(NC4_registeredfilters);i++) {
NC_FILTER_CLIENT_HDF5* x = nclistget(NC4_registeredfilters,i);
if(x != NULL && x->id == id) {
return i; /* return position */
}
}
return -1;
}
static void
reclaiminfo(NC_FILTER_CLIENT_HDF5* info)
{
nullfree(info);
}
static int
filterremove(int pos)
{
NC_FILTER_CLIENT_HDF5* info = NULL;
if(NC4_registeredfilters == NULL)
return THROW(NC_EINVAL);
if(pos < 0 || pos >= nclistlength(NC4_registeredfilters))
return THROW(NC_EINVAL);
info = nclistget(NC4_registeredfilters,pos);
reclaiminfo(info);
nclistremove(NC4_registeredfilters,pos);
return NC_NOERR;
}
static NC_FILTER_CLIENT_HDF5*
dupfilterinfo(NC_FILTER_CLIENT_HDF5* info)
{
NC_FILTER_CLIENT_HDF5* dup = NULL;
if(info == NULL) goto fail;
if((dup = calloc(1,sizeof(NC_FILTER_CLIENT_HDF5))) == NULL) goto fail;
*dup = *info;
return dup;
fail:
reclaiminfo(dup);
return NULL;
}
/**
 * @internal Perform a client-side filter registry action: register,
 * unregister, or inquire (op is one of NCFILTER_CLIENT_REG/UNREG/INQ).
 * Maintains the global NC4_registeredfilters list and mirrors the
 * register/unregister calls into HDF5 via H5Zregister/H5Zunregister.
 *
 * @param op Action selector.
 * @param id HDF5 filter id (ignored as a selector for REG, which takes
 *           the id from infop, but cross-checked against it).
 * @param infop In/out filter object; required for REG, output for INQ.
 * @return ::NC_NOERR on success, otherwise an NC error code.
 */
int
nc4_global_filter_action(int op, unsigned int id, NC_FILTER_OBJ_HDF5* infop)
{
    int stat = NC_NOERR;
    H5Z_class2_t* h5filterinfo = NULL;
    herr_t herr;
    int pos = -1;
    NC_FILTER_CLIENT_HDF5* dup = NULL;
    NC_FILTER_CLIENT_HDF5* elem = NULL;
    NC_FILTER_CLIENT_HDF5 ncf;
    /* Fixed: removed "NC_UNUSED(format);" — this function has no
       parameter named 'format', so that statement did not compile. */
    switch (op) {
    case NCFILTER_CLIENT_REG: /* Ignore id argument */
        if(infop == NULL) {stat = NC_EINVAL; goto done;}
	assert(NC_FILTER_FORMAT_HDF5 == infop->hdr.format);
	assert(NC_FILTER_SORT_CLIENT == infop->sort);
	elem = (NC_FILTER_CLIENT_HDF5*)&infop->u.client;
	h5filterinfo = elem->info;
        /* Another sanity check */
        if(id != h5filterinfo->id)
	    {stat = NC_EINVAL; goto done;}
	/* See if this filter is already defined */
	if((pos = clientfilterlookup(id)) >= 0)
	    {stat = NC_ENAMEINUSE; goto done;} /* Already defined */
	if((herr = H5Zregister(h5filterinfo)) < 0)
	    {stat = NC_EFILTER; goto done;}
	/* Save a copy of the passed in info */
	ncf.id = id;
	ncf.info = elem->info;
	if((dup=dupfilterinfo(&ncf)) == NULL)
	    {stat = NC_ENOMEM; goto done;}
	nclistpush(NC4_registeredfilters,dup);
	break;
    case NCFILTER_CLIENT_UNREG:
	if(id <= 0)
	    {stat = NC_ENOTNC4; goto done;}
	/* See if this filter is already defined */
	if((pos = clientfilterlookup(id)) < 0)
	    {stat = NC_ENOFILTER; goto done;} /* Not defined */
	if((herr = H5Zunregister(id)) < 0)
	    {stat = NC_EFILTER; goto done;}
	if((stat=filterremove(pos))) goto done;
	break;
    case NCFILTER_CLIENT_INQ:
	if(infop == NULL) goto done;
	/* Look up the id in our local table */
	if((pos = clientfilterlookup(id)) < 0)
	    {stat = NC_ENOFILTER; goto done;} /* Not defined */
	elem = (NC_FILTER_CLIENT_HDF5*)nclistget(NC4_registeredfilters,pos);
	if(elem == NULL) {stat = NC_EINTERNAL; goto done;}
	if(infop != NULL) {
	    infop->u.client = *elem;
	}
	break;
    default:
	{stat = NC_EINTERNAL; goto done;}
    }
done:
    return THROW(stat);
}
#endif /*ENABLE_CLIENTSIDE_FILTERS*/
/**************************************************/
/**************************************************/
/**
* @file

View File

@ -930,7 +930,7 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
if(H5Pset_fapl_ros3(fapl_id, &fa) < 0)
BAIL(NC_EHDFERR);
} else
#endif /*ENABLE_ROS3*/
#endif /*NETCDF_ENABLE_ROS3*/
{/* Configure FAPL to use our byte-range file driver */
if (H5Pset_fapl_http(fapl_id) < 0)
BAIL(NC_EHDFERR);
@ -1965,11 +1965,9 @@ exit:
static void
hdf5free(void* memory)
{
#ifndef JNA
/* On Windows using the microsoft runtime, it is an error
for one library to free memory allocated by a different library.*/
if(memory != NULL) H5free_memory(memory);
#endif
}
/**
@ -2073,9 +2071,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
int nmembers;
unsigned int m;
char* member_name = NULL;
#ifdef JNA
char jna[1001];
#endif
type->nc_type_class = NC_COMPOUND;
if((retval = NC4_set_varsize(type))) return retval;
@ -2108,12 +2103,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
retval = NC_EBADNAME;
break;
}
#ifdef JNA
else {
strncpy(jna,member_name,1000);
member_name = jna;
}
#endif
/* Offset in bytes on *this* platform. */
member_offset = H5Tget_member_offset(native_typeid, m);
@ -2220,9 +2209,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
nc_type base_nc_type = NC_NAT;
void *value;
char *member_name = NULL;
#ifdef JNA
char jna[1001];
#endif
type->nc_type_class = NC_ENUM;
if((retval = NC4_set_varsize(type))) return retval;
@ -2261,11 +2247,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
if (!(member_name = H5Tget_member_name(hdf_typeid, i)))
return NC_EHDFERR;
#ifdef JNA
strncpy(jna,member_name,1000);
member_name = jna;
#endif
if (strlen(member_name) > NC_MAX_NAME)
return NC_EBADNAME;

View File

@ -42,7 +42,7 @@ extern int NC_HDF4_initialize(void);
extern int NC_HDF4_finalize(void);
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
EXTERNL int NC_s3sdkinitialize(void);
EXTERNL int NC_s3sdkfinalize(void);
#endif
@ -105,7 +105,7 @@ nc_initialize()
#ifdef USE_HDF4
if((stat = NC_HDF4_initialize())) goto done;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
if((stat = NC_s3sdkinitialize())) goto done;
#endif
#ifdef NETCDF_ENABLE_NCZARR
@ -170,7 +170,7 @@ nc_finalize(void)
if((stat = NCZ_finalize())) failed = stat;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
if((stat = NC_s3sdkfinalize())) failed = stat;
#endif

View File

@ -104,7 +104,7 @@ load(NCPSharedLib* lib , const char* path0, int flags)
int errcode = GetLastError();
char* msg = NULL;
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), &msg, 0, NULL);
NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (char*)&msg, 0, NULL);
memset(lib->err.msg,0,sizeof(lib->err.msg));
if(msg)
strncpy(lib->err.msg,msg,sizeof(lib->err.msg));

View File

@ -72,11 +72,11 @@ Inserted into any .zarray
\"storage\": \"scalar\"|\"contiguous\"|\"compact\"|\"chunked\"
}"
Inserted into any .zattrs ? or should it go into the container?
"_nczarr_attrs": "{
"_nczarr_attr": "{
\"types\": {\"attr1\": \"<i4\", \"attr2\": \"<i1\",...}
}
+
+Note: _nczarr_attrs type include non-standard use of a zarr type "|U1" => NC_CHAR.
+Note: _nczarr_attr type include non-standard use of a zarr type "|U1" => NC_CHAR.
+
*/
@ -96,6 +96,9 @@ Inserted into any .zattrs ? or should it go into the container?
#define NOXARRAYCONTROL "noxarray"
#define XARRAYSCALAR "_scalar_"
#define NC_NCZARR_MAXSTRLEN_ATTR "_nczarr_maxstrlen"
#define NC_NCZARR_DEFAULT_MAXSTRLEN_ATTR "_nczarr_default_maxstrlen"
#define LEGAL_DIM_SEPARATORS "./"
#define DFALT_DIM_SEPARATOR '.'

View File

@ -499,20 +499,21 @@ s3clear(void* s3client, const char* bucket, const char* rootkey)
{
int stat = NC_NOERR;
char** list = NULL;
char** p;
size_t nkeys = 0;
if(s3client && bucket && rootkey) {
if((stat = NC_s3sdksearch(s3client, bucket, rootkey, &nkeys, &list, NULL)))
goto done;
if(list != NULL) {
for(p=list;*p;p++) {
size_t i;
for(i=0;i<nkeys;i++) {
char* p = list[i];
/* If the key is the rootkey, skip it */
if(strcmp(rootkey,*p)==0) continue;
if(strcmp(rootkey,p)==0) continue;
#ifdef S3DEBUG
fprintf(stderr,"s3clear: %s\n",*p);
fprintf(stderr,"s3clear: %s\n",p);
#endif
if((stat = NC_s3sdkdeletekey(s3client, bucket, *p, NULL)))
if((stat = NC_s3sdkdeletekey(s3client, bucket, p, NULL)))
goto done;
}
}

View File

@ -907,7 +907,7 @@ load_jatts(NCZMAP* map, NC_OBJ* container, int nczarrv1, NCjson** jattrsp, NClis
if((stat = nczm_concat(fullpath,NCZATTRDEP,&key))) goto done;
stat=NCZ_downloadjson(map,key,&jncattr);
}
} else {/* Get _nczarr_attrs from .zattrs */
} else {/* Get _nczarr_attr from .zattrs */
stat = NCJdictget(jattrs,NCZ_V2_ATTR,&jncattr);
if(!stat && jncattr == NULL)
{stat = NCJdictget(jattrs,NCZ_V2_ATTR_UC,&jncattr);}

View File

@ -31,7 +31,6 @@ Benchmarks: @HAS_BENCHMARKS@
NetCDF-2 API: @HAS_NC2@
HDF4 Support: @HAS_HDF4@
HDF5 Support: @HAS_HDF5@
NetCDF-4 API: @HAS_NC4@
CDF5 Support: @HAS_CDF5@
NC-4 Parallel Support: @HAS_PARALLEL4@
PnetCDF Support: @HAS_PNETCDF@
@ -47,7 +46,6 @@ NCZarr Support: @HAS_NCZARR@
Diskless Support: @HAS_DISKLESS@
MMap Support: @HAS_MMAP@
JNA Support: @HAS_JNA@
ERANGE Fill Support: @HAS_ERANGE_FILL@
Relaxed Boundary Check: @RELAX_COORD_BOUND@

View File

@ -96,8 +96,8 @@ if(HAVE_M4)
COMMAND ${NC_M4} ${ARGS_MANPAGE} "${CMAKE_CURRENT_BINARY_DIR}/netcdf.m4" > "${CMAKE_CURRENT_BINARY_DIR}/netcdf.3"
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/netcdf.3" DESTINATION "share/man/man3" COMPONENT documentation)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/netcdf.3" DESTINATION "${CMAKE_INSTALL_MANDIR}/man3" COMPONENT documentation)
endif(NOT MSVC)
endif()
endif()

View File

@ -56,10 +56,13 @@ NC4_initialize(void)
if(getenv(NCLOGLEVELENV) != NULL) {
char* slevel = getenv(NCLOGLEVELENV);
long level = atol(slevel);
#ifdef USE_NETCDF4
if(level >= 0)
nc_set_log_level((int)level);
}
#endif
#endif
NC_initialize_reserved();
return ret;
}

View File

@ -36,30 +36,35 @@
*/
/** @internal List of reserved attributes.
WARNING: This list must be in (strcmp) sorted order for binary search. */
static const NC_reservedatt NC_reserved[] = {
WARNING: This list will be sorted in (strcmp) sorted order for binary search.
So order here does not matter; the table will be modified by sorting.
*/
static NC_reservedatt NC_reserved[] = {
{NC_ATT_CLASS, READONLYFLAG|HIDDENATTRFLAG}, /*CLASS*/
{NC_ATT_DIMENSION_LIST, READONLYFLAG|HIDDENATTRFLAG}, /*DIMENSION_LIST*/
{NC_ATT_NAME, READONLYFLAG|HIDDENATTRFLAG}, /*NAME*/
{NC_ATT_REFERENCE_LIST, READONLYFLAG|HIDDENATTRFLAG}, /*REFERENCE_LIST*/
{NC_XARRAY_DIMS, READONLYFLAG|NAMEONLYFLAG|HIDDENATTRFLAG}, /*_ARRAY_DIMENSIONS*/
{NC_XARRAY_DIMS, READONLYFLAG|HIDDENATTRFLAG}, /*_ARRAY_DIMENSIONS*/
{NC_ATT_CODECS, VARFLAG|READONLYFLAG|NAMEONLYFLAG}, /*_Codecs*/
{NC_ATT_FORMAT, READONLYFLAG}, /*_Format*/
{ISNETCDF4ATT, READONLYFLAG|NAMEONLYFLAG}, /*_IsNetcdf4*/
{ISNETCDF4ATT, READONLYFLAG|NAMEONLYFLAG|VIRTUALFLAG}, /*_IsNetcdf4*/
{NCPROPS,READONLYFLAG|NAMEONLYFLAG|HIDDENATTRFLAG}, /*_NCProperties*/
{NC_NCZARR_ATTR_UC, READONLYFLAG|HIDDENATTRFLAG}, /*_NCZARR_ATTR */
{NC_NCZARR_ATTR_UC, READONLYFLAG|NAMEONLYFLAG|HIDDENATTRFLAG}, /*_NCZARR_ATTR */
{NC_ATT_COORDINATES, READONLYFLAG|HIDDENATTRFLAG}, /*_Netcdf4Coordinates*/
{NC_ATT_DIMID_NAME, READONLYFLAG|HIDDENATTRFLAG}, /*_Netcdf4Dimid*/
{SUPERBLOCKATT, READONLYFLAG|NAMEONLYFLAG}, /*_SuperblockVersion*/
{SUPERBLOCKATT, READONLYFLAG|NAMEONLYFLAG|VIRTUALFLAG}, /*_SuperblockVersion*/
{NC_ATT_NC3_STRICT_NAME, READONLYFLAG}, /*_nc3_strict*/
{NC_ATT_NC3_STRICT_NAME, READONLYFLAG}, /*_nc3_strict*/
{NC_NCZARR_ATTR, READONLYFLAG|HIDDENATTRFLAG}, /*_nczarr_attr */
};
#define NRESERVED (sizeof(NC_reserved) / sizeof(NC_reservedatt)) /*|NC_reservedatt|*/
#define NRESERVED (sizeof(NC_reserved) / sizeof(NC_reservedatt)) /*|NC_reservedatt*/
/*Forward */
static int NC4_move_in_NCList(NC* nc, int new_id);
static int bincmp(const void* arg1, const void* arg2);
static int sortcmp(const void* arg1, const void* arg2);
#if NC_HAS_LOGGING
#if LOGGING
/* This is the severity level of messages which will be logged. Use
severity 0 for errors, 1 for important log messages, 2 for less
important, etc. */
@ -129,7 +134,7 @@ nc_log(int severity, const char *fmt, ...)
fprintf(f, "\n");
fflush(f);
}
#endif /* NC_HAS_LOGGING */
#endif /* LOGGING */
/**
* @internal Check and normalize and name.
@ -1706,7 +1711,7 @@ nc4_normalize_name(const char *name, char *norm_name)
return NC_NOERR;
}
#ifdef ENABLE_SET_LOG_LEVEL
#ifdef NETCDF_ENABLE_SET_LOG_LEVEL
/**
* Initialize parallel I/O logging. For parallel I/O builds, open log
@ -1719,7 +1724,7 @@ nc4_init_logging(void)
{
int ret = NC_NOERR;
#if NC_HAS_LOGGING
#if LOGGING
#if NC_HAS_PARALLEL4
if (!LOG_FILE && nc_log_level >= 0)
{
@ -1745,7 +1750,7 @@ nc4_init_logging(void)
return NC_EINTERNAL;
}
#endif /* NC_HAS_PARALLEL4 */
#endif /* NC_HAS_LOGGING */
#endif /* LOGGING */
return ret;
}
@ -1759,7 +1764,7 @@ nc4_init_logging(void)
void
nc4_finalize_logging(void)
{
#if NC_HAS_LOGGING
#if LOGGING
#if NC_HAS_PARALLEL4
if (LOG_FILE)
{
@ -1767,7 +1772,7 @@ nc4_finalize_logging(void)
LOG_FILE = NULL;
}
#endif /* NC_HAS_PARALLEL4 */
#endif /* NC_HAS_LOGGING */
#endif /* LOGGING */
}
/**
@ -1786,7 +1791,7 @@ nc4_finalize_logging(void)
int
nc_set_log_level(int new_level)
{
#if NC_HAS_LOGGING
#if LOGGING
/* Remember the new level. */
nc_log_level = new_level;
@ -1803,13 +1808,13 @@ nc_set_log_level(int new_level)
#endif /* NC_HAS_PARALLEL4 */
LOG((1, "log_level changed to %d", nc_log_level));
#endif /*NC_HAS_LOGGING */
#endif /* LOGGING */
return NC_NOERR;
}
#endif /* ENABLE_SET_LOG_LEVEL */
#endif /* NETCDF_ENABLE_SET_LOG_LEVEL */
#if NC_HAS_LOGGING
#if LOGGING
#define MAX_NESTS 10
/**
* @internal Recursively print the metadata of a group.
@ -1978,7 +1983,7 @@ log_metadata_nc(NC_FILE_INFO_T *h5)
return NC_NOERR;
}
#endif /*NC_HAS_LOGGING */
#endif /*LOGGING */
/**
* @internal Show the in-memory metadata for a netcdf file. This
@ -1995,7 +2000,7 @@ int
NC4_show_metadata(int ncid)
{
int retval = NC_NOERR;
#if NC_HAS_LOGGING
#if LOGGING
NC_FILE_INFO_T *h5;
int old_log_level = nc_log_level;
@ -2007,7 +2012,7 @@ NC4_show_metadata(int ncid)
nc_log_level = 2;
retval = log_metadata_nc(h5);
nc_log_level = old_log_level;
#endif /*NC_HAS_LOGGING*/
#endif /*LOGGING*/
return retval;
}
@ -2021,6 +2026,7 @@ NC4_show_metadata(int ncid)
const NC_reservedatt*
NC_findreserved(const char* name)
{
#if 0
int n = NRESERVED;
int L = 0;
int R = (n - 1);
@ -2037,8 +2043,12 @@ NC_findreserved(const char* name)
R = (m - 1);
}
return NULL;
#else
return (const NC_reservedatt*)bsearch(name,NC_reserved,NRESERVED,sizeof(NC_reservedatt),bincmp);
#endif
}
/* Ed Hartness requires this function */
static int
NC4_move_in_NCList(NC* nc, int new_id)
{
@ -2051,153 +2061,25 @@ NC4_move_in_NCList(NC* nc, int new_id)
return stat;
}
/**************************************************/
/* NCglobal state management */
static NCglobalstate* nc_globalstate = NULL;
static int
NC_createglobalstate(void)
sortcmp(const void* arg1, const void* arg2)
{
int stat = NC_NOERR;
const char* tmp = NULL;
if(nc_globalstate == NULL) {
nc_globalstate = calloc(1,sizeof(NCglobalstate));
}
/* Initialize struct pointers */
if((nc_globalstate->rcinfo = calloc(1,sizeof(struct NCRCinfo)))==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->entries = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
if((nc_globalstate->rcinfo->s3profiles = nclistnew())==NULL)
{stat = NC_ENOMEM; goto done;}
/* Get environment variables */
if(getenv(NCRCENVIGNORE) != NULL)
nc_globalstate->rcinfo->ignore = 1;
tmp = getenv(NCRCENVRC);
if(tmp != NULL && strlen(tmp) > 0)
nc_globalstate->rcinfo->rcfile = strdup(tmp);
/* Initialize chunk cache defaults */
nc_globalstate->chunkcache.size = DEFAULT_CHUNK_CACHE_SIZE; /**< Default chunk cache size. */
nc_globalstate->chunkcache.nelems = DEFAULT_CHUNKS_IN_CACHE; /**< Default chunk cache number of elements. */
nc_globalstate->chunkcache.preemption = DEFAULT_CHUNK_CACHE_PREEMPTION; /**< Default chunk cache preemption. */
done:
return stat;
NC_reservedatt* r1 = (NC_reservedatt*)arg1;
NC_reservedatt* r2 = (NC_reservedatt*)arg2;
return strcmp(r1->name,r2->name);
}
/* Get global state */
NCglobalstate*
NC_getglobalstate(void)
static int
bincmp(const void* arg1, const void* arg2)
{
if(nc_globalstate == NULL)
NC_createglobalstate();
return nc_globalstate;
const char* name = (const char*)arg1;
NC_reservedatt* ra = (NC_reservedatt*)arg2;
return strcmp(name,ra->name);
}
void
NC_freeglobalstate(void)
NC_initialize_reserved(void)
{
if(nc_globalstate != NULL) {
nullfree(nc_globalstate->tempdir);
nullfree(nc_globalstate->home);
nullfree(nc_globalstate->cwd);
nullfree(nc_globalstate->aws.default_region);
nullfree(nc_globalstate->aws.config_file);
nullfree(nc_globalstate->aws.profile);
nullfree(nc_globalstate->aws.access_key_id);
nullfree(nc_globalstate->aws.secret_access_key);
if(nc_globalstate->rcinfo) {
NC_rcclear(nc_globalstate->rcinfo);
free(nc_globalstate->rcinfo);
}
free(nc_globalstate);
nc_globalstate = NULL;
}
}
/**************************************************/
/* Specific property functions */
/**
Provide a function to store global data alignment
information.
Repeated calls to nc_set_alignment will overwrite any existing values.
If defined, then for every file created or opened after the call to
nc_set_alignment, and for every new variable added to the file, the
most recently set threshold and alignment values will be applied
to that variable.
The nc_set_alignment function causes new data written to a
netCDF-4 file to be aligned on disk to a specified block
size. To be effective, alignment should be the system disk block
size, or a multiple of it. This setting is effective with MPI
I/O and other parallel systems.
This is a trade-off of write speed versus file size. Alignment
leaves holes between file objects. The default of no alignment
writes file objects contiguously, without holes. Alignment has
no impact on file readability.
Alignment settings apply only indirectly, through the file open
functions. Call nc_set_alignment first, then nc_create or
nc_open for one or more files. Current alignment settings are
locked in when each file is opened, then forgotten when the same
file is closed. For illustration, it is possible to write
different files at the same time with different alignments, by
interleaving nc_set_alignment and nc_open calls.
Alignment applies to all newly written low-level file objects at
or above the threshold size, including chunks of variables,
attributes, and internal infrastructure. Alignment is not locked
in to a data variable. It can change between data chunks of the
same variable, based on a file's history.
Refer to H5Pset_alignment in HDF5 documentation for more
specific details, interactions, and additional rules.
@param threshold The minimum size to which alignment is applied.
@param alignment The alignment value.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_set_alignment(int threshold, int alignment)
{
NCglobalstate* gs = NC_getglobalstate();
gs->alignment.threshold = threshold;
gs->alignment.alignment = alignment;
gs->alignment.defined = 1;
return NC_NOERR;
}
/**
Provide get function to retrieve global data alignment
information.
The nc_get_alignment function return the last values set by
nc_set_alignment. If nc_set_alignment has not been called, then
it returns the value 0 for both threshold and alignment.
@param thresholdp Return the current minimum size to which alignment is applied or zero.
@param alignmentp Return the current alignment value or zero.
@return ::NC_NOERR No error.
@return ::NC_EINVAL Invalid input.
@author Dennis Heimbigner
@ingroup datasets
*/
int
nc_get_alignment(int* thresholdp, int* alignmentp)
{
NCglobalstate* gs = NC_getglobalstate();
if(thresholdp) *thresholdp = gs->alignment.threshold;
if(alignmentp) *alignmentp = gs->alignment.alignment;
return NC_NOERR;
/* Guarantee the reserved attribute list is sorted */
qsort((void*)NC_reserved,NRESERVED,sizeof(NC_reservedatt),sortcmp);
}

View File

@ -15,34 +15,6 @@
#include "nc4dispatch.h"
#include <stddef.h>
#if 0
#ifdef NETCDF_ENABLE_DAP4
EXTERNL NC* NCD4_get_substrate_nc(int ncid);
#endif
#endif
/* The sizes of types may vary from platform to platform, but within
* netCDF files, type sizes are fixed. */
#define NC_CHAR_LEN sizeof(char) /**< @internal Size of char. */
#define NC_STRING_LEN sizeof(char *) /**< @internal Size of char *. */
#define NC_BYTE_LEN 1 /**< @internal Size of byte. */
#define NC_SHORT_LEN 2 /**< @internal Size of short. */
#define NC_INT_LEN 4 /**< @internal Size of int. */
#define NC_FLOAT_LEN 4 /**< @internal Size of float. */
#define NC_DOUBLE_LEN 8 /**< @internal Size of double. */
#define NC_INT64_LEN 8 /**< @internal Size of int64. */
/** @internal Names of atomic types. */
const char* nc4_atomic_name[NUM_ATOMIC_TYPES] = {"none", "byte", "char",
"short", "int", "float",
"double", "ubyte",
"ushort", "uint",
"int64", "uint64", "string"};
static const size_t nc4_atomic_size[NUM_ATOMIC_TYPES] = {0, NC_BYTE_LEN, NC_CHAR_LEN, NC_SHORT_LEN,
NC_INT_LEN, NC_FLOAT_LEN, NC_DOUBLE_LEN,
NC_BYTE_LEN, NC_SHORT_LEN, NC_INT_LEN, NC_INT64_LEN,
NC_INT64_LEN, NC_STRING_LEN};
/**
* @internal Find all user-defined types for a location. This finds
* all user-defined types in a group.
@ -90,64 +62,6 @@ NC4_inq_typeids(int ncid, int *ntypes, int *typeids)
return NC_NOERR;
}
/**
* @internal Get the name and size of an atomic type. For strings, 1 is
* returned.
*
* @param typeid1 Type ID.
* @param name Gets the name of the type.
* @param size Gets the size of one element of the type in bytes.
*
* @return ::NC_NOERR No error.
* @return ::NC_EBADID Bad ncid.
* @return ::NC_EBADTYPE Type not found.
* @author Dennis Heimbigner
*/
int
NC4_inq_atomic_type(nc_type typeid1, char *name, size_t *size)
{
LOG((2, "nc_inq_atomic_type: typeid %d", typeid1));
if (typeid1 >= NUM_ATOMIC_TYPES)
return NC_EBADTYPE;
if (name)
strcpy(name, nc4_atomic_name[typeid1]);
if (size)
*size = nc4_atomic_size[typeid1];
return NC_NOERR;
}
/**
* @internal Get the id and size of an atomic type by name.
*
* @param name [in] the name of the type.
* @param idp [out] the type index of the type.
* @param sizep [out] the size of one element of the type in bytes.
*
* @return ::NC_NOERR No error.
* @return ::NC_EBADID Bad ncid.
* @return ::NC_EBADTYPE Type not found.
* @author Dennis Heimbigner
*/
int
NC4_lookup_atomic_type(const char *name, nc_type* idp, size_t *sizep)
{
int i;
LOG((2, "nc_lookup_atomic_type: name %s ", name));
if (name == NULL || strlen(name) == 0)
return NC_EBADTYPE;
for(i=0;i<NUM_ATOMIC_TYPES;i++) {
if(strcasecmp(name,nc4_atomic_name[i])==0) {
if(idp) *idp = i;
if(sizep) *sizep = nc4_atomic_size[i];
return NC_NOERR;
}
}
return NC_EBADTYPE;
}
/**
* @internal Get the name and size of a type.
* For VLEN the base type len is returned.
@ -175,10 +89,7 @@ NC4_inq_type(int ncid, nc_type typeid1, char *name, size_t *size)
/* If this is an atomic type, the answer is easy. */
if (typeid1 < NUM_ATOMIC_TYPES)
{
if (name)
strcpy(name, nc4_atomic_name[typeid1]);
if (size)
*size = nc4_atomic_size[typeid1];
if((retval=NC4_inq_atomic_type(typeid1, name, size))) return retval;
return NC_NOERR;
}
@ -197,8 +108,6 @@ NC4_inq_type(int ncid, nc_type typeid1, char *name, size_t *size)
{
if (type->nc_type_class == NC_VLEN)
*size = sizeof(nc_vlen_t);
else if (type->nc_type_class == NC_STRING)
*size = NC_STRING_LEN;
else
*size = type->size;
}
@ -256,9 +165,9 @@ NC4_inq_user_type(int ncid, nc_type typeid1, char *name, size_t *size,
{
if (type->nc_type_class == NC_VLEN)
*size = sizeof(nc_vlen_t);
else if (type->nc_type_class == NC_STRING)
*size = NC_STRING_LEN;
else
else if (type->nc_type_class == NC_STRING) {
if((retval=NC4_inq_type(ncid,typeid1,NULL,size))) return retval;
} else
*size = type->size;
}
if (name)

View File

@ -23,7 +23,6 @@ libsprivate="@LIBS@"
has_dap="@HAS_DAP@"
has_dap4="@HAS_DAP4@"
has_nc2="@HAS_NC2@"
has_nc4="@HAS_NC4@"
has_hdf4="@HAS_HDF4@"
has_pnetcdf="@HAS_PNETCDF@"
has_hdf5="@HAS_HDF5@"
@ -57,8 +56,7 @@ Available values for OPTION include:
--has-dap4 whether DAP4 is enabled in this build
--has-dap same as --has-dap2 (Deprecated)
--has-nc2 whether NetCDF-2 API is enabled
--has-nc4 whether NetCDF-4/HDF-5 is enabled in this build
--has-hdf5 whether HDF5 is used in build (always the same as --has-nc4)
--has-hdf5 whether HDF5 is used in build
--has-hdf4 whether HDF4 was used in build
--has-logging whether logging is enabled with --enable-logging.
--has-pnetcdf whether PnetCDF was used in build
@ -100,7 +98,6 @@ all()
echo " --has-dap2 -> $has_dap"
echo " --has-dap4 -> $has_dap4"
echo " --has-nc2 -> $has_nc2"
echo " --has-nc4 -> $has_nc4"
echo " --has-hdf5 -> $has_hdf5"
echo " --has-hdf4 -> $has_hdf4"
echo " --has-logging -> $has_logging"
@ -189,10 +186,6 @@ while test $# -gt 0; do
echo $has_nc2
;;
--has-nc4)
echo $has_nc4
;;
--has-hdf5)
echo $has_hdf5
;;

View File

@ -15,7 +15,7 @@ echo "Getting KNMI test files $file_list"
for f1 in $file_list
do
if ! test -f $f1; then
wget https://resources.unidata.ucar.edu/sample_data/$f1.gz
wget https://resources.unidata.ucar.edu/netcdf/sample_data/$f1.gz
gunzip $f1.gz
fi
done

View File

@ -87,7 +87,7 @@ endif
TESTS += run_diskless.sh run_diskless5.sh run_inmemory.sh
if LARGE_FILE_TESTS
if ! ENABLE_PARALLEL
if ! NETCDF_ENABLE_PARALLEL
TESTS += run_diskless2.sh
endif
endif

View File

@ -79,6 +79,7 @@ endif
# Filter Tests (requires ncdump and ncgen and HDF5)
if USE_HDF5
if NETCDF_ENABLE_PLUGINS
if NETCDF_ENABLE_FILTER_TESTING
extradir =
check_PROGRAMS += test_filter test_filter_misc test_filter_order test_filter_repeat
@ -108,6 +109,7 @@ TESTS += tst_filterinstall.sh
endif
endif # NETCDF_ENABLE_FILTER_TESTING
endif # NETCDF_ENABLE_PLUGINS
endif # USE_HDF5
endif # NETCDF_BUILD_UTILITIES
@ -127,7 +129,7 @@ ref_tst_interops4.nc CMakeLists.txt run_grp_rename.sh \
run_empty_vlen_test.sh ref_hdf5_compat1.nc ref_hdf5_compat2.nc \
ref_hdf5_compat3.nc tst_misc.sh tdset.h5 tst_szip.sh ref_szip.h5 \
ref_szip.cdl tst_filter.sh bzip2.cdl noop.cdl ref_filtered.cdl \
ref_unfiltered.cdl ref_bzip2.c findplugin.in ref_unfilteredvv.cdl \
ref_unfiltered.cdl ref_bzip2.c ref_unfilteredvv.cdl \
ref_filteredvv.cdl ref_multi.cdl \
ref_ncgenF.cdl ref_nccopyF.cdl \
ref_filter_repeat.txt ref_fillonly.cdl test_fillonly.sh \

View File

@ -39,6 +39,7 @@ IF(NETCDF_ENABLE_TESTS)
add_bin_env_test(ncdap test_vara)
ENDIF()
IF(NETCDF_ENABLE_EXTERNAL_SERVER_TESTS)
IF(NETCDF_ENABLE_DAP_REMOTE_TESTS)
IF(NETCDF_BUILD_UTILITIES)
@ -64,6 +65,7 @@ IF(NETCDF_ENABLE_TESTS)
add_bin_test(ncdap test_manyurls)
SET_TESTS_PROPERTIES(ncdap_test_manyurls PROPERTIES RUN_SERIAL TRUE)
ENDIF()
ENDIF()
IF(NETCDF_ENABLE_DAP_AUTH_TESTS)
##obsolete add_bin_test(ncdap t_auth)

View File

@ -47,6 +47,10 @@ if NETCDF_BUILD_UTILITIES
TESTS += tst_ber.sh tst_remote3.sh tst_formatx.sh testurl.sh tst_fillmismatch.sh tst_zero_len_var.sh
endif
if NETCDF_ENABLE_EXTERNAL_SERVER_TESTS
if NETCDF_ENABLE_DAP_REMOTE_TESTS
if NETCDF_BUILD_UTILITIES
# Remote servers
# iridl.ldeo.columbia.edu
TESTS += tst_encode.sh
@ -55,16 +59,17 @@ TESTS += tst_hyrax.sh
TESTS += test_partvar
if NETCDF_ENABLE_DAP_LONG_TESTS
# Various
TESTS += tst_longremote3.sh
tst_longremote3.log: tst_remote3.log
if NETCDF_ENABLE_EXTERNAL_SERVER_TESTS
endif
if NETCDF_ENABLE_DAP_LONG_TESTS
test_manyurls_SOURCES = test_manyurls.c manyurls.h
check_PROGRAMS += test_manyurls
test_manyurls.log: tst_longremote3.log
TESTS += test_manyurls
endif
endif
test_partvar_SOURCES = test_partvar.c
@ -84,6 +89,9 @@ check_PROGRAMS += test_partvar
check_PROGRAMS += t_misc
check_PROGRAMS += t_ncf330
endif
endif
if NETCDF_ENABLE_DAP_AUTH_TESTS
TESTS += testauth.sh
endif

View File

@ -355,5 +355,5 @@ add_subdirectory(expected)
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${CLEANFILES}")
if(NOT MSVC)
install(FILES ${MAN_FILES} DESTINATION "share/man/man1" COMPONENT documentation)
install(FILES ${MAN_FILES} DESTINATION "${CMAKE_INSTALL_MANDIR}/man1" COMPONENT documentation)
endif()

View File

@ -6,7 +6,7 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi
# This shell script tests BOM support in ncgen
set -e
set -x
# add hack for sunos
export srcdir;

View File

@ -20,6 +20,12 @@
#include "vardata.h"
#include "netcdf_aux.h"
/* If set, then print char variables as utf-8.
If not set, then print non-printable characters as octal.
The latter was the default before this change.
*/
#define UTF8CHARS
/* maximum len of string needed for one value of a primitive type */
#define MAX_OUTPUT_LEN 100
@ -340,6 +346,7 @@ pr_tvals(
sp = vals + len;
while (len != 0 && *--sp == '\0')
len--;
/* Walk the sequence of characters and write control characters in escape form. */
for (iel = 0; iel < len; iel++) {
unsigned char uc;
switch (uc = (unsigned char)(*vals++ & 0377)) {
@ -371,10 +378,12 @@ pr_tvals(
printf("\\\"");
break;
default:
if (isprint(uc))
printf("%c",uc);
else
#ifdef UTF8CHARS
if (!isprint(uc))
printf("\\%.3o",uc);
else
#endif /*UTF8CHARS*/
printf("%c",uc);
break;
}
}

Some files were not shown because too many files have changed in this diff Show More