Merging current main development branch, changing option name to include NETCDF_ prefix. Thanks!

Merge remote-tracking branch 'upstream/main' into cmake-ncgen-generate
This commit is contained in:
Ward Fisher 2024-03-21 14:08:12 -06:00
commit cdd9cbb83c
166 changed files with 1229 additions and 1187 deletions

View File

@ -72,20 +72,20 @@ jobs:
- run: echo "LDFLAGS=-L${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
- run: echo "LD_LIBRARY_PATH=${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
- run: |
echo "ENABLE_HDF4=--disable-hdf4" >> $GITHUB_ENV
echo "ENABLE_HDF5=--disable-hdf5" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF4=--disable-hdf4" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=--disable-hdf5" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc3'
- run: |
echo "ENABLE_HDF4=--enable-hdf4" >> $GITHUB_ENV
echo "ENABLE_HDF5=--enable-hdf5" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF4=--enable-hdf4" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=--enable-hdf5" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc4'
- run: echo "ENABLE_DAP=--disable-dap" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=--disable-dap" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_off'
- run: echo "ENABLE_DAP=--enable-dap" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=--enable-dap" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_on'
- run: echo "ENABLE_NCZARR=--disable-nczarr" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=--disable-nczarr" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_off'
- run: echo "ENABLE_NCZARR=--enable-nczarr" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=--enable-nczarr" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_on'
###
@ -116,7 +116,7 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure ${ENABLE_HDF5} ${ENABLE_DAP} ${ENABLE_NCZARR}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure ${NETCDF_ENABLE_HDF5} ${NETCDF_ENABLE_DAP} ${NETCDF_ENABLE_NCZARR}
if: ${{ success() }}
- name: Look at config.log if error
@ -145,7 +145,7 @@ jobs:
# - name: Make Distcheck
# shell: bash -l {0}
# run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} DISTCHECK_CONFIGURE_FLAGS="${ENABLE_HDF4} ${ENABLE_HDF5} ${ENABLE_DAP} ${ENABLE_NCZARR}" make distcheck
# run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} DISTCHECK_CONFIGURE_FLAGS="${NETCDF_ENABLE_HDF4} ${NETCDF_ENABLE_HDF5} ${NETCDF_ENABLE_DAP} ${NETCDF_ENABLE_NCZARR}" make distcheck
# if: ${{ success() }}
#- name: Start SSH Debug
@ -177,20 +177,20 @@ jobs:
- run: echo "CMAKE_PREFIX_PATH=${HOME}/environments/${{ matrix.hdf5 }}/" >> $GITHUB_ENV
- run: echo "LD_LIBRARY_PATH=${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
- run: |
echo "ENABLE_HDF4=OFF" >> $GITHUB_ENV
echo "ENABLE_HDF5=OFF" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF4=OFF" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=OFF" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc3'
- run: |
echo "ENABLE_HDF4=ON" >> $GITHUB_ENV
echo "ENABLE_HDF5=ON" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF4=ON" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=ON" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc4'
- run: echo "ENABLE_DAP=OFF" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=OFF" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_off'
- run: echo "ENABLE_DAP=ON" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=ON" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_on'
- run: echo "ENABLE_NCZARR=OFF" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=OFF" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_off'
- run: echo "ENABLE_NCZARR=ON" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=ON" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_on'
- run: echo "CTEST_OUTPUT_ON_FAILURE=1" >> $GITHUB_ENV
@ -218,7 +218,7 @@ jobs:
run: |
mkdir build
cd build
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DENABLE_DAP=${ENABLE_DAP} -DENABLE_HDF5=${ENABLE_HDF5} -DENABLE_NCZARR=${ENABLE_NCZARR}
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DNETCDF_ENABLE_DAP=${NETCDF_ENABLE_DAP} -DNETCDF_ENABLE_HDF5=${NETCDF_ENABLE_HDF5} -DNETCDF_ENABLE_NCZARR=${NETCDF_ENABLE_NCZARR}
- name: Print Summary
shell: bash -l {0}
@ -375,7 +375,7 @@ jobs:
run: |
mkdir build
cd build
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DENABLE_DAP=TRUE -DENABLE_HDF5=TRUE -DENABLE_NCZARR=TRUE -D ENABLE_DAP_LONG_TESTS=TRUE -DENABLE_LIBXML2=FALSE
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DNETCDF_ENABLE_DAP=TRUE -DNETCDF_ENABLE_HDF5=TRUE -DNETCDF_ENABLE_NCZARR=TRUE -D NETCDF_ENABLE_DAP_LONG_TESTS=TRUE -DNETCDF_ENABLE_LIBXML2=FALSE
- name: Print Summary
shell: bash -l {0}
@ -450,7 +450,7 @@ jobs:
run: |
mkdir build
cd build
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DENABLE_DAP=TRUE -DENABLE_HDF5=TRUE -DENABLE_NCZARR=TRUE -D ENABLE_DAP_LONG_TESTS=TRUE -DENABLE_LIBXML2=FALSE -DBUILD_SHARED_LIBS=OFF -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DNETCDF_ENABLE_DAP=TRUE -DNETCDF_ENABLE_HDF5=TRUE -DNETCDF_ENABLE_NCZARR=TRUE -D NETCDF_ENABLE_DAP_LONG_TESTS=TRUE -DNETCDF_ENABLE_LIBXML2=FALSE -DBUILD_SHARED_LIBS=OFF -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- name: Print Summary
shell: bash -l {0}

View File

@ -223,7 +223,7 @@ jobs:
strategy:
matrix:
hdf5: [ 1.14.0 ]
hdf5: [ 1.14.3 ]
steps:
- uses: actions/checkout@v3
@ -440,7 +440,7 @@ jobs:
run: |
mkdir build
cd build
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DENABLE_DAP=TRUE -DENABLE_HDF5=TRUE -DENABLE_NCZARR=TRUE -DENABLE_DAP_LONG_TESTS=TRUE -DENABLE_LIBXML2=FALSE
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DNETCDF_ENABLE_DAP=TRUE -DNETCDF_ENABLE_HDF5=TRUE -DNETCDF_ENABLE_NCZARR=TRUE -DNETCDF_ENABLE_DAP_LONG_TESTS=TRUE -DNETCDF_ENABLE_LIBXML2=FALSE
- name: Print Summary
shell: bash -l {0}
@ -521,7 +521,7 @@ jobs:
run: |
mkdir build
cd build
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DENABLE_DAP=TRUE -DENABLE_HDF5=TRUE -DENABLE_NCZARR=TRUE -DENABLE_DAP_LONG_TESTS=TRUE -DENABLE_LIBXML2=FALSE -DBUILD_SHARED_LIBS=FALSE -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DNETCDF_ENABLE_DAP=TRUE -DNETCDF_ENABLE_HDF5=TRUE -DNETCDF_ENABLE_NCZARR=TRUE -DNETCDF_ENABLE_DAP_LONG_TESTS=TRUE -DNETCDF_ENABLE_LIBXML2=FALSE -DBUILD_SHARED_LIBS=FALSE -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
- name: Print Summary
shell: bash -l {0}
@ -602,7 +602,7 @@ jobs:
run: |
mkdir build
cd build
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_C_COMPILER=mpicc -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DENABLE_DAP=TRUE -DENABLE_HDF5=TRUE -DENABLE_NCZARR=TRUE -D ENABLE_DAP_LONG_TESTS=TRUE -DENABLE_PNETCDF=TRUE
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_C_COMPILER=mpicc -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DNETCDF_ENABLE_DAP=TRUE -DNETCDF_ENABLE_HDF5=TRUE -DNETCDF_ENABLE_NCZARR=TRUE -D NETCDF_ENABLE_DAP_LONG_TESTS=TRUE -DNETCDF_ENABLE_PNETCDF=TRUE
- name: Print Summary
shell: bash -l {0}
@ -679,18 +679,18 @@ jobs:
- run: echo "LDFLAGS=-L${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
- run: echo "LD_LIBRARY_PATH=${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
- run: |
echo "ENABLE_HDF5=--disable-hdf5" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=--disable-hdf5" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc3'
- run: |
echo "ENABLE_HDF5=--enable-hdf5" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=--enable-hdf5" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc4'
- run: echo "ENABLE_DAP=--disable-dap" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=--disable-dap" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_off'
- run: echo "ENABLE_DAP=--enable-dap" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=--enable-dap" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_on'
- run: echo "ENABLE_NCZARR=--disable-nczarr" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=--disable-nczarr" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_off'
- run: echo "ENABLE_NCZARR=--enable-nczarr" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=--enable-nczarr" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_on'
###
@ -738,7 +738,7 @@ jobs:
done
current_directory="$(pwd)"
mkdir ../build
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} "${current_directory}/configure" ${ENABLE_HDF5} ${ENABLE_DAP} ${ENABLE_NCZARR}
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} "${current_directory}/configure" ${NETCDF_ENABLE_HDF5} ${NETCDF_ENABLE_DAP} ${NETCDF_ENABLE_NCZARR}
if: ${{ success() }}
- name: Look at config.log if error
@ -821,18 +821,18 @@ jobs:
- run: echo "CMAKE_PREFIX_PATH=${HOME}/environments/${{ matrix.hdf5 }}/" >> $GITHUB_ENV
- run: echo "LD_LIBRARY_PATH=${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
- run: |
echo "ENABLE_HDF5=OFF" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=OFF" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc3'
- run: |
echo "ENABLE_HDF5=ON" >> $GITHUB_ENV
echo "NETCDF_ENABLE_HDF5=ON" >> $GITHUB_ENV
if: matrix.use_nc4 == 'nc4'
- run: echo "ENABLE_DAP=OFF" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=OFF" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_off'
- run: echo "ENABLE_DAP=ON" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_DAP=ON" >> $GITHUB_ENV
if: matrix.use_dap == 'dap_on'
- run: echo "ENABLE_NCZARR=OFF" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=OFF" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_off'
- run: echo "ENABLE_NCZARR=ON" >> $GITHUB_ENV
- run: echo "NETCDF_ENABLE_NCZARR=ON" >> $GITHUB_ENV
if: matrix.use_nczarr == 'nczarr_on'
- run: echo "CTEST_OUTPUT_ON_FAILURE=1" >> $GITHUB_ENV
@ -860,7 +860,7 @@ jobs:
run: |
mkdir build
cd build
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DENABLE_DAP=${ENABLE_DAP} -DENABLE_HDF5=${ENABLE_HDF5} -DENABLE_NCZARR=${ENABLE_NCZARR}
LD_LIBRARY_PATH=${LD_LIBRARY_PATH} cmake .. -DCMAKE_PREFIX_PATH=${CMAKE_PREFIX_PATH} -DNETCDF_ENABLE_DAP=${NETCDF_ENABLE_DAP} -DNETCDF_ENABLE_HDF5=${NETCDF_ENABLE_HDF5} -DNETCDF_ENABLE_NCZARR=${NETCDF_ENABLE_NCZARR}
- name: Print Summary
shell: bash -l {0}

File diff suppressed because it is too large Load Diff

View File

@ -69,11 +69,11 @@ would specify it with the '-G' flag.
Common NetCDF/CMake Options
*********************
- ENABLE_NETCDF_4 (On by Default)
- ENABLE_DAP (On by Default)
- NETCDF_ENABLE_NETCDF_4 (On by Default)
- NETCDF_ENABLE_DAP (On by Default)
- BUILD_SHARED_LIBS (Off by Default for Windows,
On by Default for Unix/Linux)
- ENABLE_DLL (Windows Only, Off by Default)
- NETCDF_ENABLE_DLL (Windows Only, Off by Default)
- CMAKE_PREFIX_PATH (Specify list of
This is just a partial list of options available. To see a full list
@ -81,7 +81,7 @@ of options, run 'cmake -L' from the command line, or use a CMake GUI.
To specify an option with CMake, you would use the following syntax:
developer@dummy-machine:/netcdf/build_dir$ cmake .. -D"ENABLE_NETCDF_4=ON"
developer@dummy-machine:/netcdf/build_dir$ cmake .. -D"NETCDF_ENABLE_NETCDF_4=ON"
-D"BUILD_SHARED_LIBS=ON" -D"USE_HDF5=OFF"
Additional References

View File

@ -49,7 +49,7 @@ V2_TEST = nctest
endif
# Does the user want to build ncgen/ncdump?
if BUILD_UTILITIES
if NETCDF_BUILD_UTILITIES
NCGEN3 = ncgen3
NCGEN = ncgen
NCDUMP = ncdump
@ -76,21 +76,21 @@ endif
endif
# Build the dap2 client
if ENABLE_DAP
if NETCDF_ENABLE_DAP
OCLIB = oc2
DAP2 = libdap2
NCDAP2TESTDIR = ncdap_test
endif
if ENABLE_DAP4
if NETCDF_ENABLE_DAP4
DAP4 = libdap4
NCDAP4TESTDIR = dap4_test
XML = libncxml
endif #DAP4
if ENABLE_S3_INTERNAL
if NETCDF_ENABLE_S3_INTERNAL
XML = libncxml # Internal S3 requires XML
endif #ENABLE_S3_INTERNAL
endif #NETCDF_ENABLE_S3_INTERNAL
# Build PnetCDF
if USE_PNETCDF
@ -104,18 +104,18 @@ LIBHDF4 = libhdf4
endif
# Build Plugin support
if ENABLE_PLUGINS
if NETCDF_ENABLE_PLUGINS
NCPOCO = libncpoco
endif
# Build Cloud Storage if desired.
if ENABLE_NCZARR
if NETCDF_ENABLE_NCZARR
ZARR_TEST_DIR = nczarr_test
ZARR = libnczarr
endif
# Optionally build test plugins
if ENABLE_PLUGINS
if NETCDF_ENABLE_PLUGINS
PLUGIN_DIR = plugins
endif
@ -210,7 +210,7 @@ install-data-hook:
# Also track the S3 cleanup id
all-local: liblib/libnetcdf.la
echo ${PACKAGE_VERSION} > VERSION
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_TESTALL
rm -f ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
echo "@TESTUID@" >> ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
cat ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids | sort | uniq > ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
@ -218,7 +218,7 @@ if ENABLE_S3_TESTALL
mv ${abs_top_builddir}/tmp_@PLATFORMUID@.uids ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
endif
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_TESTALL
distclean-local:
rm -f ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
endif

View File

@ -7,6 +7,7 @@ This file contains a high-level description of this package's evolution. Release
## 4.9.3 - TBD
* Changed `cmake` build options to be prefaced with `NETCDF`, to bring things into line with best practices. This will permit a number of overall quality-of-life improvements to netCDF, in terms of allowing it to be more easily integrated with upstream projects via `FetchContent()`, `add_subdirectory()`, etc. Currently, the naming convention in use thus far will still work, but will result in warning messages about deprecation, and instructions on how to update your workflow. See [Github #2895](https://github.com/Unidata/netcdf-c/pull/2895) for more information.
* Fix some problems in handling S3 urls with missing regions. See [Github #2819](https://github.com/Unidata/netcdf-c/pull/2819).
* Incorporate a more modern look and feel to user documentation generated by Doxygen. See [Doxygen Awesome CSS](https://github.com/jothepro/doxygen-awesome-css) and [Github #2864](https://github.com/Unidata/netcdf-c/pull/2864) for more information.
* Added infrastructure to allow for `CMAKE_UNITY_BUILD`, (thanks \@jschueller). See [Github #2839](https://github.com/Unidata/netcdf-c/pull/2839) for more information.

View File

@ -36,7 +36,7 @@ build: off
build_script:
- cmd: mkdir build
- cmd: cd build
- cmd: cmake .. -G "%CMAKE_GENERATOR%" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=%INSTALL_LOC% -DENABLE_BASH_SCRIPT_TESTING=OFF -DENABLE_FILTER_TESTING=OFF -DENABLE_BYTERANGE=ON
- cmd: cmake .. -G "%CMAKE_GENERATOR%" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=%INSTALL_LOC% -DNETCDF_ENABLE_BASH_SCRIPT_TESTING=OFF -DNETCDF_ENABLE_FILTER_TESTING=OFF -DNETCDF_ENABLE_BYTERANGE=ON
- cmd: if errorlevel 1 exit 1
- cmd: cmake --build . --config Release -- /maxcpucount:4

View File

@ -14,7 +14,7 @@ find_package(MakeDist)
################################
# HDF4
################################
if(ENABLE_HDF4)
if(NETCDF_ENABLE_HDF4)
set(USE_HDF4 ON )
# Check for include files, libraries.
@ -69,8 +69,8 @@ if(ENABLE_HDF4)
message(STATUS "Found JPEG libraries: ${JPEG_LIB}")
# Option to enable HDF4 file tests.
option(ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata ftp site to test with (requires curl)." ON)
if(ENABLE_HDF4_FILE_TESTS)
option(NETCDF_ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata ftp site to test with (requires curl)." ON)
if(NETCDF_ENABLE_HDF4_FILE_TESTS)
find_program(PROG_CURL NAMES curl)
if(PROG_CURL)
set(USE_HDF4_FILE_TESTS ON )
@ -114,10 +114,10 @@ if(USE_HDF5)
# as a shared library, we will use hdf5 as a shared
# library. If we are building netcdf statically,
# we will use a static library. This can be toggled
# by explicitly modifying NC_FIND_SHARED_LIBS.
# by explicitly modifying NETCDF_FIND_SHARED_LIBS.
##
#if (MSVC)
# if(NC_FIND_SHARED_LIBS)
# if(NETCDF_FIND_SHARED_LIBS)
# set(HDF5_USE_STATIC_LIBRARIES OFF)
# else()
# set(HDF5_USE_STATIC_LIBRARIES ON)
@ -201,7 +201,7 @@ if(USE_HDF5)
# Record if ROS3 Driver is available
if(HAS_HDF5_ROS3)
set(ENABLE_HDF5_ROS3 ON )
set(NETCDF_ENABLE_HDF5_ROS3 ON )
endif()
IF (HDF5_SUPPORTS_PAR_FILTERS)
@ -240,8 +240,8 @@ set(FOUND_CURL ${FOUND_CURL} TRUE )
# Start disabling if curl not found
if(NOT FOUND_CURL)
message(WARNING "ENABLE_REMOTE_FUNCTIONALITY requires libcurl; disabling")
set(ENABLE_REMOTE_FUNCTIONALITY OFF CACHE BOOL "ENABLE_REMOTE_FUNCTIONALITY requires libcurl; disabling" FORCE )
message(WARNING "NETCDF_ENABLE_REMOTE_FUNCTIONALITY requires libcurl; disabling")
set(NETCDF_ENABLE_REMOTE_FUNCTIONALITY OFF CACHE BOOL "NETCDF_ENABLE_REMOTE_FUNCTIONALITY requires libcurl; disabling" FORCE )
endif()
set (CMAKE_REQUIRED_INCLUDES ${CURL_INCLUDE_DIRS})
@ -346,18 +346,18 @@ endif()
################################
# Zips
################################
IF (ENABLE_FILTER_SZIP)
IF (NETCDF_ENABLE_FILTER_SZIP)
find_package(Szip)
elseif(ENABLE_NCZARR)
elseif(NETCDF_ENABLE_NCZARR)
find_package(Szip)
endif()
IF (ENABLE_FILTER_BZ2)
IF (NETCDF_ENABLE_FILTER_BZ2)
find_package(Bz2)
endif()
IF (ENABLE_FILTER_BLOSC)
IF (NETCDF_ENABLE_FILTER_BLOSC)
find_package(Blosc)
endif()
IF (ENABLE_FILTER_ZSTD)
IF (NETCDF_ENABLE_FILTER_ZSTD)
find_package(Zstd)
endif()
@ -381,7 +381,7 @@ else()
set(STD_FILTERS "${STD_FILTERS} bz2")
endif()
IF (ENABLE_NCZARR_ZIP)
IF (NETCDF_ENABLE_NCZARR_ZIP)
find_package(Zip REQUIRED)
target_include_directories(netcdf
PRIVATE
@ -395,20 +395,20 @@ endif ()
# Note we check for the library after checking for enable_s3
# because for some reason this screws up if we unconditionally test for sdk
# and it is not available. Fix someday
if(ENABLE_S3)
if(NOT ENABLE_S3_INTERNAL)
if(NETCDF_ENABLE_S3)
if(NOT NETCDF_ENABLE_S3_INTERNAL)
# See if aws-s3-sdk is available
find_package(AWSSDK REQUIRED COMPONENTS s3;transfer)
if(AWSSDK_FOUND)
set(ENABLE_S3_AWS ON CACHE BOOL "S3 AWS" FORCE)
set(NETCDF_ENABLE_S3_AWS ON CACHE BOOL "S3 AWS" FORCE)
target_include_directories(netcdf
PRIVATE
${AWSSDK_INCLUDE_DIR}
)
else(AWSSDK_FOUND)
set(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
set(NETCDF_ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
endif(AWSSDK_FOUND)
else(NOT ENABLE_S3_INTERNAL)
else(NOT NETCDF_ENABLE_S3_INTERNAL)
# Find crypto libraries required with testing with the internal s3 api.
#find_library(SSL_LIB NAMES ssl openssl)
find_package(OpenSSL REQUIRED)
@ -421,16 +421,16 @@ if(ENABLE_S3)
# message(FATAL_ERROR "Can't find a crypto library, required by S3_INTERNAL")
#endif(NOT CRYPTO_LIB)
endif(NOT ENABLE_S3_INTERNAL)
endif(NOT NETCDF_ENABLE_S3_INTERNAL)
else()
set(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
set(NETCDF_ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
endif()
################################
# LibXML
################################
# see if we have libxml2
if(ENABLE_LIBXML2)
if(NETCDF_ENABLE_LIBXML2)
find_package(LibXml2)
if(LibXml2_FOUND)
set(HAVE_LIBXML2 TRUE)
@ -442,24 +442,22 @@ if(ENABLE_LIBXML2)
else()
set(HAVE_LIBXML2 FALSE)
endif()
endif(ENABLE_LIBXML2)
endif(NETCDF_ENABLE_LIBXML2)
################################
# MPI
################################
if(ENABLE_PARALLEL4 OR HDF5_PARALLEL)
################################
if(NETCDF_ENABLE_PARALLEL4 OR HDF5_PARALLEL)
find_package(MPI REQUIRED)
endif()
################################
# parallel IO
# Parallel IO
################################
if(ENABLE_PNETCDF)
find_library(PNETCDF NAMES pnetcdf)
find_path(PNETCDF_INCLUDE_DIR pnetcdf.h)
if(NOT PNETCDF)
message(STATUS "Cannot find PnetCDF library. Disabling PnetCDF support.")
set(USE_PNETCDF OFF CACHE BOOL "")
if(NETCDF_ENABLE_PNETCDF)
find_package(PNETCDF 1.6.0 REQUIRED)
if(NOT PNETCDF_HAS_RELAXED_COORD_BOUND)
message(FATAL_ERROR "Pnetcdf must be built with relax-coord-bound enabled")
endif()
endif()
@ -467,7 +465,7 @@ endif()
# Doxygen
################################
if(ENABLE_DOXYGEN)
if(NETCDF_ENABLE_DOXYGEN)
find_package(Doxygen REQUIRED)
endif()

39
cmake/deprecated.cmake Normal file
View File

@ -0,0 +1,39 @@
#######
# Check for deprecated (un-prefixed) cache options, provide a warning, and
# set the corresponding NETCDF_-prefixed option.
#
# This file is being added in support of https://github.com/Unidata/netcdf-c/pull/2895 and may eventually
# no longer be necessary and removed in the future.
#
#######

# check_depr_opt(arg)
#
# If the legacy cache variable `${arg}` is defined:
#   - warn the user that it is deprecated,
#   - forward its value to NETCDF_${arg} in the caller's scope,
#   - append the mapping to DEPR_OPT so the caller can print a summary.
function(check_depr_opt arg)
  if(DEFINED ${arg})
    # Quote the expansion so list-valued cache entries survive intact.
    set(val "${${arg}}")
    message(WARNING "${arg} is deprecated and will be removed. Please use NETCDF_${arg} in the future")
    set(NETCDF_${arg} "${val}" PARENT_SCOPE)
    set(DEPR_OPT "${DEPR_OPT}\n\to ${arg} --> NETCDF_${arg}" PARENT_SCOPE)
  endif()
endfunction()

message(STATUS "Checking for Deprecated Options")

# Every legacy option name that gained a NETCDF_ prefix in PR #2895.
list(APPEND opts BUILD_UTILITIES ENABLE_BENCHMARKS ENABLE_BYTERANGE ENABLE_CDF5 ENABLE_CONVERSION_WARNINGS)
list(APPEND opts ENABLE_DAP ENABLE_DAP2 ENABLE_DAP4 ENABLE_DISKLESS ENABLE_DOXYGEN ENABLE_ERANGE_FILL)
list(APPEND opts ENABLE_EXAMPLES ENABLE_EXAMPLES_TESTS ENABLE_EXTREME_NUMBERS ENABLE_FFIO ENABLE_FILTER_BLOSC)
# BUGFIX: was "ENABLEFILTER_BZ2" (missing underscore); a legacy
# -DENABLE_FILTER_BZ2=... would have been silently ignored.
list(APPEND opts ENABLE_FILTER_BZ2 ENABLE_FILTER_SZIP ENABLE_FILTER_TESTING ENABLE_FILTER_ZSTD ENABLE_FSYNC)
list(APPEND opts ENABLE_HDF4 ENABLE_HDF5 ENABLE_LARGE_FILE_SUPPORT ENABLE_LARGE_FILE_TESTS ENABLE_LIBXML2)
list(APPEND opts ENABLE_LOGGING ENABLE_METADATA_PERF_TESTS ENABLE_MMAP ENABLE_NCZARR ENABLE_NCZARR_FILTERS)
list(APPEND opts ENABLE_NCZARR_S3 ENABLE_NCZARR_ZIP ENABLE_NETCDF_4 ENABLE_PARALLEL4 ENABLE_PARALLEL_TESTS)
list(APPEND opts ENABLE_PLUGINS ENABLE_PNETCDF ENABLE_QUANTIZE ENABLE_REMOTE_FUNCTIONALITY ENABLE_S3 ENABLE_S3_AWS)
list(APPEND opts ENABLE_S3_INTERNAL ENABLE_STDIO ENABLE_STRICT_NULL_BYTE_HEADER_PADDING ENABLE_TESTS ENABLE_UNIT_TESTS)
list(APPEND opts FIND_SHARED_LIBS LIB_NAME)

foreach(opt IN LISTS opts)
  check_depr_opt(${opt})
endforeach()

View File

@ -0,0 +1,80 @@
# FindPNETCDF
# -----------
#
# Find parallel IO library for classic netCDF files
#
# This module will define the following variables:
#
# ::
#
#   PNETCDF_FOUND
#   PNETCDF_INCLUDE_DIRS
#   PNETCDF_LIBRARIES
#   PNETCDF_VERSION
#   PNETCDF_HAS_ERANGE_FILL - True if PnetCDF was built with ``NC_ERANGE`` support
#   PNETCDF_HAS_RELAXED_COORD_BOUND - True if PnetCDF was built with relaxed coordinate bounds
#
# The ``PNETCDF::PNETCDF`` target will also be exported

include(FindPackageHandleStandardArgs)

find_path(PNETCDF_INCLUDE_DIR NAMES pnetcdf.h)
find_library(PNETCDF_LIBRARY NAMES pnetcdf)

# All header parsing (version + feature probes) must stay inside this guard:
# file(STRINGS) on a nonexistent path is a hard configure error, and the
# original ran the feature probes unconditionally.
if (PNETCDF_INCLUDE_DIR)
  set(pnetcdf_h "${PNETCDF_INCLUDE_DIR}/pnetcdf.h")
  message(DEBUG "PnetCDF include file ${pnetcdf_h} will be searched for version")

  # The version is split across three #define lines in pnetcdf.h.
  file(STRINGS "${pnetcdf_h}" pnetcdf_major_string REGEX "^#define PNETCDF_VERSION_MAJOR")
  string(REGEX REPLACE "[^0-9]" "" pnetcdf_major "${pnetcdf_major_string}")
  file(STRINGS "${pnetcdf_h}" pnetcdf_minor_string REGEX "^#define PNETCDF_VERSION_MINOR")
  string(REGEX REPLACE "[^0-9]" "" pnetcdf_minor "${pnetcdf_minor_string}")
  file(STRINGS "${pnetcdf_h}" pnetcdf_sub_string REGEX "^#define PNETCDF_VERSION_SUB")
  string(REGEX REPLACE "[^0-9]" "" pnetcdf_sub "${pnetcdf_sub_string}")

  # BUGFIX: the version was only stored in lowercase `pnetcdf_version`, so
  # `VERSION_VAR PNETCDF_VERSION` below was always empty and a caller's
  # find_package(PNETCDF 1.6.0 ...) version check could never succeed.
  set(PNETCDF_VERSION "${pnetcdf_major}.${pnetcdf_minor}.${pnetcdf_sub}")
  message(DEBUG "Found PnetCDF version ${PNETCDF_VERSION}")

  # PNETCDF_ERANGE_FILL is 1 when PnetCDF was built with NC_ERANGE fill support.
  file(STRINGS "${pnetcdf_h}" enable_erange_fill_pnetcdf REGEX "^#define PNETCDF_ERANGE_FILL")
  string(REGEX REPLACE "[^0-9]" "" erange_fill_pnetcdf "${enable_erange_fill_pnetcdf}")
  if("${erange_fill_pnetcdf}" STREQUAL "1")
    set(PNETCDF_HAS_ERANGE_FILL ON CACHE BOOL "")
  else()
    set(PNETCDF_HAS_ERANGE_FILL OFF CACHE BOOL "")
  endif()

  # PNETCDF_RELAX_COORD_BOUND is 1 when built with relaxed coordinate bounds.
  file(STRINGS "${pnetcdf_h}" relax_coord_bound_pnetcdf REGEX "^#define PNETCDF_RELAX_COORD_BOUND")
  string(REGEX REPLACE "[^0-9]" "" relax_coord_bound "${relax_coord_bound_pnetcdf}")
  if ("${relax_coord_bound}" STREQUAL "1")
    set(PNETCDF_HAS_RELAXED_COORD_BOUND ON CACHE BOOL "")
  else()
    set(PNETCDF_HAS_RELAXED_COORD_BOUND OFF CACHE BOOL "")
  endif()
endif()

message(DEBUG "[ ${CMAKE_CURRENT_LIST_FILE}:${CMAKE_CURRENT_LIST_LINE} ]"
  " PNETCDF_ROOT = ${PNETCDF_ROOT}"
  " PNETCDF_INCLUDE_DIR = ${PNETCDF_INCLUDE_DIR}"
  " PNETCDF_LIBRARY = ${PNETCDF_LIBRARY}"
  " PNETCDF_VERSION = ${PNETCDF_VERSION}"
)

# Fixed: previously listed nonexistent PNETCDF_ERANGE_FILL.
mark_as_advanced(PNETCDF_INCLUDE_DIR PNETCDF_LIBRARY PNETCDF_HAS_ERANGE_FILL PNETCDF_HAS_RELAXED_COORD_BOUND)

find_package_handle_standard_args(PNETCDF
  REQUIRED_VARS PNETCDF_LIBRARY PNETCDF_INCLUDE_DIR
  VERSION_VAR PNETCDF_VERSION
)

if (PNETCDF_FOUND)
  # Plural result variables documented above; previously never set.
  set(PNETCDF_LIBRARIES "${PNETCDF_LIBRARY}")
  set(PNETCDF_INCLUDE_DIRS "${PNETCDF_INCLUDE_DIR}")
endif()

if (PNETCDF_FOUND AND NOT TARGET PNETCDF::PNETCDF)
  # pnetcdf.h requires MPI, so consumers of the target must link MPI too.
  find_package(MPI REQUIRED)
  add_library(PNETCDF::PNETCDF UNKNOWN IMPORTED)
  set_target_properties(PNETCDF::PNETCDF PROPERTIES
    IMPORTED_LINK_INTERFACE_LIBRARIES MPI::MPI_C
    IMPORTED_LOCATION "${PNETCDF_LIBRARY}"
    INTERFACE_INCLUDE_DIRECTORIES "${PNETCDF_INCLUDE_DIR}"
  )
  find_library(MATH_LIBRARY m)
  if (MATH_LIBRARY)
    # BUGFIX: append libm instead of overwriting the property, which
    # previously dropped the MPI::MPI_C interface link set above.
    set_property(TARGET PNETCDF::PNETCDF APPEND PROPERTY
      IMPORTED_LINK_INTERFACE_LIBRARIES "${MATH_LIBRARY}")
  endif()
endif()

View File

@ -219,11 +219,11 @@ macro(print_conf_summary)
message("Configuration Summary:")
message("")
message(STATUS "Building Shared Libraries: ${BUILD_SHARED_LIBS}")
message(STATUS "Building netCDF-4: ${ENABLE_NETCDF_4}")
message(STATUS "Building DAP2 Support: ${ENABLE_DAP2}")
message(STATUS "Building DAP4 Support: ${ENABLE_DAP4}")
message(STATUS "Building Byte-range Support: ${ENABLE_BYTERANGE}")
message(STATUS "Building Utilities: ${BUILD_UTILITIES}")
message(STATUS "Building netCDF-4: ${NETCDF_ENABLE_NETCDF_4}")
message(STATUS "Building DAP2 Support: ${NETCDF_ENABLE_DAP2}")
message(STATUS "Building DAP4 Support: ${NETCDF_ENABLE_DAP4}")
message(STATUS "Building Byte-range Support: ${NETCDF_ENABLE_BYTERANGE}")
message(STATUS "Building Utilities: ${NETCDF_BUILD_UTILITIES}")
if(CMAKE_PREFIX_PATH)
message(STATUS "CMake Prefix Path: ${CMAKE_PREFIX_PATH}")
endif()
@ -236,15 +236,15 @@ macro(print_conf_summary)
message("")
endif()
message("Tests Enabled: ${ENABLE_TESTS}")
if(ENABLE_TESTS)
message(STATUS "DAP Remote Tests: ${ENABLE_DAP_REMOTE_TESTS}")
message(STATUS "Extra Tests: ${ENABLE_EXTRA_TESTS}")
message(STATUS "Coverage Tests: ${ENABLE_COVERAGE_TESTS}")
message(STATUS "Parallel Tests: ${ENABLE_PARALLEL_TESTS}")
message(STATUS "Large File Tests: ${ENABLE_LARGE_FILE_TESTS}")
message(STATUS "Extreme Numbers: ${ENABLE_EXTREME_NUMBERS}")
message(STATUS "Unit Tests: ${ENABLE_UNIT_TESTS}")
message("Tests Enabled: ${NETCDF_ENABLE_TESTS}")
if(NETCDF_ENABLE_TESTS)
message(STATUS "DAP Remote Tests: ${NETCDF_ENABLE_DAP_REMOTE_TESTS}")
message(STATUS "Extra Tests: ${NETCDF_ENABLE_EXTRA_TESTS}")
message(STATUS "Coverage Tests: ${NETCDF_ENABLE_COVERAGE_TESTS}")
message(STATUS "Parallel Tests: ${NETCDF_ENABLE_PARALLEL_TESTS}")
message(STATUS "Large File Tests: ${NETCDF_ENABLE_LARGE_FILE_TESTS}")
message(STATUS "Extreme Numbers: ${NETCDF_ENABLE_EXTREME_NUMBERS}")
message(STATUS "Unit Tests: ${NETCDF_ENABLE_UNIT_TESTS}")
endif()
message("")

View File

@ -7,7 +7,7 @@ cmake -G "Visual Studio 15 Win64" ^
-D CMAKE_INSTALL_PREFIX=%LIBRARY_PREFIX% ^
-D BUILD_SHARED_LIBS=ON ^
-D ENABLE_TESTS=ON ^
-D ENABLE_HDF4=ON ^
-D NETCDF_ENABLE_HDF4=ON ^
-D CMAKE_PREFIX_PATH=%LIBRARY_PREFIX% ^
-D ZLIB_LIBRARY=%LIBRARY_LIB%\zlib.lib ^
-D ZLIB_INCLUDE_DIR=%LIBRARY_INC% ^

View File

@ -113,22 +113,22 @@ are set when opening a binary file on Windows. */
#cmakedefine DLL_NETCDF 1
/* if true, use atexist */
#cmakedefine ENABLE_ATEXIT_FINALIZE 1
#cmakedefine NETCDF_ENABLE_ATEXIT_FINALIZE 1
/* if true, build byte-range Client */
#cmakedefine ENABLE_BYTERANGE 1
#cmakedefine NETCDF_ENABLE_BYTERANGE 1
/* if true, enable ERANGE fill */
#cmakedefine ENABLE_ERANGE_FILL 1
#ifdef ENABLE_ERANGE_FILL
#cmakedefine NETCDF_ENABLE_ERANGE_FILL 1
#ifdef NETCDF_ENABLE_ERANGE_FILL
#define ERANGE_FILL 1
#endif
/* if true, use hdf5 S3 virtual file reader */
#cmakedefine ENABLE_HDF5_ROS3 1
#cmakedefine NETCDF_ENABLE_HDF5_ROS3 1
/* if true, enable CDF5 Support */
#cmakedefine ENABLE_CDF5 1
#cmakedefine NETCDF_ENABLE_CDF5 1
/* if true, enable client side filters */
#cmakedefine ENABLE_CLIENT_FILTERS 1
@ -137,34 +137,34 @@ are set when opening a binary file on Windows. */
#cmakedefine USE_STRICT_NULL_BYTE_HEADER_PADDING 1
/* if true, build DAP2 and DAP4 Client */
#cmakedefine ENABLE_DAP 1
#cmakedefine NETCDF_ENABLE_DAP 1
/* if true, build DAP4 Client */
#cmakedefine ENABLE_DAP4 1
#cmakedefine NETCDF_ENABLE_DAP4 1
/* if true, do remote tests */
#cmakedefine ENABLE_DAP_REMOTE_TESTS 1
#cmakedefine NETCDF_ENABLE_DAP_REMOTE_TESTS 1
/* if true, enable NCZARR */
#cmakedefine ENABLE_NCZARR 1
#cmakedefine NETCDF_ENABLE_NCZARR 1
/* if true, enable nczarr filter support */
#cmakedefine ENABLE_NCZARR_FILTERS 1
#cmakedefine NETCDF_ENABLE_NCZARR_FILTERS 1
/* if true, enable nczarr zip support */
#cmakedefine ENABLE_NCZARR_ZIP 1
#cmakedefine NETCDF_ENABLE_NCZARR_ZIP 1
/* if true, Allow dynamically loaded plugins */
#cmakedefine ENABLE_PLUGINS 1
#cmakedefine NETCDF_ENABLE_PLUGINS 1
/* if true, enable S3 support */
#cmakedefine ENABLE_S3 1
#cmakedefine NETCDF_ENABLE_S3 1
/* if true, AWS S3 SDK is available */
#cmakedefine ENABLE_S3_AWS 1
#cmakedefine NETCDF_ENABLE_S3_AWS 1
/* if true, Force use of S3 internal library */
#cmakedefine ENABLE_S3_INTERNAL 1
#cmakedefine NETCDF_ENABLE_S3_INTERNAL 1
/* if true, enable S3 testing*/
#cmakedefine WITH_S3_TESTING "PUBLIC"
@ -301,11 +301,8 @@ are set when opening a binary file on Windows. */
/* Define to 1 if you have the `mfhdf' library (-lmfhdf). */
#cmakedefine HAVE_LIBMFHDF 1
/* Define to 1 if you have the `pnetcdf' library (-lpnetcdf). */
#cmakedefine HAVE_LIBPNETCDF 1
/* Define to 1 if you have the libxml2 library. */
#cmakedefine ENABLE_LIBXML2 1
#cmakedefine NETCDF_ENABLE_LIBXML2 1
/* Define to 1 if you have the <locale.h> header file. */
#cmakedefine HAVE_LOCALE_H 1
@ -487,8 +484,8 @@ with zip */
#cmakedefine NO_IEEE_FLOAT 1
#cmakedefine BUILD_V2 1
#cmakedefine ENABLE_DOXYGEN 1
#cmakedefine ENABLE_INTERNAL_DOCS 1
#cmakedefine NETCDF_ENABLE_DOXYGEN 1
#cmakedefine NETCDF_ENABLE_INTERNAL_DOCS 1
#cmakedefine VALGRIND_TESTS 1
#cmakedefine ENABLE_CDMREMOTE 1
#cmakedefine USE_HDF5 1
@ -496,7 +493,7 @@ with zip */
#cmakedefine TEST_PARALLEL ${TEST_PARALLEL}
#cmakedefine BUILD_RPC 1
#cmakedefine USE_X_GETOPT 1
#cmakedefine ENABLE_EXTREME_NUMBERS 1
#cmakedefine NETCDF_ENABLE_EXTREME_NUMBERS 1
/* do not build the netCDF version 2 API */
#cmakedefine NO_NETCDF_2 1

View File

@ -82,14 +82,14 @@ are set when opening a binary file on Windows. */
#cmakedefine NO_NETCDF_2 1
#cmakedefine USE_FSYNC 1
#cmakedefine JNA 1
#cmakedefine ENABLE_DOXYGEN 1
#cmakedefine ENABLE_INTERNAL_DOCS 1
#cmakedefine NETCDF_ENABLE_DOXYGEN 1
#cmakedefine NETCDF_ENABLE_INTERNAL_DOCS 1
#cmakedefine VALGRIND_TESTS 1
#cmakedefine ENABLE_CDMREMOTE 1
#cmakedefine USE_DAP 1
#cmakedefine ENABLE_DAP 1
#cmakedefine ENABLE_DAP_GROUPS 1
#cmakedefine ENABLE_DAP_REMOTE_TESTS 1
#cmakedefine NETCDF_ENABLE_DAP 1
#cmakedefine NETCDF_ENABLE_DAP_GROUPS 1
#cmakedefine NETCDF_ENABLE_DAP_REMOTE_TESTS 1
#cmakedefine EXTRA_TESTS
#cmakedefine USE_NETCDF4 1
#cmakedefine USE_HDF4 1
@ -110,7 +110,7 @@ are set when opening a binary file on Windows. */
#cmakedefine USE_DISKLESS 1
#cmakedefine USE_SZIP 1
#cmakedefine USE_X_GETOPT 1
#cmakedefine ENABLE_EXTREME_NUMBERS
#cmakedefine NETCDF_ENABLE_EXTREME_NUMBERS
#cmakedefine LARGE_FILE_TESTS 1
#cmakedefine HAVE_DECL_ISFINITE 1
#cmakedefine HAVE_DECL_ISNAN 1

View File

@ -280,8 +280,8 @@ AC_SUBST([DOXYGEN_SERVER_BASED_SEARCH], ["NO"])
AC_ARG_ENABLE([doxygen-pdf-output],
[AS_HELP_STRING([--enable-doxygen-pdf-output],
[Build netCDF library documentation in PDF format. Experimental.])])
AM_CONDITIONAL([NC_ENABLE_DOXYGEN_PDF_OUTPUT], [test "x$enable_doxygen_pdf_output" = xyes])
AC_SUBST([NC_ENABLE_DOXYGEN_PDF_OUTPUT], [$enable_doxygen_pdf_output])
AM_CONDITIONAL([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [test "x$enable_doxygen_pdf_output" = xyes])
AC_SUBST([NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT], [$enable_doxygen_pdf_output])
AC_ARG_ENABLE([dot],
[AS_HELP_STRING([--enable-dot],
@ -596,7 +596,7 @@ XMLPARSER="tinyxml2 (bundled)"
fi
# Need a condition and subst for this
AM_CONDITIONAL(ENABLE_LIBXML2, [test "x$enable_libxml2" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_LIBXML2, [test "x$enable_libxml2" = xyes])
AC_SUBST([XMLPARSER],[${XMLPARSER}])
###
@ -616,9 +616,9 @@ test "x$enable_quantize" = xno || enable_quantize=yes
AC_MSG_RESULT($enable_quantize)
if test "x${enable_quantize}" = xyes; then
AC_DEFINE([ENABLE_QUANTIZE], [1], [if true, enable quantize support])
AC_DEFINE([NETCDF_ENABLE_QUANTIZE], [1], [if true, enable quantize support])
fi
AM_CONDITIONAL(ENABLE_QUANTIZE, [test x$enable_quantize = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_QUANTIZE, [test x$enable_quantize = xyes])
# --enable-dap => enable-dap4
enable_dap4=$enable_dap
@ -682,14 +682,14 @@ AC_DEFINE_UNQUOTED([REMOTETESTSERVERS], ["$REMOTETESTSERVERS"], [the testservers
# Set the config.h flags
if test "x$enable_dap" = xyes; then
AC_DEFINE([USE_DAP], [1], [if true, build DAP Client])
AC_DEFINE([ENABLE_DAP], [1], [if true, build DAP Client])
AC_DEFINE([NETCDF_ENABLE_DAP], [1], [if true, build DAP Client])
fi
if test "x$enable_dap_remote_tests" = xyes; then
AC_DEFINE([ENABLE_DAP_REMOTE_TESTS], [1], [if true, do remote tests])
AC_DEFINE([NETCDF_ENABLE_DAP_REMOTE_TESTS], [1], [if true, do remote tests])
fi
if test "x$enable_external_server_tests" = xyes; then
AC_DEFINE([ENABLE_EXTERNAL_SERVER_TESTS], [1], [if true, do remote external tests])
AC_DEFINE([NETCDF_ENABLE_EXTERNAL_SERVER_TESTS], [1], [if true, do remote external tests])
fi
AC_MSG_CHECKING([whether the time-consuming dap tests should be enabled (default off)])
@ -712,10 +712,10 @@ if test "x$enable_nczarr" = xyes ; then
fi
if test "x$enable_nczarr" = xyes; then
AC_DEFINE([ENABLE_NCZARR], [1], [if true, build NCZarr Client])
AC_SUBST(ENABLE_NCZARR)
AC_DEFINE([NETCDF_ENABLE_NCZARR], [1], [if true, build NCZarr Client])
AC_SUBST(NETCDF_ENABLE_NCZARR)
fi
AM_CONDITIONAL(ENABLE_NCZARR, [test x$enable_nczarr = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR, [test x$enable_nczarr = xyes])
##########
# Look for Standardized libraries
@ -877,7 +877,7 @@ else
AC_MSG_RESULT([${enable_nczarr_zip}])
if test "x$enable_nczarr_zip" = xyes ; then
AC_DEFINE([ENABLE_NCZARR_ZIP], [1], [If true, then libzip found])
AC_DEFINE([NETCDF_ENABLE_NCZARR_ZIP], [1], [If true, then libzip found])
fi
# Check for enabling of S3 support
@ -969,23 +969,23 @@ else
fi
if test "x$enable_s3" = xyes ; then
AC_DEFINE([ENABLE_S3], [1], [if true, build netcdf-c with S3 support enabled])
AC_DEFINE([NETCDF_ENABLE_S3], [1], [if true, build netcdf-c with S3 support enabled])
fi
if test "x$enable_s3_aws" = xyes ; then
LIBS="$LIBS$S3LIBS"
AC_DEFINE([ENABLE_S3_AWS], [1], [If true, then use aws S3 library])
AC_DEFINE([NETCDF_ENABLE_S3_AWS], [1], [If true, then use aws S3 library])
fi
if test "x$enable_s3_internal" = xyes ; then
AC_DEFINE([ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library])
AC_DEFINE([NETCDF_ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library])
fi
AC_DEFINE_UNQUOTED([WITH_S3_TESTING], [$with_s3_testing], [control S3 testing.])
if test "x$with_s3_testing" = xyes ; then
AC_MSG_WARN([*** DO NOT SPECIFY WITH_S3_TESTING=YES UNLESS YOU HAVE ACCESS TO THE UNIDATA S3 BUCKET! ***])
AC_DEFINE([ENABLE_S3_TESTALL], [yes], [control S3 testing.])
AC_DEFINE([NETCDF_ENABLE_S3_TESTALL], [yes], [control S3 testing.])
fi
fi
@ -1066,7 +1066,7 @@ AC_ARG_ENABLE([utilities],
test "x$nc_build_c" = xno && enable_utilities=no
test "x$enable_utilities" = xno && nc_build_utilities=no
AC_MSG_RESULT($nc_build_utilities)
AM_CONDITIONAL(BUILD_UTILITIES, [test x$nc_build_utilities = xyes])
AM_CONDITIONAL(NETCDF_BUILD_UTILITIES, [test x$nc_build_utilities = xyes])
# Does the user want to disable all tests?
AC_MSG_CHECKING([whether test should be built and run])
@ -1330,7 +1330,7 @@ if test "x$enable_hdf5" = "xno" ; then
fi
if test "x$enable_dap4" = xyes; then
AC_DEFINE([ENABLE_DAP4], [1], [if true, build DAP4 Client])
AC_DEFINE([NETCDF_ENABLE_DAP4], [1], [if true, build DAP4 Client])
fi
# check for useful, but not essential, memio support
@ -1399,7 +1399,7 @@ if test "x$found_curl" = xno && test "x$enable_byterange" = xyes ; then
fi
if test "x$enable_byterange" = xyes; then
AC_DEFINE([ENABLE_BYTERANGE], [1], [if true, support byte-range read of remote datasets.])
AC_DEFINE([NETCDF_ENABLE_BYTERANGE], [1], [if true, support byte-range read of remote datasets.])
fi
# Does the user want to disable atexit?
@ -1422,7 +1422,7 @@ if test "x$enable_atexit_finalize" = xyes ; then
fi
if test "x$enable_atexit_finalize" = xyes ; then
AC_DEFINE([ENABLE_ATEXIT_FINALIZE], [1], [If true, enable nc_finalize via atexit()])
AC_DEFINE([NETCDF_ENABLE_ATEXIT_FINALIZE], [1], [If true, enable nc_finalize via atexit()])
fi
# Need libdl(d) for plugins
@ -1448,9 +1448,9 @@ if test "x$enable_plugins" = xyes && test "x$enable_shared" = xno; then
fi
if test "x$enable_plugins" = xyes; then
AC_DEFINE([ENABLE_PLUGINS], [1], [if true, support dynamically loaded plugins])
AC_DEFINE([NETCDF_ENABLE_PLUGINS], [1], [if true, support dynamically loaded plugins])
fi
AM_CONDITIONAL(ENABLE_PLUGINS, [test "x$enable_plugins" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_PLUGINS, [test "x$enable_plugins" = xyes])
AC_SUBST(USEPLUGINS, [${enable_plugins}])
AC_FUNC_ALLOCA
@ -1530,9 +1530,9 @@ else
fi
if test "x${enable_cdf5}" = xyes; then
AC_DEFINE([ENABLE_CDF5], [1], [if true, enable CDF5 Support])
AC_DEFINE([NETCDF_ENABLE_CDF5], [1], [if true, enable CDF5 Support])
fi
AM_CONDITIONAL(ENABLE_CDF5, [test x$enable_cdf5 = xyes ])
AM_CONDITIONAL(NETCDF_ENABLE_CDF5, [test x$enable_cdf5 = xyes ])
$SLEEPCMD
if test "$ac_cv_type_uchar" = yes ; then
@ -1639,7 +1639,7 @@ if test "x$enable_hdf5" = xyes; then
# See if hdf5 library supports Read-Only S3 (byte-range) driver
AC_SEARCH_LIBS([H5Pset_fapl_ros3],[hdf5_hldll hdf5_hl], [has_hdf5_ros3=yes], [has_hdf5_ros3=no])
if test "x$has_hdf5_ros3" = xyes && test "x$enable_byterange" = xyes; then
AC_DEFINE([ENABLE_HDF5_ROS3], [1], [if true, support byte-range using hdf5 virtual file driver.])
AC_DEFINE([NETCDF_ENABLE_HDF5_ROS3], [1], [if true, support byte-range using hdf5 virtual file driver.])
fi
# Check to see if HDF5 library is 1.10.3 or greater. If so, allows
@ -1887,15 +1887,15 @@ enable_nczarr_filter_testing=no
fi
if test "x$enable_nczarr_filters" = xyes; then
AC_DEFINE([ENABLE_NCZARR_FILTERS], [1], [if true, enable NCZarr filters])
AC_DEFINE([NETCDF_ENABLE_NCZARR_FILTERS], [1], [if true, enable NCZarr filters])
fi
# Client side filter registration is permanently disabled
enable_clientside_filters=no
AM_CONDITIONAL(ENABLE_CLIENTSIDE_FILTERS, [test x$enable_clientside_filters = xyes])
AM_CONDITIONAL(ENABLE_FILTER_TESTING, [test x$enable_filter_testing = xyes])
AM_CONDITIONAL(ENABLE_NCZARR_FILTERS, [test x$enable_nczarr_filters = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_FILTER_TESTING, [test x$enable_filter_testing = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR_FILTERS, [test x$enable_nczarr_filters = xyes])
# Automake conditionals need to be called, whether the answer is yes
# or no.
@ -1904,14 +1904,14 @@ AM_CONDITIONAL(TEST_PARALLEL4, [test "x$enable_parallel4" = xyes -a "x$enable_pa
AM_CONDITIONAL(BUILD_DAP, [test "x$enable_dap" = xyes])
AM_CONDITIONAL(USE_DAP, [test "x$enable_dap" = xyes]) # Alias
# Provide protocol specific flags
AM_CONDITIONAL(ENABLE_DAP, [test "x$enable_dap" = xyes])
AM_CONDITIONAL(ENABLE_DAP4, [test "x$enable_dap4" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_DAP, [test "x$enable_dap" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_DAP4, [test "x$enable_dap4" = xyes])
AM_CONDITIONAL(USE_STRICT_NULL_BYTE_HEADER_PADDING, [test x$enable_strict_null_byte_header_padding = xyes])
AM_CONDITIONAL(ENABLE_CDF5, [test "x$enable_cdf5" = xyes])
AM_CONDITIONAL(ENABLE_DAP_REMOTE_TESTS, [test "x$enable_dap_remote_tests" = xyes])
AM_CONDITIONAL(ENABLE_EXTERNAL_SERVER_TESTS, [test "x$enable_external_server_tests" = xyes])
AM_CONDITIONAL(ENABLE_DAP_AUTH_TESTS, [test "x$enable_dap_auth_tests" = xyes])
AM_CONDITIONAL(ENABLE_DAP_LONG_TESTS, [test "x$enable_dap_long_tests" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_CDF5, [test "x$enable_cdf5" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_DAP_REMOTE_TESTS, [test "x$enable_dap_remote_tests" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_EXTERNAL_SERVER_TESTS, [test "x$enable_external_server_tests" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_DAP_AUTH_TESTS, [test "x$enable_dap_auth_tests" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_DAP_LONG_TESTS, [test "x$enable_dap_long_tests" = xyes])
AM_CONDITIONAL(USE_PNETCDF_DIR, [test ! "x$PNETCDFDIR" = x])
AM_CONDITIONAL(USE_LOGGING, [test "x$enable_logging" = xyes])
AM_CONDITIONAL(CROSS_COMPILING, [test "x$cross_compiling" = xyes])
@ -1926,17 +1926,17 @@ AM_CONDITIONAL(BUILD_MMAP, [test x$enable_mmap = xyes])
AM_CONDITIONAL(BUILD_DOCS, [test x$enable_doxygen = xyes])
AM_CONDITIONAL(SHOW_DOXYGEN_TAG_LIST, [test x$enable_doxygen_tasks = xyes])
AM_CONDITIONAL(ENABLE_METADATA_PERF, [test x$enable_metadata_perf = xyes])
AM_CONDITIONAL(ENABLE_BYTERANGE, [test "x$enable_byterange" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_BYTERANGE, [test "x$enable_byterange" = xyes])
AM_CONDITIONAL(RELAX_COORD_BOUND, [test "xyes" = xyes])
AM_CONDITIONAL(HAS_PAR_FILTERS, [test x$hdf5_supports_par_filters = xyes ])
# We need to simplify the set of S3 and Zarr flag combinations
AM_CONDITIONAL(ENABLE_S3, [test "x$enable_s3" = xyes])
AM_CONDITIONAL(ENABLE_S3_AWS, [test "x$enable_s3_aws" = xyes])
AM_CONDITIONAL(ENABLE_S3_INTERNAL, [test "x$enable_s3_internal" = xyes])
AM_CONDITIONAL(ENABLE_NCZARR, [test "x$enable_nczarr" = xyes])
AM_CONDITIONAL(ENABLE_S3_TESTPUB, [test "x$with_s3_testing" != xno]) # all => public
AM_CONDITIONAL(ENABLE_S3_TESTALL, [test "x$with_s3_testing" = xyes])
AM_CONDITIONAL(ENABLE_NCZARR_ZIP, [test "x$enable_nczarr_zip" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3, [test "x$enable_s3" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_AWS, [test "x$enable_s3_aws" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_INTERNAL, [test "x$enable_s3_internal" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR, [test "x$enable_nczarr" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_TESTPUB, [test "x$with_s3_testing" != xno]) # all => public
AM_CONDITIONAL(NETCDF_ENABLE_S3_TESTALL, [test "x$with_s3_testing" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR_ZIP, [test "x$enable_nczarr_zip" = xyes])
AM_CONDITIONAL(HAVE_DEFLATE, [test "x$have_deflate" = xyes])
AM_CONDITIONAL(HAVE_SZ, [test "x$have_sz" = xyes])
AM_CONDITIONAL(HAVE_H5Z_SZIP, [test "x$enable_hdf5_szip" = xyes])
@ -2045,7 +2045,7 @@ AC_SUBST(HAS_S3_AWS,[$enable_s3_aws])
AC_SUBST(HAS_S3_INTERNAL,[$enable_s3_internal])
AC_SUBST(HAS_HDF5_ROS3,[$has_hdf5_ros3])
AC_SUBST(HAS_NCZARR,[$enable_nczarr])
AC_SUBST(ENABLE_S3_TESTING,[$with_s3_testing])
AC_SUBST(NETCDF_ENABLE_S3_TESTING,[$with_s3_testing])
AC_SUBST(HAS_NCZARR_ZIP,[$enable_nczarr_zip])
AC_SUBST(DO_NCZARR_ZIP_TESTS,[$enable_nczarr_zip])
AC_SUBST(HAS_QUANTIZE,[$enable_quantize])

View File

@ -34,7 +34,7 @@ find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=TRUE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE -DCMAKE_C_COMPILER=${CTEST_COMPILER}")
set(CTEST_BUILD_OPTIONS "-DNETCDF_ENABLE_COVERAGE_TESTS=TRUE -DNETCDF_ENABLE_ERANGE_FILL=TRUE -DNETCDF_ENABLE_LOGGING=TRUE -DNETCDF_ENABLE_BYTERANGE=TRUE -DNETCDF_ENABLE_LARGE_FILE_TESTS=FALSE -DCMAKE_C_COMPILER=${CTEST_COMPILER}")
set(CTEST_DROP_METHOD https)

View File

@ -33,7 +33,7 @@ find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=FALSE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_BUILD_OPTIONS "-DNETCDF_ENABLE_COVERAGE_TESTS=FALSE -DNETCDF_ENABLE_ERANGE_FILL=TRUE -DNETCDF_ENABLE_LOGGING=TRUE -DNETCDF_ENABLE_BYTERANGE=TRUE -DNETCDF_ENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_DROP_METHOD https)

View File

@ -33,7 +33,7 @@ find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=TRUE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_BUILD_OPTIONS "-DNETCDF_ENABLE_COVERAGE_TESTS=TRUE -DNETCDF_ENABLE_ERANGE_FILL=TRUE -DNETCDF_ENABLE_LOGGING=TRUE -DNETCDF_ENABLE_BYTERANGE=TRUE -DNETCDF_ENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_DROP_METHOD https)

View File

@ -33,7 +33,7 @@ find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=TRUE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE -DENABLE_DAP_REMOTE_TESTS=TRUE")
set(CTEST_BUILD_OPTIONS "-DNETCDF_ENABLE_COVERAGE_TESTS=TRUE -DNETCDF_ENABLE_ERANGE_FILL=TRUE -DNETCDF_ENABLE_LOGGING=TRUE -DNETCDF_ENABLE_BYTERANGE=TRUE -DNETCDF_ENABLE_LARGE_FILE_TESTS=FALSE -DNETCDF_ENABLE_DAP_REMOTE_TESTS=TRUE")
set(CTEST_DROP_METHOD https)

View File

@ -21,21 +21,21 @@ FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh)
FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/ FILE_PERMISSIONS OWNER_WRITE OWNER_READ OWNER_EXECUTE)
IF(ENABLE_DAP_REMOTE_TESTS)
IF(NETCDF_ENABLE_DAP_REMOTE_TESTS)
# Change name (add '4') to avoid cmake
# complaint about duplicate targets.
BUILD_BIN_TEST(findtestserver4)
BUILD_BIN_TEST(pingurl4)
ENDIF()
IF(ENABLE_TESTS)
IF(NETCDF_ENABLE_TESTS)
# Base tests
# The tests are set up as a combination of shell scripts and executables that
# must be run in a particular order. It is painful but will use macros to help
# keep it from being too bad.
IF(BUILD_UTILITIES)
IF(NETCDF_BUILD_UTILITIES)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/libdap4)
build_bin_test(test_parse)
build_bin_test(test_meta)
@ -48,9 +48,9 @@ ENDIF()
add_sh_test(dap4_test test_raw)
add_sh_test(dap4_test test_meta)
add_sh_test(dap4_test test_data)
ENDIF(BUILD_UTILITIES)
ENDIF(NETCDF_BUILD_UTILITIES)
IF(ENABLE_DAP_REMOTE_TESTS)
IF(NETCDF_ENABLE_DAP_REMOTE_TESTS)
add_sh_test(dap4_test test_remote)
add_sh_test(dap4_test test_hyrax)
add_sh_test(dap4_test test_dap4url)
@ -66,9 +66,9 @@ ENDIF()
IF(RUN_IGNORED_TESTS)
add_sh_test(dap4_test test_thredds)
ENDIF()
ENDIF(ENABLE_DAP_REMOTE_TESTS)
ENDIF(NETCDF_ENABLE_DAP_REMOTE_TESTS)
ENDIF(ENABLE_TESTS)
ENDIF(NETCDF_ENABLE_TESTS)
#FILE(COPY ./baseline DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})
#FILE(COPY ./baselineraw DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})

View File

@ -42,8 +42,8 @@ pingurl4_SOURCES = pingurl4.c
dump_SOURCES = dump.c
# Disable Dap4 Remote Tests until the test server is working
if BUILD_UTILITIES
if ENABLE_DAP_REMOTE_TESTS
if NETCDF_BUILD_UTILITIES
if NETCDF_ENABLE_DAP_REMOTE_TESTS
TESTS += test_remote.sh
TESTS += test_constraints.sh

View File

@ -8,7 +8,7 @@
#####
# Build doxygen documentation, if need be.
#####
IF(ENABLE_DOXYGEN)
IF(NETCDF_ENABLE_DOXYGEN)
# The following is redundant but does not hurt anything.
FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.html ${CMAKE_CURRENT_SOURCE_DIR}/images ${CMAKE_CURRENT_SOURCE_DIR}/*.doc ${CMAKE_CURRENT_SOURCE_DIR}/*.xml ${CMAKE_CURRENT_SOURCE_DIR}/*.m4 ${CMAKE_CURRENT_SOURCE_DIR}/*.texi ${CMAKE_SOURCE_DIR}/oc2/auth.html.in)
@ -43,9 +43,9 @@ IF(ENABLE_DOXYGEN)
# If ENABLE_DOXYGEN_LATEX_OUTPUT is true, automatically build
# If NETCDF_ENABLE_DOXYGEN_LATEX_OUTPUT is true, automatically build
# the PDF files.
IF(ENABLE_DOXYGEN_PDF_OUTPUT)
IF(NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT)
# Process 'main' netcdf documentation.
FIND_PROGRAM(NC_MAKE NAMES make)
FIND_PROGRAM(NC_PDFLATEX NAMES pdflatex)
@ -82,7 +82,7 @@ IF(ENABLE_DOXYGEN)
DESTINATION "${CMAKE_INSTALL_DOCDIR}/html"
COMPONENT documentation)
ENDIF(ENABLE_DOXYGEN)
ENDIF(NETCDF_ENABLE_DOXYGEN)
# Should match list in Makefile.am
SET(CUR_EXTRA_DIST ${CUR_EXTRA_DIST}

View File

@ -1986,7 +1986,7 @@ EXTRA_SEARCH_MAPPINGS =
# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = @NC_ENABLE_DOXYGEN_PDF_OUTPUT@
GENERATE_LATEX = @NC_NETCDF_ENABLE_DOXYGEN_PDF_OUTPUT@
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of

View File

@ -32,7 +32,7 @@ currently do any sort of optimization or caching.
This capability is enabled using the option *--enable-byterange* option
to the *./configure* command for Automake. For Cmake, the option flag is
*-DENABLE_BYTERANGE=true*.
*-DNETCDF_ENABLE_BYTERANGE=true*.
This capability requires access to *libcurl*, and an error will occur
if byterange is enabled, but *libcurl* could not be located.

View File

@ -119,8 +119,8 @@ Note also that if S3 support is enabled, then you need to have a C++ compiler in
The necessary CMake flags are as follows (with defaults)
1. *-DENABLE_S3* -- Controll S3 support
2. *-DENABLE_S3_INTERNAL* -- Force use of the *nch5s3comms* SDK instead of the *aws-cpp-sdk*.
1. *-DNETCDF_ENABLE_S3* -- Controll S3 support
2. *-DNETCDF_ENABLE_S3_INTERNAL* -- Force use of the *nch5s3comms* SDK instead of the *aws-cpp-sdk*.
3. *-DWITH-S3-TESTING_=ON|OFF|PUBLIC -- "ON" means do all S3 tests, "OFF" means do no S3 testing, "PUBLIC" means do only those tests that involve publically accessible S3 data.
Note that unlike Automake, CMake can properly locate C++ libraries, so it should not be necessary to specify _-laws-cpp-sdk-s3_ assuming that the aws s3 libraries are installed in the default location.
@ -267,7 +267,7 @@ This is an experimental SDK provided internally in the netcdf-c library.
### Build Options
In order to enable this SDK, the Automake option *--enable-s3-internal* or the CMake option *-DENABLE_S3_INTERNAL=ON* must be specified.
In order to enable this SDK, the Automake option *--enable-s3-internal* or the CMake option *-DNETCDF_ENABLE_S3_INTERNAL=ON* must be specified.
### Testing S3 Support {#nccloud_testing_S3_support}

View File

@ -711,7 +711,7 @@ and specific script files.
The actual cleanup requires different approaches for cmake and for automake.
In cmake, the CTestCustom.cmake mechanism is used and contains the following command:
````
IF(ENABLE_S3_TESTING)
IF(NETCDF_ENABLE_S3_TESTING)
# Assume run in top-level CMAKE_BINARY_DIR
set(CTEST_CUSTOM_POST_TEST "bash -x ${CMAKE_BINARY_DIR}/s3cleanup.sh")
ENDIF()
@ -722,7 +722,7 @@ because it is invoked after all tests are run in the nczarr_test
directory. So nczarr_test/Makefile.am contains the following
equivalent code:
````
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_TESTALL
check-local:
bash -x ${top_srcdir}/s3cleanup.sh
endif

View File

@ -45,9 +45,9 @@ To build netcdf-c with logging using autotools, add the
## Building netcdf-c with Logging using the CMake Build
To build netcdf-c with logging using CMake, set the ENABLE_LOGGING option to ON:
To build netcdf-c with logging using CMake, set the NETCDF_ENABLE_LOGGING option to ON:
`cmake -DENABLE_LOGGING=ON -DCMAKE_PREFIX_PATH=/usr/local/hdf5-1.14.0 ..`
`cmake -DNETCDF_ENABLE_LOGGING=ON -DCMAKE_PREFIX_PATH=/usr/local/hdf5-1.14.0 ..`
## Checking that Logging was Enabled

View File

@ -503,7 +503,7 @@ The relevant ./configure options are as follows.
The relevant CMake flags are as follows.
1. *-DENABLE_NCZARR=off* -- equivalent to the Automake *--disable-nczarr* option.
1. *-DNETCDF_ENABLE_NCZARR=off* -- equivalent to the Automake *--disable-nczarr* option.
## Testing NCZarr S3 Support {#nczarr_testing_S3_support}
The relevant tests for S3 support are in the _nczarr_test_ directory.
@ -527,7 +527,7 @@ also test S3 support with this option.
Enabling S3 support is controlled by this cmake option:
````
-DENABLE_S3=ON
-DNETCDF_ENABLE_S3=ON
````
However, to find the aws sdk libraries,
the following environment variables must be set:

View File

@ -35,8 +35,8 @@ check_PROGRAMS += simple_nc4_wr simple_nc4_rd simple_xy_nc4_wr \
simple_xy_nc4_rd
TESTS += run_examples4.sh
if BUILD_UTILITIES
if ENABLE_FILTER_TESTING
if NETCDF_BUILD_UTILITIES
if NETCDF_ENABLE_FILTER_TESTING
# filter_example.c should be same as nc_test4/test_filter.c
check_PROGRAMS += filter_example
TESTS += run_filter.sh

View File

@ -6,6 +6,6 @@
# See netcdf-c/COPYRIGHT file for more info.
ADD_SUBDIRECTORY(C)
IF(BUILD_UTILITIES)
IF(NETCDF_BUILD_UTILITIES)
ADD_SUBDIRECTORY(CDL)
ENDIF()

View File

@ -8,7 +8,7 @@
# See netcdf-c/COPYRIGHT file for more info.
# Build ncgen/ncdump.
if BUILD_UTILITIES
if NETCDF_BUILD_UTILITIES
CDL_DIR = CDL
endif

View File

@ -45,21 +45,23 @@ with the H5Lvisit function call
*/
herr_t
op_func (hid_t g_id, const char *name,
#if H5_VERSION_GE(1,12,0)
const H5L_info2_t *info,
#else
const H5L_info_t *info,
#endif
void *op_data)
{
hid_t id;
H5I_type_t obj_type;
strcpy((char *)op_data, name);
#if H5_VERSION_GE(1,12,0)
if ((id = H5Oopen_by_token(g_id, info->u.token)) < 0) ERR;
#else
#if H5_VERSION_LE(1, 10, 11) || defined(H5_USE_110_API_DEFAULT) || defined(H5_USE_18_API_DEFAULT) || defined(H5_USE_16_API_DEFAULT)
/* This library is either 1.10.11 (the last 1.10 release) or earlier
* OR this is a later version of the library built with a 1.10 or
* earlier API (earlier versions did not define their own USE
* API symbol).
*/
if ((id = H5Oopen_by_addr(g_id, info->u.address)) < 0) ERR;
#else
/* HDF5 1.12 switched from addresses to tokens to better support the VOL */
if ((id = H5Oopen_by_token(g_id, info->u.token)) < 0) ERR;
#endif
/* Using H5Ovisit is really slow. Use H5Iget_type for a fast

View File

@ -21,10 +21,10 @@ check_PROGRAMS = tst_chunk_hdf4 tst_h4_lendian tst_hdf4_extra
TESTS = tst_chunk_hdf4 tst_h4_lendian tst_hdf4_extra
# This test script depends on ncdump and tst_interops2.c.
if BUILD_UTILITIES
if NETCDF_BUILD_UTILITIES
check_PROGRAMS += tst_interops2
TESTS += run_formatx_hdf4.sh
endif # BUILD_UTILITIES
endif # NETCDF_BUILD_UTILITIES
# This test script fetches HDF4 files from an FTP server and uses
# program tst_interops3.c to read them.

View File

@ -35,7 +35,7 @@ INSTALL(FILES ${netCDF_BINARY_DIR}/include/netcdf_dispatch.h
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
COMPONENT headers)
IF(ENABLE_PNETCDF OR ENABLE_PARALLEL4)
IF(NETCDF_ENABLE_PNETCDF OR NETCDF_ENABLE_PARALLEL4)
INSTALL(FILES ${netCDF_SOURCE_DIR}/include/netcdf_par.h
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
COMPONENT headers)

View File

@ -26,7 +26,7 @@ if USE_DAP
noinst_HEADERS += ncdap.h
endif
if ENABLE_BYTERANGE
if NETCDF_ENABLE_BYTERANGE
noinst_HEADERS += nchttp.h
endif

View File

@ -62,10 +62,10 @@ typedef struct NC_HDF5_FILE_INFO {
hid_t hdfid;
unsigned transientid; /* counter for transient ids */
NCURI* uri; /* Parse of the incoming path, if url */
#if defined(ENABLE_BYTERANGE)
#if defined(NETCDF_ENABLE_BYTERANGE)
int byterange;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
struct NCauth* auth;
#endif
} NC_HDF5_FILE_INFO_T;

View File

@ -162,7 +162,7 @@ typedef struct NC_ATT_INFO
{
NC_OBJ hdr; /**< The hdr contains the name and ID. */
struct NC_OBJ *container; /**< Pointer to containing group|var. */
int len; /**< Length of attribute data. */
size_t len; /**< Length of attribute data. */
nc_bool_t dirty; /**< True if attribute modified. */
nc_bool_t created; /**< True if attribute already created. */
nc_type nc_typeid; /**< NetCDF type of attribute's data. */

View File

@ -104,12 +104,12 @@ extern const NC_Dispatch* NC3_dispatch_table;
extern int NC3_initialize(void);
extern int NC3_finalize(void);
#ifdef ENABLE_DAP
#ifdef NETCDF_ENABLE_DAP
extern const NC_Dispatch* NCD2_dispatch_table;
extern int NCD2_initialize(void);
extern int NCD2_finalize(void);
#endif
#ifdef ENABLE_DAP4
#ifdef NETCDF_ENABLE_DAP4
extern const NC_Dispatch* NCD4_dispatch_table;
extern int NCD4_initialize(void);
extern int NCD4_finalize(void);
@ -138,7 +138,7 @@ extern int HDF4_initialize(void);
extern int HDF4_finalize(void);
#endif
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
extern const NC_Dispatch* NCZ_dispatch_table;
extern int NCZ_initialize(void);
extern int NCZ_finalize(void);

View File

@ -26,7 +26,7 @@ typedef struct NC_HTTP_STATE {
struct NCURI* url; /* parsed url */
long httpcode;
char* errmsg; /* do not free if format is HTTPCURL */
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
struct NC_HTTP_S3 {
void* s3client;
struct NCS3INFO* info;

View File

@ -61,7 +61,7 @@
#define NC_DISPATCH_VERSION @NC_DISPATCH_VERSION@ /*!< Dispatch table version. */
#define NC_HAS_PAR_FILTERS @NC_HAS_PAR_FILTERS@ /* Parallel I/O with filter support. */
#define NC_HAS_LOGGING @NC_HAS_LOGGING@ /*!< Logging support. */
#define NC_HAS_MULTIFILTERS @NC_HAS_MULTIFILTERS@ /*!< Nczarr support. */
#define NC_HAS_QUANTIZE @NC_HAS_QUANTIZE@ /*!< Quantization support. */
#define NC_HAS_ZSTD @NC_HAS_ZSTD@ /*!< Zstd support. */
#define NC_HAS_BENCHMARKS @NC_HAS_BENCHMARKS@ /*!< Benchmarks. */

View File

@ -13,15 +13,15 @@ if USE_DAP
AM_CPPFLAGS += -I${top_srcdir}/oc2
endif
if ENABLE_NCZARR
if NETCDF_ENABLE_NCZARR
AM_CPPFLAGS += -I${top_srcdir}/libnczarr
endif
if ENABLE_S3_AWS
if NETCDF_ENABLE_S3_AWS
AM_LDFLAGS += -lstdc++
endif
if ! ENABLE_LIBXML2
if ! NETCDF_ENABLE_LIBXML2
# => tinyxml2
AM_LDFLAGS += -lstdc++
endif

View File

@ -34,7 +34,7 @@ target_compile_options(dap2
-DCURL_STATICLIB=1
)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(dap2 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

View File

@ -24,7 +24,7 @@ HDRS= nccommon.h constraints.h ncd2dispatch.h dapincludes.h \
dapodom.h getvara.h dapnc.h daputil.h dapdebug.h dapdump.h \
dceconstraints.h dcetab.h dceparselex.h
if ENABLE_DAP
if NETCDF_ENABLE_DAP
if USE_NETCDF4
AM_CPPFLAGS += -I$(top_srcdir)/libsrc4
@ -44,7 +44,7 @@ libdap2_la_LIBADD =
# ${top_builddir}/oc2/liboc.la \
# ${top_builddir}/libdispatch/libdispatch.la
#
endif # ENABLE_DAP
endif # NETCDF_ENABLE_DAP
# These rule are used if someone wants to rebuild the grammar files.
# Otherwise never invoked, but records how to do it.

View File

@ -27,7 +27,7 @@ target_compile_options(dap4
-DCURL_STATICLIB=1
)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(dap4 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

View File

@ -53,7 +53,7 @@ d4odom.h \
d4bytes.h \
d4includes.h
if ENABLE_DAP4
if NETCDF_ENABLE_DAP4
if USE_NETCDF4
AM_CPPFLAGS += -I$(top_srcdir)/libsrc4
endif
@ -64,4 +64,4 @@ libdap4_la_SOURCES = $(SRC) $(HDRS)
libdap4_la_CPPFLAGS += $(AM_CPPFLAGS)
libdap4_la_LIBADD =
endif # ENABLE_DAP4
endif # NETCDF_ENABLE_DAP4

View File

@ -13,7 +13,7 @@ target_sources(dispatch
dcrc32.c dcrc32.h dcrc64.c ncexhash.c ncxcache.c ncjson.c ds3util.c dparallel.c dmissing.c
)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(dispatch PRIVATE DLL_NETCDF DLL_EXPORT)
endif()
@ -43,15 +43,15 @@ if(BUILD_V2)
)
endif(BUILD_V2)
if(ENABLE_BYTERANGE)
if(NETCDF_ENABLE_BYTERANGE)
target_sources(dispatch
PRIVATE
dhttp.c
)
ENDIF(ENABLE_BYTERANGE)
ENDIF(NETCDF_ENABLE_BYTERANGE)
IF(ENABLE_S3)
if(ENABLE_S3_INTERNAL)
IF(NETCDF_ENABLE_S3)
if(NETCDF_ENABLE_S3_INTERNAL)
target_sources(dispatch
PRIVATE
ncs3sdk_h5.c nch5s3comms.c nch5s3comms.h nccurl_sha256.c nccurl_sha256.h nccurl_hmac.c nccurl_hmac.h nccurl_setup.h
@ -80,12 +80,16 @@ if(STATUS_PARALLEL)
target_link_libraries(dispatch PUBLIC MPI::MPI_C)
endif(STATUS_PARALLEL)
if(ENABLE_NCZARR)
target_include_directories(dispatch PUBLIC ../libnczarr)
endif(ENABLE_NCZARR)
if (NETCDF_ENABLE_PNETCDF)
target_link_libraries(dispatch PUBLIC PNETCDF::PNETCDF)
endif()
if(ENABLE_S3)
if(ENABLE_S3_AWS)
IF(NETCDF_ENABLE_NCZARR)
target_include_directories(dispatch PUBLIC ../libnczarr)
endif(NETCDF_ENABLE_NCZARR)
if(NETCDF_ENABLE_S3)
if(NETCDF_ENABLE_S3_AWS)
target_include_directories(dispatch PUBLIC ${AWSSDK_INCLUDE_DIRS})
if(NOT MSVC)
target_compile_features(dispatch PUBLIC cxx_std_11)
@ -95,7 +99,7 @@ if(ENABLE_S3)
endif()
endif()
if(ENABLE_TESTS)
if(NETCDF_ENABLE_TESTS)
BUILD_BIN_TEST(ncrandom)
endif()

View File

@ -43,12 +43,12 @@ libnetcdf2_la_SOURCES = dv2i.c
libnetcdf2_la_CPPFLAGS = ${AM_CPPFLAGS} -DDLL_EXPORT
endif # BUILD_V2
if ENABLE_BYTERANGE
if NETCDF_ENABLE_BYTERANGE
libdispatch_la_SOURCES += dhttp.c
endif # ENABLE_BYTERANGE
endif # NETCDF_ENABLE_BYTERANGE
if ENABLE_S3
if ENABLE_S3_INTERNAL
if NETCDF_ENABLE_S3
if NETCDF_ENABLE_S3_INTERNAL
# Renamed to avoid conflicts with the HDF5 files
libdispatch_la_SOURCES += ncs3sdk_h5.c nch5s3comms.c nch5s3comms.h ncutil.h nccurl_setup.h \
nccurl_sha256.c nccurl_sha256.h nccurl_hmac.c nccurl_hmac.h

View File

@ -24,11 +24,11 @@ See LICENSE.txt for license information.
#include <direct.h>
#endif
#if defined(ENABLE_BYTERANGE) || defined(ENABLE_DAP) || defined(ENABLE_DAP4)
#if defined(NETCDF_ENABLE_BYTERANGE) || defined(NETCDF_ENABLE_DAP) || defined(NETCDF_ENABLE_DAP4)
#include <curl/curl.h>
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
#include "ncs3sdk.h"
#endif
@ -118,7 +118,7 @@ NCDISPATCH_initialize(void)
/* Compute type alignments */
NC_compute_alignments();
#if defined(ENABLE_BYTERANGE) || defined(ENABLE_DAP) || defined(ENABLE_DAP4)
#if defined(NETCDF_ENABLE_BYTERANGE) || defined(NETCDF_ENABLE_DAP) || defined(NETCDF_ENABLE_DAP4)
/* Initialize curl if it is being used */
{
CURLcode cstat = curl_global_init(CURL_GLOBAL_ALL);
@ -134,10 +134,10 @@ int
NCDISPATCH_finalize(void)
{
int status = NC_NOERR;
#if defined(ENABLE_BYTERANGE) || defined(ENABLE_DAP) || defined(ENABLE_DAP4)
#if defined(NETCDF_ENABLE_BYTERANGE) || defined(NETCDF_ENABLE_DAP) || defined(NETCDF_ENABLE_DAP4)
curl_global_cleanup();
#endif
#if defined(ENABLE_DAP4)
#if defined(NETCDF_ENABLE_DAP4)
ncxml_finalize();
#endif
NC_freeglobalstate(); /* should be one of the last things done */

View File

@ -1884,7 +1884,7 @@ NC_create(const char *path0, int cmode, size_t initialsz,
if (model.impl == NC_FORMATX_PNETCDF)
{stat = NC_ENOTBUILT; goto done;}
#endif
#ifndef ENABLE_CDF5
#ifndef NETCDF_ENABLE_CDF5
if (model.impl == NC_FORMATX_NC3 && (cmode & NC_64BIT_DATA))
{stat = NC_ENOTBUILT; goto done;}
#endif
@ -1909,7 +1909,7 @@ NC_create(const char *path0, int cmode, size_t initialsz,
dispatcher = UDF1_dispatch_table;
break;
#endif /* USE_NETCDF4 */
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
case NC_FORMATX_NCZARR:
dispatcher = NCZ_dispatch_table;
break;
@ -2047,10 +2047,10 @@ NC_open(const char *path0, int omode, int basepe, size_t *chunksizehintp,
#ifdef USE_HDF4
hdf4built = 1;
#endif
#ifdef ENABLE_CDF5
#ifdef NETCDF_ENABLE_CDF5
cdf5built = 1;
#endif
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
nczarrbuilt = 1;
#endif
if(UDF0_dispatch_table != NULL)
@ -2081,13 +2081,13 @@ NC_open(const char *path0, int omode, int basepe, size_t *chunksizehintp,
#ifdef USE_HDF4
| (1<<NC_FORMATX_NC_HDF4)
#endif
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
| (1<<NC_FORMATX_NCZARR)
#endif
#ifdef ENABLE_DAP
#ifdef NETCDF_ENABLE_DAP
| (1<<NC_FORMATX_DAP2)
#endif
#ifdef ENABLE_DAP4
#ifdef NETCDF_ENABLE_DAP4
| (1<<NC_FORMATX_DAP4)
#endif
#ifdef USE_PNETCDF
@ -2101,7 +2101,7 @@ NC_open(const char *path0, int omode, int basepe, size_t *chunksizehintp,
/* Verify */
if((built & (1 << model.impl)) == 0)
{stat = NC_ENOTBUILT; goto done;}
#ifndef ENABLE_CDF5
#ifndef NETCDF_ENABLE_CDF5
/* Special case because there is no separate CDF5 dispatcher */
if(model.impl == NC_FORMATX_NC3 && (omode & NC_64BIT_DATA))
{stat = NC_ENOTBUILT; goto done;}
@ -2111,17 +2111,17 @@ NC_open(const char *path0, int omode, int basepe, size_t *chunksizehintp,
/* Figure out what dispatcher to use */
if (!dispatcher) {
switch (model.impl) {
#ifdef ENABLE_DAP
#ifdef NETCDF_ENABLE_DAP
case NC_FORMATX_DAP2:
dispatcher = NCD2_dispatch_table;
break;
#endif
#ifdef ENABLE_DAP4
#ifdef NETCDF_ENABLE_DAP4
case NC_FORMATX_DAP4:
dispatcher = NCD4_dispatch_table;
break;
#endif
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
case NC_FORMATX_NCZARR:
dispatcher = NCZ_dispatch_table;
break;

View File

@ -25,7 +25,7 @@
#include "hdf5internal.h"
#endif
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
#include "zdispatch.h"
#endif

View File

@ -26,7 +26,7 @@
#include "ncuri.h"
#include "ncauth.h"
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
#include "ncs3sdk.h"
#endif
#include "nchttp.h"
@ -100,7 +100,7 @@ nc_http_open_verbose(const char* path, int verbose, NC_HTTP_STATE** statep)
{stat = NCTHROW(NC_ENOMEM); goto done;}
state->path = strdup(path);
state->url = uri; uri = NULL;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
state->format = (NC_iss3(state->url,NULL)?HTTPS3:HTTPCURL);
#else
state->format = HTTPCURL;
@ -122,7 +122,7 @@ nc_http_open_verbose(const char* path, int verbose, NC_HTTP_STATE** statep)
if(cstat != CURLE_OK) {stat = NCTHROW(NC_ECURL); goto done;}
}
} break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
if((state->s3.info = (NCS3INFO*)calloc(1,sizeof(NCS3INFO)))==NULL)
{stat = NCTHROW(NC_ENOMEM); goto done;}
@ -158,7 +158,7 @@ nc_http_close(NC_HTTP_STATE* state)
ncbytesfree(state->curl.response.buf);
nclistfreeall(state->curl.request.headers); state->curl.request.headers = NULL;
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
if(state->s3.s3client)
NC_s3sdkclose(state->s3.s3client, state->s3.info, 0, NULL);
@ -203,7 +203,7 @@ nc_http_reset(NC_HTTP_STATE* state)
(void)CURLERR(curl_easy_setopt(state->curl.curl, CURLOPT_READDATA, NULL));
headersoff(state);
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3:
break; /* Done automatically */
#endif
@ -250,7 +250,7 @@ nc_http_read(NC_HTTP_STATE* state, size64_t start, size64_t count, NCbytes* buf)
if((stat = execute(state)))
goto done;
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
/* Make sure buf has enough space allocated */
ncbytessetalloc(buf,count);
@ -301,7 +301,7 @@ nc_http_write(NC_HTTP_STATE* state, NCbytes* payload)
if((stat = execute(state)))
goto done;
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3:
if((stat = NC_s3sdkwriteobject(state->s3.s3client,
state->s3.info->bucket,
@ -357,7 +357,7 @@ nc_http_size(NC_HTTP_STATE* state, long long* sizep)
if((stat = lookupheader(state,"content-length",&hdr))==NC_NOERR)
sscanf(hdr,"%llu",sizep);
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
size64_t len = 0;
if((stat = NC_s3sdkinfo(state->s3.s3client,state->s3.info->bucket,state->s3.info->rootkey,&len,&state->errmsg))) goto done;

View File

@ -27,7 +27,7 @@
#include "nclog.h"
#include "ncrc.h"
#include "nchttp.h"
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
#include "ncs3sdk.h"
#endif
@ -61,7 +61,7 @@ struct MagicFile {
#endif
char* curlurl; /* url to use with CURLOPT_SET_URL */
NC_HTTP_STATE* state;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
NCS3INFO s3;
void* s3client;
char* errmsg;
@ -902,7 +902,7 @@ NC_infermodel(const char* path, int* omodep, int iscreate, int useparallel, void
ncurisetfragments(uri,sfrag);
nullfree(sfrag); sfrag = NULL;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
/* If s3, then rebuild the url */
if(NC_iss3(uri,NULL)) {
NCURI* newuri = NULL;
@ -1323,7 +1323,7 @@ openmagic(struct MagicFile* file)
goto done;
}
if(file->uri != NULL) {
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
/* Construct a URL minus any fragment */
file->curlurl = ncuribuild(file->uri,NULL,NULL,NCURISVC);
/* Open the curl handle */
@ -1411,7 +1411,7 @@ readmagic(struct MagicFile* file, long pos, char* magic)
printmagic("XXX: readmagic",magic,file);
#endif
} else if(file->uri != NULL) {
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
fileoffset_t start = (size_t)pos;
fileoffset_t count = MAGIC_NUMBER_LEN;
status = nc_http_read(file->state, start, count, buf);
@ -1466,7 +1466,7 @@ closemagic(struct MagicFile* file)
if(fIsSet(file->omode,NC_INMEMORY)) {
/* noop */
} else if(file->uri != NULL) {
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
status = nc_http_close(file->state);
#endif
nullfree(file->curlurl);

View File

@ -30,7 +30,7 @@ Currently two operations are defined:
necessary in order to get to the right NC* instance.
*/
#if defined(ENABLE_DAP4) || defined(ENABLE_DAP2)
#if defined(NETCDF_ENABLE_DAP4) || defined(NETCDF_ENABLE_DAP2)
EXTERNL NC* NCD4_get_substrate(NC* nc);
EXTERNL NC* NCD2_get_substrate(NC* nc);
static NC*

View File

@ -124,7 +124,7 @@ nc_set_default_format(int format, int *old_formatp)
*old_formatp = default_create_format;
/* Make sure only valid format is set. */
#ifndef ENABLE_CDF5
#ifndef NETCDF_ENABLE_CDF5
if (format == NC_FORMAT_CDF5)
return NC_ENOTBUILT;
#endif

View File

@ -18,7 +18,7 @@ target_sources(netcdfhdf4
hdf4var.c
)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdfhdf4 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

View File

@ -17,11 +17,11 @@ target_sources(netcdfhdf5 PRIVATE
hdf5set_format_compatibility.c hdf5debug.c
)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdfhdf5 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()
if(ENABLE_BYTERANGE)
if(NETCDF_ENABLE_BYTERANGE)
target_sources(netcdfhdf5 PRIVATE
H5FDhttp.c
)

View File

@ -18,7 +18,7 @@ hdf5dim.c hdf5grp.c hdf5type.c hdf5internal.c hdf5create.c hdf5open.c \
hdf5var.c nc4mem.c nc4memcb.c hdf5dispatch.c hdf5filter.c \
hdf5set_format_compatibility.c hdf5debug.c hdf5debug.h hdf5err.h
if ENABLE_BYTERANGE
if NETCDF_ENABLE_BYTERANGE
libnchdf5_la_SOURCES += H5FDhttp.c H5FDhttp.h
endif

View File

@ -47,7 +47,7 @@ getattlist(NC_GRP_INFO_T *grp, int varid, NC_VAR_INFO_T **varp,
{
NC_VAR_INFO_T *var;
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
return NC_ENOTVAR;
assert(var->hdr.id == varid);
@ -92,14 +92,13 @@ nc4_get_att_special(NC_FILE_INFO_T* h5, const char* name,
return NC_EATTMETA;
if(strcmp(name,NCPROPS)==0) {
int len;
if(h5->provenance.ncproperties == NULL)
return NC_ENOTATT;
if(mem_type == NC_NAT) mem_type = NC_CHAR;
if(mem_type != NC_CHAR)
return NC_ECHAR;
if(filetypep) *filetypep = NC_CHAR;
len = strlen(h5->provenance.ncproperties);
size_t len = strlen(h5->provenance.ncproperties);
if(lenp) *lenp = len;
if(data) strncpy((char*)data,h5->provenance.ncproperties,len+1);
} else if(strcmp(name,ISNETCDF4ATT)==0
@ -110,7 +109,7 @@ nc4_get_att_special(NC_FILE_INFO_T* h5, const char* name,
if(strcmp(name,SUPERBLOCKATT)==0)
iv = (unsigned long long)h5->provenance.superblockversion;
else /* strcmp(name,ISNETCDF4ATT)==0 */
iv = NC4_isnetcdf4(h5);
iv = (unsigned long long)NC4_isnetcdf4(h5);
if(mem_type == NC_NAT) mem_type = NC_INT;
if(data)
switch (mem_type) {
@ -271,8 +270,6 @@ NC4_HDF5_del_att(int ncid, int varid, const char *name)
NC_ATT_INFO_T *att;
NCindex* attlist = NULL;
hid_t locid = 0;
int i;
size_t deletedid;
int retval;
/* Name must be provided. */
@ -328,7 +325,7 @@ NC4_HDF5_del_att(int ncid, int varid, const char *name)
return NC_EATTMETA;
}
deletedid = att->hdr.id;
int deletedid = att->hdr.id;
/* reclaim associated HDF5 info */
if((retval=nc4_HDF5_close_att(att))) return retval;
@ -338,7 +335,7 @@ NC4_HDF5_del_att(int ncid, int varid, const char *name)
return retval;
/* Renumber all attributes with higher indices. */
for (i = 0; i < ncindexsize(attlist); i++)
for (size_t i = 0; i < ncindexsize(attlist); i++)
{
NC_ATT_INFO_T *a;
if (!(a = (NC_ATT_INFO_T *)ncindexith(attlist, i)))

View File

@ -164,7 +164,7 @@ nc4_create_file(const char *path, int cmode, size_t initialsz,
{
NCglobalstate* gs = NC_getglobalstate();
if(gs->alignment.defined) {
if (H5Pset_alignment(fapl_id, gs->alignment.threshold, gs->alignment.alignment) < 0) {
if (H5Pset_alignment(fapl_id, (hsize_t)gs->alignment.threshold, (hsize_t)gs->alignment.alignment) < 0) {
BAIL(NC_EHDFERR);
}
}
@ -222,12 +222,12 @@ nc4_create_file(const char *path, int cmode, size_t initialsz,
if(nc4_info->mem.diskless) {
size_t alloc_incr; /* Buffer allocation increment */
size_t min_incr = 65536; /* Minimum buffer increment */
double buf_prcnt = 0.1f; /* Percentage of buffer size to set as increment */
double buf_prcnt = 0.1; /* Percentage of buffer size to set as increment */
/* set allocation increment to a percentage of the supplied buffer size, or
* a pre-defined minimum increment value, whichever is larger
*/
if ((buf_prcnt * initialsz) > min_incr)
alloc_incr = (size_t)(buf_prcnt * initialsz);
if ((size_t)(buf_prcnt * (double)initialsz) > min_incr)
alloc_incr = (size_t)(buf_prcnt * (double)initialsz);
else
alloc_incr = min_incr;
/* Configure FAPL to use the core file driver */

View File

@ -46,7 +46,6 @@ HDF5_def_dim(int ncid, const char *name, size_t len, int *idp)
NC_DIM_INFO_T *dim;
char norm_name[NC_MAX_NAME + 1];
int retval = NC_NOERR;
int i;
LOG((2, "%s: ncid 0x%x name %s len %d", __func__, ncid, name,
(int)len));
@ -65,7 +64,7 @@ HDF5_def_dim(int ncid, const char *name, size_t len, int *idp)
{
/* Only one limited dimenson for strict nc3. */
if (len == NC_UNLIMITED) {
for(i=0;i<ncindexsize(grp->dim);i++) {
for(size_t i=0;i<ncindexsize(grp->dim);i++) {
dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,i);
if(dim == NULL) continue;
if (dim->unlimited)

View File

@ -11,7 +11,7 @@
#include "config.h"
#include "hdf5internal.h"
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
#include "H5FDhttp.h"
#endif
@ -127,7 +127,7 @@ NC_HDF5_initialize(void)
if (!nc4_hdf5_initialized)
nc4_hdf5_initialize();
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
(void)H5FD_http_init();
#endif
return NC4_provenance_init();
@ -142,7 +142,7 @@ NC_HDF5_initialize(void)
int
NC_HDF5_finalize(void)
{
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
(void)H5FD_http_finalize();
#endif
(void)nc4_hdf5_finalize();

View File

@ -15,6 +15,7 @@
#include "hdf5internal.h"
#include "ncrc.h"
#include "ncauth.h"
#include <sys/types.h>
extern int NC4_extract_file_image(NC_FILE_INFO_T* h5, int abort); /* In nc4memcb.c */
@ -51,10 +52,9 @@ detect_preserve_dimids(NC_GRP_INFO_T *grp, nc_bool_t *bad_coord_orderp)
NC_GRP_INFO_T *child_grp;
int last_dimid = -1;
int retval;
int i;
/* Iterate over variables in this group */
for (i=0; i < ncindexsize(grp->vars); i++)
for (size_t i=0; i < ncindexsize(grp->vars); i++)
{
NC_HDF5_VAR_INFO_T *hdf5_var;
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,i);
@ -98,7 +98,7 @@ detect_preserve_dimids(NC_GRP_INFO_T *grp, nc_bool_t *bad_coord_orderp)
}
/* If there are any child groups, check them also for this condition. */
for (i = 0; i < ncindexsize(grp->children); i++)
for (size_t i = 0; i < ncindexsize(grp->children); i++)
{
if (!(child_grp = (NC_GRP_INFO_T *)ncindexith(grp->children, i)))
continue;
@ -221,7 +221,7 @@ nc4_close_netcdf4_file(NC_FILE_INFO_T *h5, int abort, NC_memio *memio)
NC4_clear_provenance(&h5->provenance);
ncurifree(hdf5_info->uri);
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
/* Free the http info */
NC_authfree(hdf5_info->auth);
#endif
@ -326,7 +326,6 @@ static void
dumpopenobjects(NC_FILE_INFO_T* h5)
{
NC_HDF5_FILE_INFO_T *hdf5_info;
int nobjs;
assert(h5 && h5->format_file_info);
hdf5_info = (NC_HDF5_FILE_INFO_T *)h5->format_file_info;
@ -334,7 +333,7 @@ dumpopenobjects(NC_FILE_INFO_T* h5)
if(hdf5_info->hdfid <= 0)
return; /* File was never opened */
nobjs = H5Fget_obj_count(hdf5_info->hdfid, H5F_OBJ_ALL);
ssize_t nobjs = H5Fget_obj_count(hdf5_info->hdfid, H5F_OBJ_ALL);
/* Apparently we can get an error even when nobjs == 0 */
if(nobjs < 0) {
@ -346,7 +345,7 @@ dumpopenobjects(NC_FILE_INFO_T* h5)
* objects open, which means there's a bug in the library. So
* print out some info on to help the poor programmer figure it
* out. */
snprintf(msg,sizeof(msg),"There are %d HDF5 objects open!", nobjs);
snprintf(msg,sizeof(msg),"There are %zd HDF5 objects open!", nobjs);
#ifdef LOGGING
#ifdef LOGOPEN
LOG((0, msg));
@ -485,7 +484,6 @@ NC4_enddef(int ncid)
NC_FILE_INFO_T *nc4_info;
NC_GRP_INFO_T *grp;
int retval;
int i;
NC_VAR_INFO_T* var = NULL;
LOG((1, "%s: ncid 0x%x", __func__, ncid));
@ -497,7 +495,7 @@ NC4_enddef(int ncid)
/* Why is this here? Especially since it is not recursive so it
only applies to the this grp */
/* When exiting define mode, mark all variable written. */
for (i = 0; i < ncindexsize(grp->vars); i++)
for (size_t i = 0; i < ncindexsize(grp->vars); i++)
{
var = (NC_VAR_INFO_T *)ncindexith(grp->vars, i);
assert(var);
@ -658,7 +656,6 @@ NC4_inq(int ncid, int *ndimsp, int *nvarsp, int *nattsp, int *unlimdimidp)
NC_FILE_INFO_T *h5;
NC_GRP_INFO_T *grp;
int retval;
int i;
LOG((2, "%s: ncid 0x%x", __func__, ncid));
@ -697,7 +694,7 @@ NC4_inq(int ncid, int *ndimsp, int *nvarsp, int *nattsp, int *unlimdimidp)
with netcdf-3, then only the last unlimited one will be reported
back in xtendimp. */
/* Note that this code is inconsistent with nc_inq_unlimid() */
for(i=0;i<ncindexsize(grp->dim);i++) {
for(size_t i=0;i<ncindexsize(grp->dim);i++) {
NC_DIM_INFO_T* d = (NC_DIM_INFO_T*)ncindexith(grp->dim,i);
if(d == NULL) continue;
if(d->unlimited) {

View File

@ -121,7 +121,7 @@ find_var_dim_max_length(NC_GRP_INFO_T *grp, int varid, int dimid,
LOG((3, "find_var_dim_max_length varid %d dimid %d", varid, dimid));
/* Find this var. */
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,varid);
var = (NC_VAR_INFO_T*)ncindexith(grp->vars, (size_t)varid);
if (!var) return NC_ENOTVAR;
assert(var->hdr.id == varid);
@ -252,21 +252,20 @@ nc4_find_dim_len(NC_GRP_INFO_T *grp, int dimid, size_t **len)
{
NC_VAR_INFO_T *var;
int retval;
int i;
assert(grp && len);
LOG((3, "%s: grp->name %s dimid %d", __func__, grp->hdr.name, dimid));
/* If there are any groups, call this function recursively on
* them. */
for (i = 0; i < ncindexsize(grp->children); i++)
for (size_t i = 0; i < ncindexsize(grp->children); i++)
if ((retval = nc4_find_dim_len((NC_GRP_INFO_T*)ncindexith(grp->children, i),
dimid, len)))
return retval;
/* For all variables in this group, find the ones that use this
* dimension, and remember the max length. */
for (i = 0; i < ncindexsize(grp->vars); i++)
for (size_t i = 0; i < ncindexsize(grp->vars); i++)
{
size_t mylen;
var = (NC_VAR_INFO_T *)ncindexith(grp->vars, i);
@ -431,23 +430,21 @@ nc4_reform_coord_var(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var, NC_DIM_INFO_T *dim)
{
int dims_detached = 0;
int finished = 0;
int d;
/* Loop over all dimensions for variable. */
for (d = 0; d < var->ndims && !finished; d++)
for (unsigned int d = 0; d < var->ndims && !finished; d++)
{
/* Is there a dimscale attached to this axis? */
if (hdf5_var->dimscale_attached[d])
{
NC_GRP_INFO_T *g;
int k;
for (g = grp; g && !finished; g = g->parent)
{
NC_DIM_INFO_T *dim1;
NC_HDF5_DIM_INFO_T *hdf5_dim1;
for (k = 0; k < ncindexsize(g->dim); k++)
for (size_t k = 0; k < ncindexsize(g->dim); k++)
{
dim1 = (NC_DIM_INFO_T *)ncindexith(g->dim, k);
assert(dim1 && dim1->format_dim_info);
@ -542,9 +539,8 @@ static int
close_gatts(NC_GRP_INFO_T *grp)
{
NC_ATT_INFO_T *att;
int a;
for (a = 0; a < ncindexsize(grp->att); a++)
for (size_t a = 0; a < ncindexsize(grp->att); a++)
{
att = (NC_ATT_INFO_T *)ncindexith(grp->att, a);
assert(att && att->format_att_info);
@ -595,9 +591,8 @@ close_vars(NC_GRP_INFO_T *grp)
NC_VAR_INFO_T *var;
NC_HDF5_VAR_INFO_T *hdf5_var;
NC_ATT_INFO_T *att;
int a, i;
for (i = 0; i < ncindexsize(grp->vars); i++)
for (size_t i = 0; i < ncindexsize(grp->vars); i++)
{
var = (NC_VAR_INFO_T *)ncindexith(grp->vars, i);
assert(var && var->format_var_info);
@ -631,7 +626,7 @@ close_vars(NC_GRP_INFO_T *grp)
nc4_HDF5_close_type(var->type_info);
}
for (a = 0; a < ncindexsize(var->att); a++)
for (size_t a = 0; a < ncindexsize(var->att); a++)
{
att = (NC_ATT_INFO_T *)ncindexith(var->att, a);
assert(att && att->format_att_info);
@ -669,9 +664,8 @@ static int
close_dims(NC_GRP_INFO_T *grp)
{
NC_DIM_INFO_T *dim;
int i;
for (i = 0; i < ncindexsize(grp->dim); i++)
for (size_t i = 0; i < ncindexsize(grp->dim); i++)
{
NC_HDF5_DIM_INFO_T *hdf5_dim;
@ -704,9 +698,7 @@ close_dims(NC_GRP_INFO_T *grp)
static int
close_types(NC_GRP_INFO_T *grp)
{
int i;
for (i = 0; i < ncindexsize(grp->type); i++)
for (size_t i = 0; i < ncindexsize(grp->type); i++)
{
NC_TYPE_INFO_T *type;
@ -764,7 +756,6 @@ int
nc4_rec_grp_HDF5_del(NC_GRP_INFO_T *grp)
{
NC_HDF5_GRP_INFO_T *hdf5_grp;
int i;
int retval;
assert(grp && grp->format_grp_info);
@ -774,7 +765,7 @@ nc4_rec_grp_HDF5_del(NC_GRP_INFO_T *grp)
/* Recursively call this function for each child, if any, stopping
* if there is an error. */
for (i = 0; i < ncindexsize(grp->children); i++)
for (size_t i = 0; i < ncindexsize(grp->children); i++)
if ((retval = nc4_rec_grp_HDF5_del((NC_GRP_INFO_T *)ncindexith(grp->children,
i))))
return retval;
@ -905,7 +896,7 @@ nc4_hdf5_find_grp_var_att(int ncid, int varid, const char *name, int attnum,
}
else
{
if (!(my_var = (NC_VAR_INFO_T *)ncindexith(my_grp->vars, varid)))
if (!(my_var = (NC_VAR_INFO_T *)ncindexith(my_grp->vars, (size_t)varid)))
return NC_ENOTVAR;
/* Do we need to read the var attributes? */
@ -935,7 +926,7 @@ nc4_hdf5_find_grp_var_att(int ncid, int varid, const char *name, int attnum,
if (att)
{
my_att = use_name ? (NC_ATT_INFO_T *)ncindexlookup(attlist, my_norm_name) :
(NC_ATT_INFO_T *)ncindexith(attlist, attnum);
(NC_ATT_INFO_T *)ncindexith(attlist, (size_t)attnum);
if (!my_att)
return NC_ENOTATT;
}

View File

@ -20,11 +20,11 @@
#include "ncpathmgr.h"
#include <stddef.h>
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
#include "H5FDhttp.h"
#endif
#ifdef ENABLE_HDF5_ROS3
#ifdef NETCDF_ENABLE_HDF5_ROS3
#include <H5FDros3.h>
#include "ncs3sdk.h"
#endif
@ -53,7 +53,7 @@ static const nc_type nc_type_constant_g[NUM_TYPES] = {NC_CHAR, NC_BYTE, NC_SHORT
NC_UINT64, NC_STRING};
/** @internal NetCDF atomic type sizes. */
static const int nc_type_size_g[NUM_TYPES] = {sizeof(char), sizeof(char), sizeof(short),
static const size_t nc_type_size_g[NUM_TYPES] = {sizeof(char), sizeof(char), sizeof(short),
sizeof(int), sizeof(float), sizeof(double), sizeof(unsigned char),
sizeof(unsigned short), sizeof(unsigned int), sizeof(long long),
sizeof(unsigned long long), sizeof(char *)};
@ -451,11 +451,10 @@ create_phony_dims(NC_GRP_INFO_T *grp, hid_t hdf_datasetid, NC_VAR_INFO_T *var)
* unless there already is one the correct size. */
for (d = 0; d < var->ndims; d++)
{
int k;
int match = 0;
/* Is there already a phony dimension of the correct size? */
for (k = 0; k < ncindexsize(grp->dim); k++)
for (size_t k = 0; k < ncindexsize(grp->dim); k++)
{
dim = (NC_DIM_INFO_T *)ncindexith(grp->dim, k);
assert(dim);
@ -535,19 +534,18 @@ rec_match_dimscales(NC_GRP_INFO_T *grp)
NC_VAR_INFO_T *var;
NC_DIM_INFO_T *dim;
int retval = NC_NOERR;
int i;
assert(grp && grp->hdr.name);
LOG((4, "%s: grp->hdr.name %s", __func__, grp->hdr.name));
/* Perform var dimscale match for child groups. */
for (i = 0; i < ncindexsize(grp->children); i++)
for (size_t i = 0; i < ncindexsize(grp->children); i++)
if ((retval = rec_match_dimscales((NC_GRP_INFO_T *)ncindexith(grp->children, i))))
return retval;
/* Check all the vars in this group. If they have dimscale info,
* try and find a dimension for them. */
for (i = 0; i < ncindexsize(grp->vars); i++)
for (size_t i = 0; i < ncindexsize(grp->vars); i++)
{
NC_HDF5_VAR_INFO_T *hdf5_var;
int d;
@ -578,7 +576,6 @@ rec_match_dimscales(NC_GRP_INFO_T *grp)
if (!hdf5_var->dimscale)
{
int d;
int j;
/* Are there dimscales for this variable? */
if (hdf5_var->dimscale_hdf5_objids)
@ -600,7 +597,7 @@ rec_match_dimscales(NC_GRP_INFO_T *grp)
for (g = grp; g && !finished; g = g->parent)
{
/* Check all dims in this group. */
for (j = 0; j < ncindexsize(g->dim); j++)
for (size_t j = 0; j < ncindexsize(g->dim); j++)
{
/* Get the HDF5 specific dim info. */
NC_HDF5_DIM_INFO_T *hdf5_dim;
@ -743,7 +740,7 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
h5 = (NC_HDF5_FILE_INFO_T*)nc4_info->format_file_info;
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
/* Do path as URL processing */
ncuriparse(path,&h5->uri);
if(h5->uri != NULL) {
@ -755,7 +752,7 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
parameters = NULL; /* kill off parallel */
}
}
#endif /*ENABLE_BYTERANGE*/
#endif /*NETCDF_ENABLE_BYTERANGE*/
nc4_info->mem.inmemory = ((mode & NC_INMEMORY) == NC_INMEMORY);
nc4_info->mem.diskless = ((mode & NC_DISKLESS) == NC_DISKLESS);
@ -836,7 +833,7 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
{
NCglobalstate* gs = NC_getglobalstate();
if(gs->alignment.defined) {
if (H5Pset_alignment(fapl_id, gs->alignment.threshold, gs->alignment.alignment) < 0) {
if (H5Pset_alignment(fapl_id, (hsize_t)gs->alignment.threshold, (hsize_t)gs->alignment.alignment) < 0) {
BAIL(NC_EHDFERR);
}
}
@ -882,10 +879,10 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
if ((h5->hdfid = nc4_H5Fopen(path, flags, fapl_id)) < 0)
BAIL(NC_EHDFERR);
}
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
else if(h5->byterange) { /* Arrange to use the byte-range drivers */
char* newpath = NULL;
#ifdef ENABLE_HDF5_ROS3
#ifdef NETCDF_ENABLE_HDF5_ROS3
H5FD_ros3_fapl_t fa;
const char* awsaccessid0 = NULL;
const char* awssecretkey0 = NULL;
@ -1077,7 +1074,6 @@ static int get_filter_info(hid_t propid, NC_VAR_INFO_T *var)
int num_filters;
unsigned int* cd_values = NULL;
size_t cd_nelems;
int f;
int stat = NC_NOERR;
NC_HDF5_VAR_INFO_T *hdf5_var;
int varsized = 0;
@ -1094,7 +1090,7 @@ static int get_filter_info(hid_t propid, NC_VAR_INFO_T *var)
it has filters defined, suppress the variable. */
varsized = NC4_var_varsized(var);
for (f = 0; f < num_filters; f++)
for (unsigned int f = 0; f < num_filters; f++)
{
htri_t avail = -1;
unsigned flags = 0;
@ -1342,10 +1338,9 @@ get_chunking_info(hid_t propid, NC_VAR_INFO_T *var)
* @author Ed Hartnett, Dennis Heimbigner
*/
static int
get_attached_info(NC_VAR_INFO_T *var, NC_HDF5_VAR_INFO_T *hdf5_var, int ndims,
get_attached_info(NC_VAR_INFO_T *var, NC_HDF5_VAR_INFO_T *hdf5_var, size_t ndims,
hid_t datasetid)
{
int d;
int num_scales = 0;
LOG((4, "%s ndims %d datasetid %ld", __func__, ndims, datasetid));
@ -1374,7 +1369,7 @@ get_attached_info(NC_VAR_INFO_T *var, NC_HDF5_VAR_INFO_T *hdf5_var, int ndims,
/* Store id information allowing us to match hdf5 dimscales to
* netcdf dimensions. */
for (d = 0; d < var->ndims; d++)
for (unsigned int d = 0; d < var->ndims; d++)
{
LOG((4, "about to iterate scales for dim %d", d));
if (H5DSiterate_scales(hdf5_var->hdf_datasetid, d, NULL, dimscale_visitor,
@ -1409,7 +1404,7 @@ get_attached_info(NC_VAR_INFO_T *var, NC_HDF5_VAR_INFO_T *hdf5_var, int ndims,
*/
static int
get_scale_info(NC_GRP_INFO_T *grp, NC_DIM_INFO_T *dim, NC_VAR_INFO_T *var,
NC_HDF5_VAR_INFO_T *hdf5_var, int ndims, hid_t datasetid)
NC_HDF5_VAR_INFO_T *hdf5_var, size_t ndims, hid_t datasetid)
{
int retval;
@ -1575,7 +1570,7 @@ read_var(NC_GRP_INFO_T *grp, hid_t datasetid, const char *obj_name,
finalname = strdup(obj_name);
/* Add a variable to the end of the group's var list. */
if ((retval = nc4_var_list_add(grp, finalname, ndims, &var)))
if ((retval = nc4_var_list_add(grp, finalname, (int)ndims, &var)))
BAIL(retval);
/* Add storage for HDF5-specific var info. */
@ -1831,7 +1826,7 @@ read_hdf5_att(NC_GRP_INFO_T *grp, hid_t attid, NC_ATT_INFO_T *att)
if (att_ndims == 0 && att_npoints == 0)
dims[0] = 0;
else if (att->nc_typeid == NC_STRING)
dims[0] = att_npoints;
dims[0] = (hsize_t)att_npoints;
else if (att->nc_typeid == NC_CHAR)
{
/* NC_CHAR attributes are written as a scalar in HDF5, of type
@ -1845,7 +1840,7 @@ read_hdf5_att(NC_GRP_INFO_T *grp, hid_t attid, NC_ATT_INFO_T *att)
{
/* This is really a string type! */
att->nc_typeid = NC_STRING;
dims[0] = att_npoints;
dims[0] = (hsize_t)att_npoints;
}
}
else
@ -2139,7 +2134,7 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
return NC_EHDFERR;
for (d = 0; d < ndims; d++)
dim_size[d] = dims[d];
dim_size[d] = (int)dims[d];
/* What is the netCDF typeid of this member? */
if ((retval = get_netcdf_type(grp->nc4_info, H5Tget_super(member_hdf_typeid),
@ -2224,7 +2219,6 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
hid_t base_hdf_typeid;
nc_type base_nc_type = NC_NAT;
void *value;
int i;
char *member_name = NULL;
#ifdef JNA
char jna[1001];
@ -2261,7 +2255,7 @@ read_type(NC_GRP_INFO_T *grp, hid_t hdf_typeid, char *type_name)
return NC_ENOMEM;
/* Read each name and value defined in the enum. */
for (i = 0; i < nmembers; i++)
for (unsigned int i = 0; i < nmembers; i++)
{
/* Get the name and value from HDF5. */
if (!(member_name = H5Tget_member_name(hdf_typeid, i)))
@ -2466,7 +2460,7 @@ read_scale(NC_GRP_INFO_T *grp, hid_t datasetid, const char *obj_name,
htri_t attr_exists = -1; /* Flag indicating hidden attribute exists */
hid_t attid = -1; /* ID of hidden attribute (to store dim ID) */
int dimscale_created = 0; /* Remember if a dimension was created (for error recovery) */
short initial_next_dimid = grp->nc4_info->next_dimid;/* Retain for error recovery */
int initial_next_dimid = grp->nc4_info->next_dimid;/* Retain for error recovery */
size_t len = 0;
int too_long = NC_FALSE;
int assigned_id = -1;
@ -2638,7 +2632,7 @@ read_dataset(NC_GRP_INFO_T *grp, hid_t datasetid, const char *obj_name,
* unless this is one of those funny dimscales that are a
* dimension in netCDF but not a variable. (Spooky!) */
if (!dim || (dim && !hdf5_dim->hdf_dimscaleid))
if ((retval = read_var(grp, datasetid, obj_name, ndims, dim)))
if ((retval = read_var(grp, datasetid, obj_name, (size_t)ndims, dim)))
BAIL(retval);
exit:

View File

@ -481,7 +481,7 @@ NC4_get_vlen_element(int ncid, int typeid1, const void *vlen_element,
size_t *len, void *data)
{
const nc_vlen_t *tmp = (nc_vlen_t*)vlen_element;
int type_size = 4;
const size_t type_size = 4;
*len = tmp->len;
memcpy(data, tmp->p, tmp->len * type_size);

View File

@ -508,7 +508,7 @@ nc_def_var_extra(int ncid, int varid, int *shuffle, int *unused1,
return NC_EPERM;
/* Find the var. */
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
return NC_ENOTVAR;
assert(var && var->hdr.id == varid);
@ -1077,7 +1077,7 @@ nc_def_var_chunking_ints(int ncid, int varid, int storage, int *chunksizesp)
/* Copy to size_t array. */
for (i = 0; i < var->ndims; i++)
cs[i] = chunksizesp[i];
cs[i] = (size_t)chunksizesp[i];
retval = nc_def_var_extra(ncid, varid, NULL, NULL, NULL, NULL,
&storage, cs, NULL, NULL, NULL, NULL, NULL);
@ -1217,7 +1217,7 @@ NC4_rename_var(int ncid, int varid, const char *name)
return retval;
/* Get the variable wrt varid */
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
return NC_ENOTVAR;
/* Check if new name is in use; note that renaming to same name is
@ -1257,7 +1257,6 @@ NC4_rename_var(int ncid, int varid, const char *name)
there. */
if (var->created)
{
int v;
char *hdf5_name; /* Dataset will be renamed to this. */
hdf5_name = use_secret_name ? var->alt_name: (char *)name;
@ -1291,7 +1290,7 @@ NC4_rename_var(int ncid, int varid, const char *name)
* and we have just changed that for this var. We must do the
* same for all vars with a > varid, so that the creation order
* will continue to be correct. */
for (v = var->hdr.id + 1; v < ncindexsize(grp->vars); v++)
for (size_t v = (size_t)var->hdr.id + 1; v < ncindexsize(grp->vars); v++)
{
NC_VAR_INFO_T *my_var;
my_var = (NC_VAR_INFO_T *)ncindexith(grp->vars, v);
@ -1597,7 +1596,7 @@ NC4_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp,
start[i] = startp[i];
count[i] = countp ? countp[i] : var->dim[i]->len;
stride[i] = stridep ? stridep[i] : 1;
stride[i] = stridep ? (hsize_t)stridep[i] : 1;
ones[i] = 1;
LOG((4, "start[%d] %ld count[%d] %ld stride[%d] %ld", i, start[i], i, count[i], i, stride[i]));
@ -1660,7 +1659,7 @@ NC4_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp,
/* Create a space for the memory, just big enough to hold the slab
we want. */
if ((mem_spaceid = H5Screate_simple(var->ndims, count, NULL)) < 0)
if ((mem_spaceid = H5Screate_simple((int)var->ndims, count, NULL)) < 0)
BAIL(NC_EHDFERR);
}
@ -1936,7 +1935,7 @@ NC4_get_vars(int ncid, int varid, const size_t *startp, const size_t *countp,
start[i] = startp[i];
count[i] = countp[i];
stride[i] = stridep ? stridep[i] : 1;
stride[i] = stridep ? (hsize_t)stridep[i] : 1;
ones[i] = 1;
/* if any of the count values are zero don't actually read. */
@ -2079,7 +2078,7 @@ NC4_get_vars(int ncid, int varid, const size_t *startp, const size_t *countp,
/* Create a space for the memory, just big enough to hold the slab
we want. */
if ((mem_spaceid = H5Screate_simple(var->ndims, count, NULL)) < 0)
if ((mem_spaceid = H5Screate_simple((int)var->ndims, count, NULL)) < 0)
BAIL(NC_EHDFERR);
}
@ -2366,7 +2365,7 @@ NC4_HDF5_set_var_chunk_cache(int ncid, int varid, size_t size, size_t nelems,
assert(grp && h5);
/* Find the var. */
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
return NC_ENOTVAR;
assert(var && var->hdr.id == varid);
@ -2410,7 +2409,7 @@ nc_set_var_chunk_cache_ints(int ncid, int varid, int size, int nelems,
real_size = ((size_t) size) * MEGABYTE;
if (nelems >= 0)
real_nelems = nelems;
real_nelems = (size_t)nelems;
if (preemption >= 0)
real_preemption = (float)(preemption / 100.);

View File

@ -43,13 +43,12 @@ static int
flag_atts_dirty(NCindex *attlist) {
NC_ATT_INFO_T *att = NULL;
int i;
if(attlist == NULL) {
return NC_NOERR;
}
for(i=0;i<ncindexsize(attlist);i++) {
for(size_t i=0;i<ncindexsize(attlist);i++) {
att = (NC_ATT_INFO_T*)ncindexith(attlist,i);
if(att == NULL) continue;
att->dirty = NC_TRUE;
@ -79,7 +78,7 @@ rec_reattach_scales(NC_GRP_INFO_T *grp, int dimid, hid_t dimscaleid)
{
NC_VAR_INFO_T *var;
NC_GRP_INFO_T *child_grp;
int d, i;
size_t i;
int retval;
assert(grp && grp->hdr.name && dimid >= 0 && dimscaleid >= 0);
@ -104,7 +103,7 @@ rec_reattach_scales(NC_GRP_INFO_T *grp, int dimid, hid_t dimscaleid)
hdf5_var = (NC_HDF5_VAR_INFO_T*)var->format_var_info;
assert(hdf5_var != NULL);
for (d = 0; d < var->ndims; d++)
for (unsigned int d = 0; d < var->ndims; d++)
{
if (var->dimids[d] == dimid && !hdf5_var->dimscale)
{
@ -144,7 +143,7 @@ rec_detach_scales(NC_GRP_INFO_T *grp, int dimid, hid_t dimscaleid)
{
NC_VAR_INFO_T *var;
NC_GRP_INFO_T *child_grp;
int d, i;
size_t i;
int retval;
assert(grp && grp->hdr.name && dimid >= 0 && dimscaleid >= 0);
@ -166,7 +165,7 @@ rec_detach_scales(NC_GRP_INFO_T *grp, int dimid, hid_t dimscaleid)
assert(var && var->format_var_info);
hdf5_var = (NC_HDF5_VAR_INFO_T *)var->format_var_info;
for (d = 0; d < var->ndims; d++)
for (unsigned int d = 0; d < var->ndims; d++)
{
if (var->dimids[d] == dimid && !hdf5_var->dimscale)
{
@ -208,7 +207,7 @@ nc4_open_var_grp2(NC_GRP_INFO_T *grp, int varid, hid_t *dataset)
assert(grp && grp->format_grp_info && dataset);
/* Find the requested varid. */
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
return NC_ENOTVAR;
assert(var && var->hdr.id == varid && var->format_var_info);
hdf5_var = (NC_HDF5_VAR_INFO_T *)var->format_var_info;
@ -461,7 +460,7 @@ put_att_grpa(NC_GRP_INFO_T *grp, int varid, NC_ATT_INFO_T *att)
/* Get the length ready, and find the HDF type we'll be
* writing. */
dims[0] = att->len;
dims[0] = (hsize_t)att->len;
if ((retval = nc4_get_hdf_typeid(grp->nc4_info, att->nc_typeid,
&file_typeid, 0)))
BAIL(retval);
@ -609,9 +608,8 @@ write_attlist(NCindex *attlist, int varid, NC_GRP_INFO_T *grp)
{
NC_ATT_INFO_T *att;
int retval;
int i;
for(i = 0; i < ncindexsize(attlist); i++)
for(size_t i = 0; i < ncindexsize(attlist); i++)
{
att = (NC_ATT_INFO_T *)ncindexith(attlist, i);
assert(att);
@ -898,19 +896,16 @@ var_create_dataset(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var, nc_bool_t write_dimid
if(H5Pset_shuffle(plistid) < 0)
BAIL(NC_EHDFERR);
} else if(fi->filterid == H5Z_FILTER_DEFLATE) {/* Handle zip case here */
unsigned level;
if(fi->nparams != 1)
BAIL(NC_EFILTER);
level = (int)fi->params[0];
unsigned int level = fi->params[0];
if(H5Pset_deflate(plistid, level) < 0)
BAIL(NC_EFILTER);
} else if(fi->filterid == H5Z_FILTER_SZIP) {/* Handle szip case here */
int options_mask;
int bits_per_pixel;
if(fi->nparams != 2)
BAIL(NC_EFILTER);
options_mask = (int)fi->params[0];
bits_per_pixel = (int)fi->params[1];
unsigned int options_mask = fi->params[0];
unsigned int bits_per_pixel = fi->params[1];
if(H5Pset_szip(plistid, options_mask, bits_per_pixel) < 0)
BAIL(NC_EFILTER);
} else {
@ -970,8 +965,8 @@ var_create_dataset(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var, nc_bool_t write_dimid
if (dim->unlimited)
chunksize[d] = 1;
else
chunksize[d] = pow((double)DEFAULT_CHUNK_SIZE/type_size,
1/(double)(var->ndims - unlimdim));
chunksize[d] = (hsize_t)pow(DEFAULT_CHUNK_SIZE/(double)type_size,
1/(double)((int)var->ndims - unlimdim));
/* If the chunksize is greater than the dim
* length, make it the dim length. */
@ -985,7 +980,7 @@ var_create_dataset(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var, nc_bool_t write_dimid
}
/* Create the dataspace. */
if ((spaceid = H5Screate_simple(var->ndims, dimsize, maxdimsize)) < 0)
if ((spaceid = H5Screate_simple((int)var->ndims, dimsize, maxdimsize)) < 0)
BAIL(NC_EHDFERR);
}
else
@ -1009,7 +1004,7 @@ var_create_dataset(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var, nc_bool_t write_dimid
}
else if (var->ndims)
{
if (H5Pset_chunk(plistid, var->ndims, chunksize) < 0)
if (H5Pset_chunk(plistid, (int)var->ndims, chunksize) < 0)
BAIL(NC_EHDFERR);
}
@ -1214,7 +1209,7 @@ commit_type(NC_GRP_INFO_T *grp, NC_TYPE_INFO_T *type)
hsize_t dims[NC_MAX_VAR_DIMS];
for (d = 0; d < field->ndims; d++)
dims[d] = field->dim_size[d];
dims[d] = (hsize_t)field->dim_size[d];
if ((hdf_typeid = H5Tarray_create1(hdf_base_typeid, field->ndims,
dims, NULL)) < 0)
{
@ -1420,10 +1415,9 @@ attach_dimscales(NC_GRP_INFO_T *grp)
{
NC_VAR_INFO_T *var;
NC_HDF5_VAR_INFO_T *hdf5_var;
int d, v;
/* Attach dimension scales. */
for (v = 0; v < ncindexsize(grp->vars); v++)
for (size_t v = 0; v < ncindexsize(grp->vars); v++)
{
/* Get pointer to var and HDF5-specific var info. */
var = (NC_VAR_INFO_T *)ncindexith(grp->vars, v);
@ -1436,7 +1430,7 @@ attach_dimscales(NC_GRP_INFO_T *grp)
continue;
/* Find the scale for each dimension, if any, and attach it. */
for (d = 0; d < var->ndims; d++)
for (unsigned int d = 0; d < var->ndims; d++)
{
/* Is there a dimscale for this dimension? */
if (hdf5_var->dimscale_attached)
@ -1660,8 +1654,6 @@ write_var(NC_VAR_INFO_T *var, NC_GRP_INFO_T *grp, nc_bool_t write_dimid)
* and delete dimscale attributes from the var. */
if (var->was_coord_var && hdf5_var->dimscale_attached)
{
int d;
/* If the variable already exists in the file, Remove any dimension scale
* attributes from it, if they exist. */
if (var->created)
@ -1669,7 +1661,7 @@ write_var(NC_VAR_INFO_T *var, NC_GRP_INFO_T *grp, nc_bool_t write_dimid)
return retval;
/* If this is a regular var, detach all its dim scales. */
for (d = 0; d < var->ndims; d++)
for (unsigned int d = 0; d < var->ndims; d++)
{
if (hdf5_var->dimscale_attached[d])
{
@ -1941,10 +1933,9 @@ nc4_rec_write_metadata(NC_GRP_INFO_T *grp, nc_bool_t bad_coord_order)
NC_VAR_INFO_T *var = NULL;
NC_GRP_INFO_T *child_grp = NULL;
int coord_varid = -1;
int var_index = 0;
int dim_index = 0;
size_t var_index = 0;
size_t dim_index = 0;
int retval;
int i;
assert(grp && grp->hdr.name &&
((NC_HDF5_GRP_INFO_T *)(grp->format_grp_info))->hdf_grpid);
@ -2003,7 +1994,7 @@ nc4_rec_write_metadata(NC_GRP_INFO_T *grp, nc_bool_t bad_coord_order)
}
/* If there are any child groups, write their metadata. */
for (i = 0; i < ncindexsize(grp->children); i++)
for (size_t i = 0; i < ncindexsize(grp->children); i++)
{
child_grp = (NC_GRP_INFO_T *)ncindexith(grp->children, i);
assert(child_grp);
@ -2029,7 +2020,7 @@ nc4_rec_write_groups_types(NC_GRP_INFO_T *grp)
NC_HDF5_GRP_INFO_T *hdf5_grp;
NC_TYPE_INFO_T *type;
int retval;
int i;
size_t i;
assert(grp && grp->hdr.name && grp->format_grp_info);
LOG((3, "%s: grp->hdr.name %s", __func__, grp->hdr.name));
@ -2085,7 +2076,7 @@ nc4_rec_match_dimscales(NC_GRP_INFO_T *grp)
NC_VAR_INFO_T *var;
NC_DIM_INFO_T *dim;
int retval = NC_NOERR;
int i;
size_t i;
assert(grp && grp->hdr.name);
LOG((4, "%s: grp->hdr.name %s", __func__, grp->hdr.name));
@ -2104,7 +2095,6 @@ nc4_rec_match_dimscales(NC_GRP_INFO_T *grp)
for (i = 0; i < ncindexsize(grp->vars); i++)
{
NC_HDF5_VAR_INFO_T *hdf5_var;
int ndims;
int d;
/* Get pointer to var and to the HDF5-specific var info. */
@ -2127,8 +2117,8 @@ nc4_rec_match_dimscales(NC_GRP_INFO_T *grp)
The solution I choose is to modify nc4_var_list_add to initialize dimids to
illegal values (-1). This is another example of the problems with dimscales.
*/
ndims = var->ndims;
for (d = 0; d < ndims; d++)
const size_t ndims = var->ndims;
for (size_t d = 0; d < ndims; d++)
{
if (var->dim[d] == NULL) {
nc4_find_dim(grp, var->dimids[d], &var->dim[d], NULL);
@ -2139,13 +2129,10 @@ nc4_rec_match_dimscales(NC_GRP_INFO_T *grp)
/* Skip dimension scale variables */
if (!hdf5_var->dimscale)
{
int d;
int j;
/* Are there dimscales for this variable? */
if (hdf5_var->dimscale_hdf5_objids)
{
for (d = 0; d < var->ndims; d++)
for (size_t d = 0; d < var->ndims; d++)
{
nc_bool_t finished = NC_FALSE;
LOG((5, "%s: var %s has dimscale info...", __func__, var->hdr.name));
@ -2154,7 +2141,7 @@ nc4_rec_match_dimscales(NC_GRP_INFO_T *grp)
for (g = grp; g && !finished; g = g->parent)
{
/* Check all dims in this group. */
for (j = 0; j < ncindexsize(g->dim); j++)
for (size_t j = 0; j < ncindexsize(g->dim); j++)
{
/* Get the HDF5 specific dim info. */
NC_HDF5_DIM_INFO_T *hdf5_dim;
@ -2243,19 +2230,18 @@ nc4_rec_match_dimscales(NC_GRP_INFO_T *grp)
* size. */
for (d = 0; d < var->ndims; d++)
{
int k;
int match;
nc_bool_t match = NC_FALSE;
/* Is there already a phony dimension of the correct size? */
for(match=-1,k=0;k<ncindexsize(grp->dim);k++) {
for(size_t k=0;k<ncindexsize(grp->dim);k++) {
if((dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,k)) == NULL) continue;
if ((dim->len == h5dimlen[d]) &&
((h5dimlenmax[d] == H5S_UNLIMITED && dim->unlimited) ||
(h5dimlenmax[d] != H5S_UNLIMITED && !dim->unlimited)))
{match = k; break;}
{match = NC_TRUE; break;}
}
/* Didn't find a phony dim? Then create one. */
if (match < 0)
if (match == NC_FALSE)
{
char phony_dim_name[NC_MAX_NAME + 1];
snprintf(phony_dim_name, sizeof(phony_dim_name), "phony_dim_%d", grp->nc4_info->next_dimid);
@ -2350,7 +2336,6 @@ reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes)
{
int t,i;
ssize_t ocount;
size_t maxobjs = -1;
hid_t* idlist = NULL;
/* Always report somehow */
@ -2360,7 +2345,7 @@ reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes)
else
#endif
fprintf(stdout,"\nReport: open objects on %lld\n",(long long)fid);
maxobjs = H5Fget_obj_count(fid,H5F_OBJ_ALL);
size_t maxobjs = (size_t)H5Fget_obj_count(fid,H5F_OBJ_ALL);
if(idlist != NULL) free(idlist);
idlist = (hid_t*)malloc(sizeof(hid_t)*maxobjs);
for(t=0;t<ntypes;t++) {
@ -2573,7 +2558,7 @@ static int
NC4_walk(hid_t gid, int* countp)
{
int ncstat = NC_NOERR;
int i,j,na;
int j,na;
ssize_t len;
hsize_t nobj;
herr_t err;
@ -2585,12 +2570,12 @@ NC4_walk(hid_t gid, int* countp)
err = H5Gget_num_objs(gid, &nobj);
if(err < 0) return err;
for(i = 0; i < nobj; i++) {
for(hsize_t i = 0; i < nobj; i++) {
/* Get name & kind of object in the group */
len = H5Gget_objname_by_idx(gid,(hsize_t)i,name,(size_t)NC_HDF5_MAX_NAME);
if(len < 0) return len;
len = H5Gget_objname_by_idx(gid,i,name,(size_t)NC_HDF5_MAX_NAME);
if(len < 0) return (int)len;
otype = H5Gget_objtype_by_idx(gid,(size_t)i);
otype = H5Gget_objtype_by_idx(gid, i);
switch(otype) {
case H5G_GROUP:
grpid = H5Gopen1(gid,name);
@ -2608,8 +2593,7 @@ NC4_walk(hid_t gid, int* countp)
if(aid >= 0) {
const NC_reservedatt* ra;
ssize_t len = H5Aget_name(aid, NC_HDF5_MAX_NAME, name);
if(len < 0) return len;
/* Is this a netcdf-4 marker attribute */
if(len < 0) return (int)len;
/* Is this a netcdf-4 marker attribute */
ra = NC_findreserved(name);
if(ra != NULL)

View File

@ -723,7 +723,7 @@ NC4_image_init(NC_FILE_INFO_T* h5)
char file_name[64]; /* Filename buffer */
size_t alloc_incr; /* Buffer allocation increment */
size_t min_incr = 65536; /* Minimum buffer increment */
double buf_prcnt = 0.1f; /* Percentage of buffer size to set
double buf_prcnt = 0.1; /* Percentage of buffer size to set
as increment */
unsigned imageflags;
int create = 0;
@ -753,8 +753,8 @@ NC4_image_init(NC_FILE_INFO_T* h5)
/* set allocation increment to a percentage of the supplied buffer size, or
* a pre-defined minimum increment value, whichever is larger
*/
if ((buf_prcnt * h5->mem.memio.size) > min_incr)
alloc_incr = (size_t)(buf_prcnt * h5->mem.memio.size);
if ((size_t)(buf_prcnt * (double)h5->mem.memio.size) > min_incr)
alloc_incr = (size_t)(buf_prcnt * (double)h5->mem.memio.size);
else
alloc_incr = min_incr;

View File

@ -42,7 +42,7 @@ if(USE_HDF4)
)
endif()
if(ENABLE_DAP2)
if(NETCDF_ENABLE_DAP2)
target_sources(netcdf
PRIVATE
$<TARGET_OBJECTS:oc2>
@ -50,7 +50,7 @@ if(ENABLE_DAP2)
)
endif()
if(ENABLE_DAP4)
if(NETCDF_ENABLE_DAP4)
target_sources(netcdf
PRIVATE
$<TARGET_OBJECTS:dap4>
@ -58,21 +58,21 @@ if(ENABLE_DAP4)
)
endif()
if(ENABLE_NCZARR)
if(NETCDF_ENABLE_NCZARR)
target_sources(netcdf
PRIVATE
$<TARGET_OBJECTS:nczarr>
)
endif()
if(ENABLE_S3_INTERNAL)
if(NETCDF_ENABLE_S3_INTERNAL)
target_sources(netcdf
PRIVATE
$<TARGET_OBJECTS:ncxml>
)
endif()
if(ENABLE_PLUGINS)
if(NETCDF_ENABLE_PLUGINS)
target_sources(netcdf
PRIVATE
$<TARGET_OBJECTS:ncpoco>
@ -114,7 +114,7 @@ set(TLL_LIBS ${TLL_LIBS} ${HAVE_LIBM} ${ZLIB_LIBRARY})
# Add extra dependencies specified via NC_EXTRA_DEPS
set(TLL_LIBS ${TLL_LIBS} ${EXTRA_DEPS})
if(ENABLE_NCZARR_ZIP)
if(NETCDF_ENABLE_NCZARR_ZIP)
set(TLL_LIBS ${TLL_LIBS} ${Zip_LIBRARIES})
endif()
@ -135,7 +135,7 @@ if(HAVE_LIBDL)
set(TLL_LIBS ${LIBDL} ${TLL_LIBS})
endif()
if(ENABLE_NCZARR_ZIP)
if(NETCDF_ENABLE_NCZARR_ZIP)
set(TLL_LIBS ${TLL_LIBS} ${Zip_LIBRARIES})
endif()
@ -151,21 +151,21 @@ if(USE_HDF4)
set(TLL_LIBS ${HDF4_LIBRARIES} ${TLL_LIBS})
endif()
if(ENABLE_PNETCDF AND PNETCDF)
set(TLL_LIBS ${TLL_LIBS} ${PNETCDF})
if(NETCDF_ENABLE_PNETCDF)
SET(TLL_LIBS ${TLL_LIBS} PNETCDF::PNETCDF)
endif()
if(ENABLE_S3)
if(ENABLE_S3_AWS)
if(NETCDF_ENABLE_S3)
if(NETCDF_ENABLE_S3_AWS)
target_link_directories(netcdf PUBLIC ${AWSSDK_LIB_DIR})
set(TLL_LIBS ${AWSSDK_LINK_LIBRARIES} ${TLL_LIBS})
elseif(ENABLE_S3_INTERNAL)
elseif(NETCDF_ENABLE_S3_INTERNAL)
if(OPENSSL_FOUND)
set(TLL_LIBS ${OPENSSL_SSL_LIBRARIES} ${OPENSSL_CRYPTO_LIBRARIES} ${TLL_LIBS})
endif(OPENSSL_FOUND)
endif(ENABLE_S3_AWS)
endif(ENABLE_S3)
endif(NETCDF_ENABLE_S3_AWS)
endif(NETCDF_ENABLE_S3)
if(HAVE_LIBXML2)
set(TLL_LIBS ${TLL_LIBS} ${LIBXML2_LIBRARIES})
@ -188,16 +188,17 @@ target_link_libraries(netcdf PRIVATE ${TLL_LIBS})
set(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${TLL_LIBS})
if(MSVC)
set_target_properties(netcdf PROPERTIES
LINK_FLAGS_DEBUG " /NODEFAULTLIB:MSVCRT"
)
endif()
if(ENABLE_SHARED_LIBRARY_VERSION)
if(NETCDF_ENABLE_SHARED_LIBRARY_VERSION)
set_target_properties(netcdf PROPERTIES
VERSION ${netCDF_LIB_VERSION}
SOVERSION ${netCDF_SO_VERSION}
)
endif(ENABLE_SHARED_LIBRARY_VERSION)
endif(NETCDF_ENABLE_SHARED_LIBRARY_VERSION)
install(
TARGETS netcdf EXPORT netCDFTargets

View File

@ -53,25 +53,25 @@ libnetcdf_la_LIBADD += ${top_builddir}/libhdf4/libnchdf4.la
endif # USE_HDF4
# + dap
if ENABLE_DAP
if NETCDF_ENABLE_DAP
AM_CPPFLAGS += -I${top_srcdir}/libdap2 -I${top_srcdir}/oc
libnetcdf_la_LIBADD += ${top_builddir}/libdap2/libdap2.la
libnetcdf_la_LIBADD += ${top_builddir}/oc2/liboc.la
endif # ENABLE_DAP
endif # NETCDF_ENABLE_DAP
if ENABLE_DAP4
if NETCDF_ENABLE_DAP4
AM_CPPFLAGS += -I${top_srcdir}/libdap4
libnetcdf_la_LIBADD += ${top_builddir}/libdap4/libdap4.la
endif # ENABLE_DAP4
endif # NETCDF_ENABLE_DAP4
AM_CPPFLAGS += -I${top_srcdir}/libncxml
if ENABLE_DAP4
if NETCDF_ENABLE_DAP4
libnetcdf_la_LIBADD += ${top_builddir}/libncxml/libncxml.la
else
if ENABLE_S3_INTERNAL
if NETCDF_ENABLE_S3_INTERNAL
libnetcdf_la_LIBADD += ${top_builddir}/libncxml/libncxml.la
endif
endif # ENABLE_S3_INTERNAL || ENABLE_DAP4
endif # NETCDF_ENABLE_S3_INTERNAL || NETCDF_ENABLE_DAP4
# NetCDF-4 ...
if USE_NETCDF4
@ -79,18 +79,18 @@ AM_CPPFLAGS += -I${top_srcdir}/libsrc4
libnetcdf_la_LIBADD += ${top_builddir}/libsrc4/libnetcdf4.la
endif #USE_NETCDF4
if ENABLE_NCZARR
if NETCDF_ENABLE_NCZARR
AM_CPPFLAGS += -I${top_srcdir}/libnczarr
libnetcdf_la_LIBADD += ${top_builddir}/libnczarr/libnczarr.la
if ENABLE_S3_AWS
if NETCDF_ENABLE_S3_AWS
libnetcdf_la_LIBADD += ${aws_cpp_sdk_core_LIBS} ${aws_cpp_sdk_s3_LIBS}
endif
endif #ENABLE_NCZARR
endif #NETCDF_ENABLE_NCZARR
if ENABLE_PLUGINS
if NETCDF_ENABLE_PLUGINS
AM_CPPFLAGS += -I${top_srcdir}/libncpoco
libnetcdf_la_LIBADD += ${top_builddir}/libncpoco/libncpoco.la
endif #ENABLE_PLUGINS
endif #NETCDF_ENABLE_PLUGINS
if ISCYGWIN
# Force binary mode for file read/write

View File

@ -26,12 +26,12 @@ extern int NC_HDF5_initialize(void);
extern int NC_HDF5_finalize(void);
#endif
#ifdef ENABLE_DAP2
#ifdef NETCDF_ENABLE_DAP2
extern int NCD2_initialize(void);
extern int NCD2_finalize(void);
#endif
#ifdef ENABLE_DAP4
#ifdef NETCDF_ENABLE_DAP4
extern int NCD4_initialize(void);
extern int NCD4_finalize(void);
#endif
@ -59,7 +59,7 @@ EXTERNL int NC_s3sdkfinalize(void);
int NC_initialized = 0;
int NC_finalized = 1;
#ifdef ENABLE_ATEXIT_FINALIZE
#ifdef NETCDF_ENABLE_ATEXIT_FINALIZE
/* Provide the void function to give to atexit() */
static void
finalize_atexit(void)
@ -91,10 +91,10 @@ nc_initialize()
/* Initialize each active protocol */
if((stat = NC3_initialize())) goto done;
#ifdef ENABLE_DAP
#ifdef NETCDF_ENABLE_DAP
if((stat = NCD2_initialize())) goto done;
#endif
#ifdef ENABLE_DAP4
#ifdef NETCDF_ENABLE_DAP4
if((stat = NCD4_initialize())) goto done;
#endif
#ifdef USE_PNETCDF
@ -112,11 +112,11 @@ nc_initialize()
#ifdef ENABLE_S3
if((stat = NC_s3sdkinitialize())) goto done;
#endif
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
if((stat = NCZ_initialize())) goto done;
#endif
#ifdef ENABLE_ATEXIT_FINALIZE
#ifdef NETCDF_ENABLE_ATEXIT_FINALIZE
/* Use atexit() to invoke nc_finalize */
if(atexit(finalize_atexit))
fprintf(stderr,"atexit failed\n");
@ -147,10 +147,10 @@ nc_finalize(void)
/* Finalize each active protocol */
#ifdef ENABLE_DAP2
#ifdef NETCDF_ENABLE_DAP2
if((stat = NCD2_finalize())) failed = stat;
#endif
#ifdef ENABLE_DAP4
#ifdef NETCDF_ENABLE_DAP4
if((stat = NCD4_finalize())) failed = stat;
#endif
@ -170,7 +170,7 @@ nc_finalize(void)
if((stat = NC_HDF5_finalize())) failed = stat;
#endif
#ifdef ENABLE_NCZARR
#ifdef NETCDF_ENABLE_NCZARR
if((stat = NCZ_finalize())) failed = stat;
#endif

View File

@ -8,7 +8,7 @@ endif()
add_library(ncpoco OBJECT ${libncpoco_SOURCES})
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(ncpoco PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

View File

@ -19,7 +19,7 @@ else()
)
endif(HAVE_LIBXML2)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(ncxml PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

View File

@ -11,7 +11,7 @@
include $(top_srcdir)/lib_flags.am
if ENABLE_LIBXML2
if NETCDF_ENABLE_LIBXML2
AM_CPPFLAGS += ${XML2FLAGS}
endif
@ -20,7 +20,7 @@ noinst_LTLIBRARIES = libncxml.la
libncxml_la_LIBADD =
libncxml_la_LDFLAGS =
if ENABLE_LIBXML2
if NETCDF_ENABLE_LIBXML2
libncxml_la_SOURCES = ncxml_xml2.c
else
AM_CXXFLAGS = -std=c++11

View File

@ -46,11 +46,11 @@ zfilter.h
zdebug.h
)
if(ENABLE_NCZARR_ZIP)
if(NETCDF_ENABLE_NCZARR_ZIP)
set(libnczarr_SOURCES ${libnczarr_SOURCES} zmap_zip.c)
endif()
if(ENABLE_S3)
if(NETCDF_ENABLE_S3)
set(libnczarr_SOURCES ${libnczarr_SOURCES} zmap_s3sdk.c)
endif()
@ -69,7 +69,7 @@ if(STATUS_PARALLEL)
target_link_libraries(nczarr PUBLIC MPI::MPI_C)
endif(STATUS_PARALLEL)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(nczarr PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

View File

@ -15,7 +15,7 @@ AM_CXXFLAGS =
libnczarr_la_LIBADD =
libnczarr_la_LDFLAGS =
# Remember ENABLE_NCZARR implies USE_NETCDF4
# Remember NETCDF_ENABLE_NCZARR implies USE_NETCDF4
# We may have to add to these later.
DISTCLEANFILES =
@ -64,17 +64,17 @@ zprovenance.h \
zfilter.h \
zdebug.h
if ENABLE_NCZARR_ZIP
if NETCDF_ENABLE_NCZARR_ZIP
libnczarr_la_SOURCES += zmap_zip.c
endif
if ENABLE_NCZARR_FILTERS
if NETCDF_ENABLE_NCZARR_FILTERS
libnczarr_la_SOURCES += zfilter.c
endif
if ENABLE_S3
if NETCDF_ENABLE_S3
libnczarr_la_SOURCES += zmap_s3sdk.c
if ENABLE_S3_AWS
if NETCDF_ENABLE_S3_AWS
AM_CXXFLAGS += -std=c++11
endif
endif

View File

@ -102,7 +102,7 @@ ncz_get_att_special(NC_FILE_INFO_T* h5, NC_VAR_INFO_T* var, const char* name,
/* Handle the per-var case(s) first */
if(var != NULL) {
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
if(strcmp(name,NC_ATT_CODECS)==0) {
NClist* filters = (NClist*)var->filters;

View File

@ -161,7 +161,7 @@ NCZ_zclose_var1(NC_VAR_INFO_T* var)
nullfree(zatt);
att->format_att_info = NULL; /* avoid memory errors */
}
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
/* Reclaim filters */
if(var->filters != NULL) {
(void)NCZ_filter_freelists(var);

View File

@ -157,7 +157,7 @@ NCZ_show_metadata(int ncid)
return NC_NOERR;
}
#ifndef ENABLE_NCZARR_FILTERS
#ifndef NETCDF_ENABLE_NCZARR_FILTERS
int
NCZ_def_var_filter(int ncid, int varid, unsigned int id , size_t n , const unsigned int *params)
{
@ -199,7 +199,7 @@ NCZ_inq_filter_avail(int ncid, unsigned id)
}
#endif /*ENABLE_NCZARR_FILTERS*/
#endif /*NETCDF_ENABLE_NCZARR_FILTERS*/
/**************************************************/
/* Following functions call into libsrc4 */

View File

@ -523,7 +523,7 @@ nc_var_filter_remove(int ncid, int varid, unsigned int filterid)
}
#endif
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
int
NCZ_def_var_filter(int ncid, int varid, unsigned int id, size_t nparams,
const unsigned int* params)
@ -743,7 +743,7 @@ done:
return ZUNTRACE(stat);
}
#endif /*ENABLE_NCZARR_FILTERS*/
#endif /*NETCDF_ENABLE_NCZARR_FILTERS*/
/**************************************************/
/* Filter application functions */
@ -760,7 +760,7 @@ NCZ_filter_initialize(void)
codec_defaults = nclistnew();
NCZ_filter_initialized = 1;
memset(loaded_plugins,0,sizeof(loaded_plugins));
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
if((stat = NCZ_load_all_plugins())) goto done;
#endif
@ -775,7 +775,7 @@ NCZ_filter_finalize(void)
int i;
ZTRACE(6,"");
if(!NCZ_filter_initialized) goto done;
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
/* Reclaim all loaded filters */
#ifdef DEBUGL
fprintf(stderr,">>> DEBUGL: finalize reclaim:\n");

View File

@ -82,10 +82,10 @@ NCZ_finalize_internal(void)
{
/* Reclaim global resources */
ncz_initialized = 0;
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
NCZ_filter_finalize();
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
NCZ_s3finalize();
#endif
return NC_NOERR;

View File

@ -19,10 +19,10 @@ nczmap_features(NCZM_IMPL impl)
{
switch (impl) {
case NCZM_FILE: return zmap_file.features;
#ifdef ENABLE_NCZARR_ZIP
#ifdef NETCDF_ENABLE_NCZARR_ZIP
case NCZM_ZIP: return zmap_zip.features;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3: return zmap_s3sdk.features;
#endif
default: break;
@ -52,13 +52,13 @@ nczmap_create(NCZM_IMPL impl, const char *path, int mode, size64_t flags, void*
stat = zmap_file.create(path, mode, flags, parameters, &map);
if(stat) goto done;
break;
#ifdef ENABLE_NCZARR_ZIP
#ifdef NETCDF_ENABLE_NCZARR_ZIP
case NCZM_ZIP:
stat = zmap_zip.create(path, mode, flags, parameters, &map);
if(stat) goto done;
break;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3:
stat = zmap_s3sdk.create(path, mode, flags, parameters, &map);
if(stat) goto done;
@ -90,13 +90,13 @@ nczmap_open(NCZM_IMPL impl, const char *path, int mode, size64_t flags, void* pa
stat = zmap_file.open(path, mode, flags, parameters, &map);
if(stat) goto done;
break;
#ifdef ENABLE_NCZARR_ZIP
#ifdef NETCDF_ENABLE_NCZARR_ZIP
case NCZM_ZIP:
stat = zmap_zip.open(path, mode, flags, parameters, &map);
if(stat) goto done;
break;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3:
stat = zmap_s3sdk.open(path, mode, flags, parameters, &map);
if(stat) goto done;
@ -122,12 +122,12 @@ nczmap_truncate(NCZM_IMPL impl, const char *path)
case NCZM_FILE:
if((stat = zmap_file.truncate(path))) goto done;
break;
#ifdef ENABLE_NCZARR_ZIP
#ifdef NETCDF_ENABLE_NCZARR_ZIP
case NCZM_ZIP:
if((stat = zmap_zip.truncate(path))) goto done;
break;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3:
if((stat = zmap_s3sdk.truncate(path))) goto done;
break;

View File

@ -217,10 +217,10 @@ extern NCZMAP_DS_API zmap_file;
#ifdef USE_HDF5
extern NCZMAP_DS_API zmap_nz4;
#endif
#ifdef ENABLE_NCZARR_ZIP
#ifdef NETCDF_ENABLE_NCZARR_ZIP
extern NCZMAP_DS_API zmap_zip;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
extern NCZMAP_DS_API zmap_s3sdk;
#endif
@ -323,7 +323,7 @@ EXTERNL int nczmap_close(NCZMAP* map, int deleteit);
EXTERNL int nczmap_create(NCZM_IMPL impl, const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp);
EXTERNL int nczmap_open(NCZM_IMPL impl, const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp);
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
EXTERNL void NCZ_s3finalize(void);
#endif

View File

@ -296,7 +296,7 @@ ncz_sync_var_meta(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, int isclose)
int purezarr = 0;
size64_t shape[NC_MAX_VAR_DIMS];
NCZ_VAR_INFO_T* zvar = var->format_var_info;
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
NClist* filterchain = NULL;
NCjson* jfilter = NULL;
#endif
@ -316,7 +316,7 @@ ncz_sync_var_meta(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, int isclose)
if((stat = NCZ_adjust_var_cache(var))) goto done;
/* rebuild the fill chunk */
if((stat = NCZ_ensure_fill_chunk(zvar->cache))) goto done;
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
/* Build the filter working parameters for any filters */
if((stat = NCZ_filter_setup(var))) goto done;
#endif
@ -413,7 +413,7 @@ ncz_sync_var_meta(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, int isclose)
/* From V2 Spec: A JSON object identifying the primary compression codec and providing
configuration parameters, or ``null`` if no compressor is to be used. */
if((stat = NCJaddstring(jvar,NCJ_STRING,"compressor"))) goto done;
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
filterchain = (NClist*)var->filters;
if(nclistlength(filterchain) > 0) {
struct NCZ_Filter* filter = (struct NCZ_Filter*)nclistget(filterchain,nclistlength(filterchain)-1);
@ -435,7 +435,7 @@ ncz_sync_var_meta(NC_FILE_INFO_T* file, NC_VAR_INFO_T* var, int isclose)
/* A list of JSON objects providing codec configurations, or ``null``
if no filters are to be applied. */
if((stat = NCJaddstring(jvar,NCJ_STRING,"filters"))) goto done;
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
if(nclistlength(filterchain) > 1) {
size_t k;
/* jtmp holds the array of filters */
@ -1486,7 +1486,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
int vtypelen = 0;
int rank = 0;
int zarr_rank = 0; /* Need to watch out for scalars */
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
NCjson* jfilter = NULL;
int chainindex = 0;
#endif
@ -1736,7 +1736,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
object MUST contain a "id" key identifying the codec to be used. */
/* Do filters key before compressor key so final filter chain is in correct order */
{
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
if(var->filters == NULL) var->filters = (void*)nclistnew();
if(zvar->incompletefilters == NULL) zvar->incompletefilters = (void*)nclistnew();
chainindex = 0; /* track location of filter in the chain */
@ -1759,7 +1759,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
/* compressor key */
/* From V2 Spec: A JSON object identifying the primary compression codec and providing
configuration parameters, or ``null`` if no compressor is to be used. */
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
{
if(var->filters == NULL) var->filters = (void*)nclistnew();
if((stat = NCZ_filter_initialize())) goto done;
@ -1783,7 +1783,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
}
}
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
if(!suppress) {
/* At this point, we can finalize the filters */
if((stat = NCZ_filter_setup(var))) goto done;

View File

@ -389,7 +389,7 @@ NCZ_def_var(int ncid, const char *name, nc_type xtype, int ndims,
var->meta_read = NC_TRUE;
var->atts_read = NC_TRUE;
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
/* Set the filter list */
assert(var->filters == NULL);
var->filters = (void*)nclistnew();
@ -558,7 +558,7 @@ ncz_def_var_extra(int ncid, int varid, int *shuffle, int *unused1,
/* Can't turn on parallel and deflate/fletcher32/szip/shuffle
* before HDF5 1.10.3. */
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
#ifndef HDF5_SUPPORTS_PAR_FILTERS
if (h5->parallel == NC_TRUE)
if (nclistlength(((NClist*)var->filters)) > 0 || fletcher32 || shuffle)

View File

@ -675,7 +675,7 @@ put_chunk(NCZChunkCache* cache, NCZCacheEntry* entry)
}
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
/* Make sure the entry is in filtered state */
if(!entry->isfiltered) {
NC_VAR_INFO_T* var = cache->var;
@ -792,7 +792,7 @@ get_chunk(NCZChunkCache* cache, NCZCacheEntry* entry)
if((stat = NCZ_copy_data(file,cache->var,cache->fillchunk,cache->chunkcount,ZREADING,entry->data))) goto done;
stat = NC_NOERR;
}
#ifdef ENABLE_NCZARR_FILTERS
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
/* Make sure the entry is in unfiltered state */
if(!empty && entry->isfiltered) {
NC_VAR_INFO_T* var = cache->var;

View File

@ -40,26 +40,26 @@ else (USE_FFIO)
list(APPEND libsrc_SOURCES posixio.c)
ENDif (USE_FFIO)
if (ENABLE_BYTERANGE)
if (NETCDF_ENABLE_BYTERANGE)
list(APPEND libsrc_SOURCES httpio.c)
if (ENABLE_S3)
if (NETCDF_ENABLE_S3)
list(APPEND libsrc_SOURCES s3io.c)
endif(ENABLE_S3)
endif(ENABLE_BYTERANGE)
endif(NETCDF_ENABLE_S3)
endif(NETCDF_ENABLE_BYTERANGE)
add_library(netcdf3 OBJECT ${libsrc_SOURCES})
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdf3 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()
if (ENABLE_BYTERANGE)
if (NETCDF_ENABLE_BYTERANGE)
target_include_directories(netcdf3 PUBLIC ${CURL_INCLUDE_DIRS})
target_compile_options(netcdf3
PRIVATE
-DCURL_STATICLIB=1
)
endif (ENABLE_BYTERANGE)
endif (NETCDF_ENABLE_BYTERANGE)
if(STATUS_PARALLEL)
target_link_libraries(netcdf3 PUBLIC MPI::MPI_C)

View File

@ -29,14 +29,14 @@ libnetcdf3_la_SOURCES += posixio.c
endif !USE_STDIO
endif !USE_FFIO
if ENABLE_BYTERANGE
if NETCDF_ENABLE_BYTERANGE
libnetcdf3_la_SOURCES += httpio.c
if ENABLE_S3
if NETCDF_ENABLE_S3
libnetcdf3_la_SOURCES += s3io.c
endif
endif ENABLE_BYTERANGE
endif NETCDF_ENABLE_BYTERANGE
noinst_LTLIBRARIES = libnetcdf3.la

View File

@ -92,7 +92,7 @@ err:
int
nc3_cktype(int mode, nc_type type)
{
#ifdef ENABLE_CDF5
#ifdef NETCDF_ENABLE_CDF5
if (mode & NC_CDF5) { /* CDF-5 format */
if (type >= NC_BYTE && type < NC_STRING) return NC_NOERR;
} else
@ -1134,7 +1134,7 @@ nc_set_default_format(int format, int *old_formatp)
return NC_EINVAL;
#else
if (format != NC_FORMAT_CLASSIC && format != NC_FORMAT_64BIT_OFFSET
#ifdef ENABLE_CDF5
#ifdef NETCDF_ENABLE_CDF5
&& format != NC_FORMAT_CDF5
#endif
)
@ -1563,7 +1563,7 @@ NC3_inq_format(int ncid, int *formatp)
return NC_NOERR;
/* only need to check for netCDF-3 variants, since this is never called for netCDF-4 files */
#ifdef ENABLE_CDF5
#ifdef NETCDF_ENABLE_CDF5
if (fIsSet(nc3->flags, NC_64BIT_DATA))
*formatp = NC_FORMAT_CDF5;
else

View File

@ -37,11 +37,11 @@ extern int ffio_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** co
extern int mmapio_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** const);
# endif
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
extern int httpio_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** const);
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
extern int s3io_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** const);
#endif
@ -49,7 +49,7 @@ extern int ffio_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** co
extern int memio_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** const);
/* Forward */
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
static int urlmodetest(const char* path);
#endif
@ -85,7 +85,7 @@ ncio_open(const char *path, int ioflags,
void* parameters,
ncio** iopp, void** const mempp)
{
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
int modetest = urlmodetest(path);
#endif
@ -103,16 +103,16 @@ ncio_open(const char *path, int ioflags,
return mmapio_open(path,ioflags,igeto,igetsz,sizehintp,parameters,iopp,mempp);
}
# endif /*USE_MMAP*/
# ifdef ENABLE_BYTERANGE
# ifdef NETCDF_ENABLE_BYTERANGE
if(modetest == NC_HTTP) {
return httpio_open(path,ioflags,igeto,igetsz,sizehintp,parameters,iopp,mempp);
}
# ifdef ENABLE_S3
# ifdef NETCDF_ENABLE_S3
if(modetest == NC_S3SDK) {
return s3io_open(path,ioflags,igeto,igetsz,sizehintp,parameters,iopp,mempp);
}
# endif
# endif /*ENABLE_BYTERANGE*/
# endif /*NETCDF_ENABLE_BYTERANGE*/
#ifdef USE_STDIO
return stdio_open(path,ioflags,igeto,igetsz,sizehintp,parameters,iopp,mempp);
@ -181,7 +181,7 @@ NC_HTTP => byterange
NC_S3SDK => s3
0 => Not URL
*/
#ifdef ENABLE_BYTERANGE
#ifdef NETCDF_ENABLE_BYTERANGE
static int
urlmodetest(const char* path)
{

View File

@ -11,7 +11,7 @@ nc4internal.c nc4type.c nc4var.c ncfunc.c ncindex.c nc4cache.c)
add_library(netcdf4 OBJECT ${libsrc4_SOURCES})
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdf4 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

View File

@ -195,7 +195,7 @@ nc4_get_att(int ncid, int varid, const char *name, nc_type *xtype,
/* Check varid */
if (varid != NC_GLOBAL)
{
if (!(var = (NC_VAR_INFO_T*)ncindexith(grp->vars,varid)))
if (!(var = (NC_VAR_INFO_T*)ncindexith(grp->vars,(size_t)varid)))
return NC_ENOTVAR;
assert(var->hdr.id == varid);
}

View File

@ -127,8 +127,8 @@ nc_set_chunk_cache_ints(int size, int nelems, int preemption)
NCglobalstate* gs = NC_getglobalstate();
if (size <= 0 || nelems <= 0 || preemption < 0 || preemption > 100)
return NC_EINVAL;
gs->chunkcache.size = size;
gs->chunkcache.nelems = nelems;
gs->chunkcache.size = (size_t)size;
gs->chunkcache.nelems = (size_t)nelems;
gs->chunkcache.preemption = (float)preemption / 100;
return NC_NOERR;
}

View File

@ -37,7 +37,6 @@ NC4_inq_unlimdim(int ncid, int *unlimdimidp)
NC_DIM_INFO_T *dim;
int found = 0;
int retval;
int i;
LOG((2, "%s: called", __func__));
@ -52,7 +51,7 @@ NC4_inq_unlimdim(int ncid, int *unlimdimidp)
*unlimdimidp = -1;
for (g = grp; g && !found; g = g->parent)
{
for(i=0;i<ncindexsize(grp->dim);i++)
for(size_t i=0;i<ncindexsize(grp->dim);i++)
{
dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,i);
if(dim == NULL) continue;
@ -178,7 +177,6 @@ NC4_inq_unlimdims(int ncid, int *nunlimdimsp, int *unlimdimidsp)
NC_FILE_INFO_T *h5;
int num_unlim = 0;
int retval;
int i;
LOG((2, "%s: ncid 0x%x", __func__, ncid));
@ -190,7 +188,7 @@ NC4_inq_unlimdims(int ncid, int *nunlimdimsp, int *unlimdimidsp)
/* Get our dim info. */
assert(h5);
{
for(i=0;i<ncindexsize(grp->dim);i++)
for(size_t i=0;i<ncindexsize(grp->dim);i++)
{
dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,i);
if(dim == NULL) continue;

View File

@ -87,7 +87,6 @@ NC4_inq_grps(int ncid, int *numgrps, int *ncids)
NC_FILE_INFO_T *h5;
int num = 0;
int retval;
int i;
LOG((2, "nc_inq_grps: ncid 0x%x", ncid));
@ -97,7 +96,7 @@ NC4_inq_grps(int ncid, int *numgrps, int *ncids)
assert(h5);
/* Count the number of groups in this group. */
for(i=0;i<ncindexsize(grp->children);i++)
for(size_t i=0;i<ncindexsize(grp->children);i++)
{
g = (NC_GRP_INFO_T*)ncindexith(grp->children,i);
if(g == NULL) continue;
@ -349,7 +348,6 @@ NC4_inq_varids(int ncid, int *nvars, int *varids)
NC_VAR_INFO_T *var;
int num_vars = 0;
int retval;
int i;
LOG((2, "nc_inq_varids: ncid 0x%x", ncid));
@ -360,7 +358,7 @@ NC4_inq_varids(int ncid, int *nvars, int *varids)
/* This is a netCDF-4 group. Round up them doggies and count
* 'em. The list is in correct (i.e. creation) order. */
for (i=0; i < ncindexsize(grp->vars); i++)
for (size_t i=0; i < ncindexsize(grp->vars); i++)
{
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,i);
if (!var) continue;
@ -438,10 +436,9 @@ NC4_inq_dimids(int ncid, int *ndims, int *dimids, int include_parents)
if (dimids)
{
int n = 0;
int i;
/* Get dimension ids from this group. */
for(i=0;i<ncindexsize(grp->dim);i++) {
for(size_t i=0;i<ncindexsize(grp->dim);i++) {
dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,i);
if(dim == NULL) continue;
dimids[n++] = dim->hdr.id;
@ -450,13 +447,13 @@ NC4_inq_dimids(int ncid, int *ndims, int *dimids, int include_parents)
/* Get dimension ids from parent groups. */
if (include_parents)
for (g = grp->parent; g; g = g->parent) {
for(i=0;i<ncindexsize(g->dim);i++) {
for(size_t i=0;i<ncindexsize(g->dim);i++) {
dim = (NC_DIM_INFO_T*)ncindexith(g->dim,i);
if(dim == NULL) continue;
dimids[n++] = dim->hdr.id;
}
}
qsort(dimids, num, sizeof(int), int_cmp);
qsort(dimids, (size_t)num, sizeof(int), int_cmp);
}
/* If the user wants the number of dims, give it. */

View File

@ -472,7 +472,7 @@ nc4_find_grp_h5_var(int ncid, int varid, NC_FILE_INFO_T **h5, NC_GRP_INFO_T **gr
assert(my_grp && my_h5);
/* Find the var. */
if (!(my_var = (NC_VAR_INFO_T *)ncindexith(my_grp->vars, varid)))
if (!(my_var = (NC_VAR_INFO_T *)ncindexith(my_grp->vars, (size_t)varid)))
return NC_ENOTVAR;
assert(my_var && my_var->hdr.id == varid);
@ -552,7 +552,6 @@ nc4_rec_find_named_type(NC_GRP_INFO_T *start_grp, char *name)
{
NC_GRP_INFO_T *g;
NC_TYPE_INFO_T *type, *res;
int i;
assert(start_grp);
@ -562,7 +561,7 @@ nc4_rec_find_named_type(NC_GRP_INFO_T *start_grp, char *name)
return type;
/* Search subgroups. */
for(i=0;i<ncindexsize(start_grp->children);i++) {
for(size_t i=0;i<ncindexsize(start_grp->children);i++) {
g = (NC_GRP_INFO_T*)ncindexith(start_grp->children,i);
if(g == NULL) continue;
if ((res = nc4_rec_find_named_type(g, name)))
@ -639,7 +638,7 @@ nc4_find_grp_att(NC_GRP_INFO_T *grp, int varid, const char *name, int attnum,
}
else
{
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,varid);
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,(size_t)varid);
if (!var) return NC_ENOTVAR;
attlist = var->att;
@ -651,7 +650,7 @@ nc4_find_grp_att(NC_GRP_INFO_T *grp, int varid, const char *name, int attnum,
if (name)
my_att = (NC_ATT_INFO_T *)ncindexlookup(attlist, name);
else
my_att = (NC_ATT_INFO_T *)ncindexith(attlist, attnum);
my_att = (NC_ATT_INFO_T *)ncindexith(attlist, (size_t)attnum);
if (!my_att)
return NC_ENOTATT;
@ -715,7 +714,7 @@ obj_track(NC_FILE_INFO_T* file, NC_OBJ* obj)
assert(NC_FALSE);
}
/* Insert at the appropriate point in the list */
nclistset(list,obj->id,obj);
nclistset(list,(size_t)obj->id,obj);
}
/**
@ -748,7 +747,7 @@ nc4_var_list_add2(NC_GRP_INFO_T *grp, const char *name, NC_VAR_INFO_T **var)
new_var->chunkcache.preemption = gs->chunkcache.preemption;
/* Now fill in the values in the var info structure. */
new_var->hdr.id = ncindexsize(grp->vars);
new_var->hdr.id = (int)ncindexsize(grp->vars);
if (!(new_var->hdr.name = strdup(name))) {
if(new_var)
free(new_var);
@ -784,7 +783,7 @@ nc4_var_set_ndims(NC_VAR_INFO_T *var, int ndims)
assert(var);
/* Remember the number of dimensions. */
var->ndims = ndims;
var->ndims = (size_t)ndims;
/* Allocate space for dimension information. */
if (ndims)
@ -912,7 +911,7 @@ nc4_att_list_add(NCindex *list, const char *name, NC_ATT_INFO_T **att)
new_att->hdr.sort = NCATT;
/* Fill in the information we know. */
new_att->hdr.id = ncindexsize(list);
new_att->hdr.id = (int)ncindexsize(list);
if (!(new_att->hdr.name = strdup(name))) {
if(new_att)
free(new_att);
@ -1171,7 +1170,7 @@ nc4_field_list_add(NC_TYPE_INFO_T *parent, const char *name,
}
/* Add object to lists */
field->hdr.id = nclistlength(parent->u.c.field);
field->hdr.id = (int)nclistlength(parent->u.c.field);
nclistpush(parent->u.c.field,field);
return NC_NOERR;
@ -1363,14 +1362,13 @@ done:
static int
var_free(NC_VAR_INFO_T *var)
{
int i;
int retval;
assert(var);
LOG((4, "%s: deleting var %s", __func__, var->hdr.name));
/* First delete all the attributes attached to this var. */
for (i = 0; i < ncindexsize(var->att); i++)
for (size_t i = 0; i < ncindexsize(var->att); i++)
if ((retval = nc4_att_free((NC_ATT_INFO_T *)ncindexith(var->att, i))))
return retval;
ncindexfree(var->att);
@ -1429,7 +1427,7 @@ nc4_var_list_del(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var)
/* Remove from lists */
i = ncindexfind(grp->vars, (NC_OBJ *)var);
if (i >= 0)
ncindexidel(grp->vars, i);
ncindexidel(grp->vars, (size_t)i);
return var_free(var);
}
@ -1472,7 +1470,7 @@ nc4_dim_list_del(NC_GRP_INFO_T *grp, NC_DIM_INFO_T *dim)
{
int pos = ncindexfind(grp->dim, (NC_OBJ *)dim);
if(pos >= 0)
ncindexidel(grp->dim, pos);
ncindexidel(grp->dim, (size_t)pos);
}
return dim_free(dim);
@ -1490,7 +1488,6 @@ nc4_dim_list_del(NC_GRP_INFO_T *grp, NC_DIM_INFO_T *dim)
int
nc4_rec_grp_del(NC_GRP_INFO_T *grp)
{
int i;
int retval;
assert(grp);
@ -1498,34 +1495,34 @@ nc4_rec_grp_del(NC_GRP_INFO_T *grp)
/* Recursively call this function for each child, if any, stopping
* if there is an error. */
for (i = 0; i < ncindexsize(grp->children); i++)
for (size_t i = 0; i < ncindexsize(grp->children); i++)
if ((retval = nc4_rec_grp_del((NC_GRP_INFO_T *)ncindexith(grp->children,
i))))
return retval;
ncindexfree(grp->children);
/* Free attributes */
for (i = 0; i < ncindexsize(grp->att); i++)
for (size_t i = 0; i < ncindexsize(grp->att); i++)
if ((retval = nc4_att_free((NC_ATT_INFO_T *)ncindexith(grp->att, i))))
return retval;
ncindexfree(grp->att);
/* Delete all vars. */
for (i = 0; i < ncindexsize(grp->vars); i++) {
NC_VAR_INFO_T* v = (NC_VAR_INFO_T *)ncindexith(grp->vars, i);
for (size_t i = 0; i < ncindexsize(grp->vars); i++) {
NC_VAR_INFO_T* v = (NC_VAR_INFO_T *)ncindexith(grp->vars, i);
if ((retval = var_free(v)))
return retval;
}
ncindexfree(grp->vars);
/* Delete all dims, and free the list of dims. */
for (i = 0; i < ncindexsize(grp->dim); i++)
for (size_t i = 0; i < ncindexsize(grp->dim); i++)
if ((retval = dim_free((NC_DIM_INFO_T *)ncindexith(grp->dim, i))))
return retval;
ncindexfree(grp->dim);
/* Delete all types. */
for (i = 0; i < ncindexsize(grp->type); i++)
for (size_t i = 0; i < ncindexsize(grp->type); i++)
if ((retval = nc4_type_free((NC_TYPE_INFO_T *)ncindexith(grp->type, i))))
return retval;
ncindexfree(grp->type);
@ -1551,7 +1548,6 @@ nc4_rec_grp_del(NC_GRP_INFO_T *grp)
int
nc4_rec_grp_del_att_data(NC_GRP_INFO_T *grp)
{
int i;
int retval;
assert(grp);
@ -1559,25 +1555,24 @@ nc4_rec_grp_del_att_data(NC_GRP_INFO_T *grp)
/* Recursively call this function for each child, if any, stopping
* if there is an error. */
for (i = 0; i < ncindexsize(grp->children); i++)
for (size_t i = 0; i < ncindexsize(grp->children); i++)
if ((retval = nc4_rec_grp_del_att_data((NC_GRP_INFO_T *)ncindexith(grp->children, i))))
return retval;
/* Free attribute data in this group */
for (i = 0; i < ncindexsize(grp->att); i++) {
NC_ATT_INFO_T * att = (NC_ATT_INFO_T*)ncindexith(grp->att, i);
if((retval = NC_reclaim_data_all(grp->nc4_info->controller,att->nc_typeid,att->data,att->len)))
return retval;
for (size_t i = 0; i < ncindexsize(grp->att); i++) {
NC_ATT_INFO_T * att = (NC_ATT_INFO_T*)ncindexith(grp->att, i);
if((retval = NC_reclaim_data_all(grp->nc4_info->controller,att->nc_typeid,att->data,att->len)))
return retval;
att->data = NULL;
att->len = 0;
att->dirty = 0;
}
/* Delete att data from all contained vars in this group */
for (i = 0; i < ncindexsize(grp->vars); i++) {
int j;
for (size_t i = 0; i < ncindexsize(grp->vars); i++) {
NC_VAR_INFO_T* v = (NC_VAR_INFO_T *)ncindexith(grp->vars, i);
for(j=0;j<ncindexsize(v->att);j++) {
for(size_t j=0;j<ncindexsize(v->att);j++) {
NC_ATT_INFO_T* att = (NC_ATT_INFO_T*)ncindexith(v->att, j);
if((retval = NC_reclaim_data_all(grp->nc4_info->controller,att->nc_typeid,att->data,att->len)))
return retval;
@ -1604,7 +1599,7 @@ int
nc4_att_list_del(NCindex *list, NC_ATT_INFO_T *att)
{
assert(att && list);
ncindexidel(list, ((NC_OBJ *)att)->id);
ncindexidel(list, (size_t)((NC_OBJ *)att)->id);
return nc4_att_free(att);
}

View File

@ -16,7 +16,7 @@
#include <stddef.h>
#if 0
#ifdef ENABLE_DAP4
#ifdef NETCDF_ENABLE_DAP4
EXTERNL NC* NCD4_get_substrate_nc(int ncid);
#endif
#endif
@ -38,7 +38,7 @@ const char* nc4_atomic_name[NUM_ATOMIC_TYPES] = {"none", "byte", "char",
"double", "ubyte",
"ushort", "uint",
"int64", "uint64", "string"};
static const int nc4_atomic_size[NUM_ATOMIC_TYPES] = {0, NC_BYTE_LEN, NC_CHAR_LEN, NC_SHORT_LEN,
static const size_t nc4_atomic_size[NUM_ATOMIC_TYPES] = {0, NC_BYTE_LEN, NC_CHAR_LEN, NC_SHORT_LEN,
NC_INT_LEN, NC_FLOAT_LEN, NC_DOUBLE_LEN,
NC_BYTE_LEN, NC_SHORT_LEN, NC_INT_LEN, NC_INT64_LEN,
NC_INT64_LEN, NC_STRING_LEN};
@ -74,8 +74,7 @@ NC4_inq_typeids(int ncid, int *ntypes, int *typeids)
/* Count types. */
if (grp->type) {
int i;
for(i=0;i<ncindexsize(grp->type);i++)
for(size_t i=0;i<ncindexsize(grp->type);i++)
{
if((type = (NC_TYPE_INFO_T*)ncindexith(grp->type,i)) == NULL) continue;
if (typeids)

View File

@ -84,7 +84,7 @@ NC4_get_var_chunk_cache(int ncid, int varid, size_t *sizep,
assert(nc && grp && h5);
/* Find the var. */
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,varid);
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,(size_t)varid);
if(!var)
return NC_ENOTVAR;
assert(var && var->hdr.id == varid);
@ -129,7 +129,7 @@ nc_get_var_chunk_cache_ints(int ncid, int varid, int *sizep,
return ret;
if (sizep)
*sizep = real_size / MEGABYTE;
*sizep = (int)(real_size / MEGABYTE);
if (nelemsp)
*nelemsp = (int)real_nelems;
if(preemptionp)
@ -204,7 +204,7 @@ NC4_inq_var_all(int ncid, int varid, char *name, nc_type *xtypep,
}
/* Find the var. */
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, (size_t)varid)))
return NC_ENOTVAR;
assert(var && var->hdr.id == varid);
@ -214,7 +214,7 @@ NC4_inq_var_all(int ncid, int varid, char *name, nc_type *xtypep,
if (xtypep)
*xtypep = var->type_info->hdr.id;
if (ndimsp)
*ndimsp = var->ndims;
*ndimsp = (int)var->ndims;
if (dimidsp)
for (d = 0; d < var->ndims; d++)
dimidsp[d] = var->dimids[d];
@ -575,7 +575,7 @@ nc4_convert_type(const void *src, void *dest, const nc_type src_type,
}else if (quantize_mode == NC_QUANTIZE_BITROUND){
/* BitRound interprets nsd as number of significant binary digits (bits) */
prc_bnr_xpl_rqr = nsd;
prc_bnr_xpl_rqr = (unsigned short)nsd;
}
@ -1457,7 +1457,7 @@ nc4_convert_type(const void *src, void *dest, const nc_type src_type,
/* 20211003 Continuous determination of dgt_nbr improves CR by ~10% */
dgt_nbr = (int)floor(xpn_bs2 * dgt_per_bit + mnt_log10_fabs) + 1; /* DGG19 p. 4102 (8.67) */
qnt_pwr = (int)floor(bit_per_dgt * (dgt_nbr - nsd)); /* DGG19 p. 4101 (7) */
prc_bnr_xpl_rqr = mnt_fabs == 0.0 ? 0 : abs((int)floor(xpn_bs2 - bit_per_dgt*mnt_log10_fabs) - qnt_pwr); /* Protect against mnt = -0.0 */
prc_bnr_xpl_rqr = mnt_fabs == 0.0 ? 0 : (unsigned short)abs((int)floor(xpn_bs2 - bit_per_dgt*mnt_log10_fabs) - qnt_pwr); /* Protect against mnt = -0.0 */
prc_bnr_xpl_rqr--; /* 20211003 Reduce formula result by 1 bit: Passes all tests, improves CR by ~10% */
bit_xpl_nbr_zro = BIT_XPL_NBR_SGN_FLT - prc_bnr_xpl_rqr;
@ -1491,7 +1491,7 @@ nc4_convert_type(const void *src, void *dest, const nc_type src_type,
/* 20211003 Continuous determination of dgt_nbr improves CR by ~10% */
dgt_nbr = (int)floor(xpn_bs2 * dgt_per_bit + mnt_log10_fabs) + 1; /* DGG19 p. 4102 (8.67) */
qnt_pwr = (int)floor(bit_per_dgt * (dgt_nbr - nsd)); /* DGG19 p. 4101 (7) */
prc_bnr_xpl_rqr = mnt_fabs == 0.0 ? 0 : abs((int)floor(xpn_bs2 - bit_per_dgt*mnt_log10_fabs) - qnt_pwr); /* Protect against mnt = -0.0 */
prc_bnr_xpl_rqr = mnt_fabs == 0.0 ? 0 : (unsigned short)abs((int)floor(xpn_bs2 - bit_per_dgt*mnt_log10_fabs) - qnt_pwr); /* Protect against mnt = -0.0 */
prc_bnr_xpl_rqr--; /* 20211003 Reduce formula result by 1 bit: Passes all tests, improves CR by ~10% */
bit_xpl_nbr_zro = BIT_XPL_NBR_SGN_DBL - prc_bnr_xpl_rqr;

View File

@ -162,8 +162,7 @@ int
ncindexcount(NCindex* index)
{
int count = 0;
int i;
for(i=0;i<ncindexsize(index);i++) {
for(size_t i=0;i<ncindexsize(index);i++) {
if(ncindexith(index,i) != NULL) count++;
}
return count;

View File

@ -6,10 +6,11 @@
# See netcdf-c/COPYRIGHT file for more info.
add_library(netcdfp OBJECT ${libsrcp_SOURCES})
target_link_libraries(netcdfp PUBLIC PNETCDF::PNETCDF)
target_sources(netcdfp PRIVATE ncpdispatch.c)
if (ENABLE_DLL)
if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdfp PRIVATE DLL_NETCDF DLL_EXPORT)
endif()

Some files were not shown because too many files have changed in this diff Show More