Merge branch 'main' into export-targets-for-build-dir

* main: (110 commits)
  Escape a character causing a doxygen error.
  Updated release notes.
  Added a comment block for future reference.
  more syntax fixes
  Update CMakeLists.txt
  CMake: Find HDF5 header we can safely include for other checks
  moving functions and macros to new file, lowercase things
  Update release notes.
  lowercase
  lower case
  lowercase
  moving functions and macros to a file
  moving the dependencies inclusion
  CMake: Add support for UNITY_BUILD
  removing debug messages
  actually adding the dependencies file...
  putting dependencies into separate file
  Define USE_SZIP variable for nc-config.cmake.in
  matching cmake variables in autotools configuration
  moving the version into the project command in cmake
  ...
This commit is contained in:
Peter Hill 2024-01-24 11:26:44 +00:00
commit 25dc1faa60
No known key found for this signature in database
GPG Key ID: 0C6B9742E72848EE
370 changed files with 6078 additions and 5036 deletions

179
.github/workflows/run_tests_cdash.yml vendored Normal file
View File

@ -0,0 +1,179 @@
###
# Build hdf5 dependencies and cache them in a combined directory.
###
name: Run CDash Ubuntu/Linux netCDF Tests

# Manual trigger only: CDash submissions are run on demand, not per-PR.
on: workflow_dispatch

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:

  build-deps-cdash:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Quoted so YAML keeps the versions as strings, never numbers.
        hdf5: [ "1.10.8", "1.12.2", "1.14.0" ]
    steps:
      - uses: actions/checkout@v3

      - name: Install System dependencies
        shell: bash -l {0}
        run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev doxygen openssl

      ###
      # Installing libhdf5
      ###
      - name: Cache libhdf5-${{ matrix.hdf5 }}
        id: cache-hdf5
        uses: actions/cache@v3
        with:
          path: ~/environments/${{ matrix.hdf5 }}
          key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

      - name: Build libhdf5-${{ matrix.hdf5 }}
        if: steps.cache-hdf5.outputs.cache-hit != 'true'
        run: |
          set -x
          wget https://support.hdfgroup.org/ftp/HDF/releases/HDF4.2.15/src/hdf-4.2.15.tar.bz2
          tar -jxf hdf-4.2.15.tar.bz2
          pushd hdf-4.2.15
          ./configure --prefix=${HOME}/environments/${{ matrix.hdf5 }} --disable-static --enable-shared --disable-fortran --disable-netcdf --with-szlib --enable-hdf4-xdr
          make -j
          make install -j
          popd
          wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-$(echo ${{ matrix.hdf5 }} | cut -d. -f 1,2)/hdf5-${{ matrix.hdf5 }}/src/hdf5-${{ matrix.hdf5 }}.tar.bz2
          tar -jxf hdf5-${{ matrix.hdf5 }}.tar.bz2
          pushd hdf5-${{ matrix.hdf5 }}
          ./configure --disable-static --enable-shared --prefix=${HOME}/environments/${{ matrix.hdf5 }} --enable-hl --with-szlib
          make -j
          make install -j
          popd

  build-deps-parallel:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        hdf5: [ "1.14.0" ]
    steps:
      - uses: actions/checkout@v3

      - name: Install System dependencies
        shell: bash -l {0}
        run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev mpich libmpich-dev

      ###
      # Installing libhdf5
      ###
      - name: Cache libhdf5-parallel-${{ matrix.hdf5 }}
        id: cache-hdf5
        uses: actions/cache@v3
        with:
          path: ~/environments/${{ matrix.hdf5 }}
          key: hdf5-parallel-${{ runner.os }}-${{ matrix.hdf5 }}

      - name: Build libhdf5-${{ matrix.hdf5 }}-pnetcdf-1.12.3
        if: steps.cache-hdf5.outputs.cache-hit != 'true'
        run: |
          set -x
          wget https://support.hdfgroup.org/ftp/HDF/releases/HDF4.2.15/src/hdf-4.2.15.tar.bz2
          tar -jxf hdf-4.2.15.tar.bz2
          pushd hdf-4.2.15
          CC=mpicc ./configure --prefix=${HOME}/environments/${{ matrix.hdf5 }} --disable-static --enable-shared --disable-fortran --disable-netcdf --with-szlib --enable-parallel --enable-hdf4-xdr
          make -j
          make install -j
          popd
          wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-$(echo ${{ matrix.hdf5 }} | cut -d. -f 1,2)/hdf5-${{ matrix.hdf5 }}/src/hdf5-${{ matrix.hdf5 }}.tar.bz2
          tar -jxf hdf5-${{ matrix.hdf5 }}.tar.bz2
          pushd hdf5-${{ matrix.hdf5 }}
          CC=mpicc ./configure --disable-static --enable-shared --prefix=${HOME}/environments/${{ matrix.hdf5 }} --enable-hl --with-szlib --enable-parallel
          make -j
          make install -j
          popd
          wget https://parallel-netcdf.github.io/Release/pnetcdf-1.12.3.tar.gz
          tar -zxf pnetcdf-1.12.3.tar.gz
          pushd pnetcdf-1.12.3
          CC=mpicc ./configure --disable-static --enable-shared --prefix=${HOME}/environments/${{ matrix.hdf5 }}
          make -j
          make install -j
          popd

  ###
  # Run CTest Serial Script
  ###
  nc-ctest-serial:
    needs: build-deps-cdash
    runs-on: ubuntu-latest
    environment: CDashCI
    strategy:
      matrix:
        hdf5: [ "1.10.8", "1.12.2", "1.14.0" ]
    steps:
      # NOTE(review): the original also passed CDASH_TOKEN via `with:`, but
      # actions/checkout declares no such input, so the runner ignored it
      # with a warning; the secret is supplied through `env` only.
      - uses: actions/checkout@v3
        env:
          CDASH_TOKEN: ${{ secrets.CDASH_TOKEN }}

      - name: Install System dependencies
        shell: bash -l {0}
        run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev

      ###
      # Set Environmental Variables
      ###
      - run: echo "CMAKE_PREFIX_PATH=${HOME}/environments/${{ matrix.hdf5 }}/" >> $GITHUB_ENV
      - run: echo "LD_LIBRARY_PATH=${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
      - run: echo "CTEST_OUTPUT_ON_FAILURE=1" >> $GITHUB_ENV

      ###
      # Fetch Cache
      ###
      - name: Fetch HDF Cache
        id: cache-hdf5
        uses: actions/cache@v3
        with:
          path: ~/environments/${{ matrix.hdf5 }}
          key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

      - name: Check Cache
        shell: bash -l {0}
        run: ls ${HOME}/environments/${{ matrix.hdf5 }} && ls ${HOME}/environments/${{ matrix.hdf5 }}/lib

      - name: Run ctest serial script
        shell: bash -l {0}
        env:
          CDASH_TOKEN: ${{ secrets.CDASH_TOKEN }}
        run: |
          mkdir build
          cd build
          LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ctest -j 12 -V -S ../ctest_scripts/ctest_serial.ctest

      - name: Verbose Output if CTest Failure
        shell: bash -l {0}
        run: |
          cd build
          LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ctest -j 12 --rerun-failed --output-on-failure -VV
        if: ${{ failure() }}

View File

@ -6,9 +6,12 @@
name: Run macOS-based netCDF Tests
on: [pull_request,workflow_dispatch]
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
build-deps-osx:

View File

@ -11,6 +11,10 @@ name: Run S3 netCDF Tests (under Ubuntu Linux)
on: [workflow_dispatch]
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
build-deps-serial:

View File

@ -6,6 +6,10 @@ name: Run Ubuntu/Linux netCDF Tests
on: [pull_request, workflow_dispatch]
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
build-deps-serial:
@ -196,6 +200,19 @@ jobs:
CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
if: ${{ success() }}
- name: Create source distribution
shell: bash -l {0}
if: ${{ success() }}
run: make dist -j
- uses: actions/upload-artifact@v3
with:
name: netcdf-c-autotools-source-distribution
path: |
*.tar*
*.zip
*.tgz
##
# Parallel
##
@ -449,7 +466,28 @@ jobs:
use_nczarr: [ nczarr_off, nczarr_on ]
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: netcdf-c-autotools-source-distribution
- name: Unpack source distribution
shell: bash -l {0}
run: |
if [ -f *.zip ];
then
unzip *.zip
else
tar xvzf $(ls *.tar* *.tgz *.zip | head -1)
fi
ls -d netcdf-c*
for name in netcdf-c*;
do
if [ -d ${name} ];
then
cd ${name}
break
fi
done
- name: Install System dependencies
shell: bash -l {0}
@ -498,11 +536,28 @@ jobs:
- name: Run autoconf
shell: bash -l {0}
run: autoreconf -if
run: |
for name in netcdf-c*;
do
if [ -d ${name} ];
then
cd ${name}
break
fi
done
autoreconf -if
- name: Configure
shell: bash -l {0}
run: |
for name in netcdf-c*;
do
if [ -d ${name} ];
then
cd ${name}
break
fi
done
current_directory="$(pwd)"
mkdir ../build
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} "${current_directory}/configure" ${ENABLE_HDF5} ${ENABLE_DAP} ${ENABLE_NCZARR}
@ -510,29 +565,56 @@ jobs:
- name: Look at config.log if error
shell: bash -l {0}
run: cd ../build && cat config.log
run: |
if [ -d ../build ];
then
cd ../build
else
cd build
fi && cat config.log
if: ${{ failure() }}
- name: Print Summary
shell: bash -l {0}
run: cd ../build && cat libnetcdf.settings
run: |
if [ -d ../build ];
then
cd ../build
else
cd build
fi && cat libnetcdf.settings
- name: Build Library and Utilities
shell: bash -l {0}
run: |
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make -j
if [ -d ../build ];
then
cd ../build
else
cd build
fi && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make -j
if: ${{ success() }}
- name: Build Tests
shell: bash -l {0}
run: |
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check TESTS="" -j
if [ -d ../build ];
then
cd ../build
else
cd build
fi && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check TESTS="" -j
if: ${{ success() }}
- name: Run Tests
shell: bash -l {0}
run: |
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
if [ -d ../build ];
then
cd ../build
else
cd build
fi && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
if: ${{ success() }}
nc-cmake:

View File

@ -2,6 +2,10 @@ name: Run Cygwin-based tests
on: [pull_request,workflow_dispatch]
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true
env:
SHELLOPTS: igncr
CHERE_INVOKING: 1

View File

@ -4,13 +4,17 @@
# for information related to github runners.
###
name: Run MSYS2, MinGW64-based Tests
name: Run MSYS2, MinGW64-based Tests (Not Visual Studio)
env:
CPPFLAGS: "-D_BSD_SOURCE"
on: [pull_request,workflow_dispatch]
concurrency:
group: ${{ github.workflow}}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
build-and-test-autotools:

1
.gitignore vendored
View File

@ -35,6 +35,7 @@ test-driver
#####
### 'Normal' gitignore files.
docs/auth.html
.vscode
nug.tag
netcdf-c.tag

View File

@ -10,13 +10,13 @@
# build binary installers.
#####
SET(CPACK_PACKAGE_VENDOR "Unidata")
set(CPACK_PACKAGE_VENDOR "Unidata")
##
# Declare exclusions list used when building a source file.
# NOTE!! This list uses regular expressions, NOT wildcards!!
##
SET(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}"
set(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}"
"/expecttds3/"
"/nocacheremote3/"
"/nocacheremote4/"
@ -46,21 +46,21 @@ SET(CPACK_SOURCE_IGNORE_FILES "${CPACK_SOURCE_IGNORE_FILES}"
# Nullsoft Installation System (NSIS)
###
SET(CPACK_PACKAGE_CONTACT "NetCDF Support <support-netcdf@unidata.ucar.edu>")
set(CPACK_PACKAGE_CONTACT "NetCDF Support <support-netcdf@unidata.ucar.edu>")
IF(WIN32)
SET(CPACK_NSIS_MODIFY_PATH ON)
SET(CPACK_NSIS_DISPLAY_NAME "NetCDF ${netCDF_VERSION}")
SET(CPACK_NSIS_PACKAGE_NAME "NetCDF ${netCDF_VERSION}")
SET(CPACK_NSIS_HELP_LINK "https://www.unidata.ucar.edu/netcdf")
SET(CPACK_NSIS_URL_INFO_ABOUT "https://www.unidata.ucar.edu/netcdf")
SET(CPACK_NSIS_CONTACT "support-netcdf@unidata.ucar.edu")
SET(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL ON)
SET(CPACK_NSIS_MENU_LINKS
if(WIN32)
set(CPACK_NSIS_MODIFY_PATH ON)
set(CPACK_NSIS_DISPLAY_NAME "NetCDF ${netCDF_VERSION}")
set(CPACK_NSIS_PACKAGE_NAME "NetCDF ${netCDF_VERSION}")
set(CPACK_NSIS_HELP_LINK "https://www.unidata.ucar.edu/netcdf")
set(CPACK_NSIS_URL_INFO_ABOUT "https://www.unidata.ucar.edu/netcdf")
set(CPACK_NSIS_CONTACT "support-netcdf@unidata.ucar.edu")
set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL ON)
set(CPACK_NSIS_MENU_LINKS
"https://www.unidata.ucar.edu/software/netcdf" "Unidata Website"
"https://docs.unidata.ucar.edu/netcdf-c" "NetCDF Stable Documentation")
ENDIF()
endif()
###
# Set debian-specific options used when
@ -70,52 +70,48 @@ ENDIF()
###
# This should be set using the output of dpkg --print-architecture.
FIND_PROGRAM(NC_DPKG NAMES dpkg)
IF(NC_DPKG)
if(NC_DPKG)
# Define a macro for getting the dpkg architecture.
MACRO(getdpkg_arch arch)
exec_program("${NC_DPKG}" ARGS "--print-architecture" OUTPUT_VARIABLE "${arch}")
ENDMACRO(getdpkg_arch)
getdpkg_arch(dpkg_arch)
SET(CPACK_DEBIAN_PACKAGE_NAME "netcdf4-dev")
SET(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "${dpkg_arch}")
SET(CPACK_DEBIAN_PACKAGE_DEPENDS "zlib1g (>= 1:1.2.3.4), libhdf5-7 (>= 1.8.11), libcurl4-openssl-dev (>= 7.22.0)")
ENDIF()
set(CPACK_DEBIAN_PACKAGE_NAME "netcdf4-dev")
set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "${dpkg_arch}")
set(CPACK_DEBIAN_PACKAGE_DEPENDS "zlib1g (>= 1:1.2.3.4), libhdf5-7 (>= 1.8.11), libcurl4-openssl-dev (>= 7.22.0)")
endif()
##
# Set Copyright, License info for CPack.
##
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/COPYRIGHT
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/COPYRIGHT
${CMAKE_CURRENT_BINARY_DIR}/COPYRIGHT.txt
@ONLY
)
SET(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_BINARY_DIR}/COPYRIGHT.txt")
IF(NOT CPACK_PACK_VERSION)
SET(CPACK_PACKAGE_VERSION ${VERSION})
ENDIF()
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_BINARY_DIR}/COPYRIGHT.txt")
if(NOT CPACK_PACK_VERSION)
set(CPACK_PACKAGE_VERSION ${VERSION})
endif()
IF(UNIX)
SET(CPACK_GENERATOR "STGZ" "TBZ2" "DEB" "ZIP")
ENDIF()
if(UNIX)
set(CPACK_GENERATOR "STGZ" "TBZ2" "DEB" "ZIP")
endif()
IF(APPLE)
SET(CPACK_SOURCE_GENERATOR "TGZ")
SET(CPACK_GENERATOR "productbuild" "STGZ" "TBZ2" "TGZ" "ZIP")
ENDIF()
if(APPLE)
set(CPACK_SOURCE_GENERATOR "TGZ")
set(CPACK_GENERATOR "productbuild" "STGZ" "TBZ2" "TGZ" "ZIP")
endif()
##
# Create an 'uninstall' target.
##
CONFIGURE_FILE(
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
IMMEDIATE @ONLY)
ADD_CUSTOM_TARGET(uninstall
add_custom_target(uninstall
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
##
@ -133,4 +129,4 @@ set(CPACK_COMPONENT_DEPENDENCIES_DESCRIPTION
set(CPACK_COMPONENT_DOCUMENTATION_DESCRIPTION
"The NetCDF-C user documentation.")
INCLUDE(CPack)
include(CPack)

File diff suppressed because it is too large Load Diff

View File

@ -211,7 +211,11 @@ install-data-hook:
all-local: liblib/libnetcdf.la
echo ${PACKAGE_VERSION} > VERSION
if ENABLE_S3_TESTALL
rm -f ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
echo "@TESTUID@" >> ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
cat ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids | sort | uniq > ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
rm -f ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
mv ${abs_top_builddir}/tmp_@PLATFORMUID@.uids ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
endif
if ENABLE_S3_TESTALL

View File

@ -1 +1 @@
EXECUTE_PROCESS(COMMAND sh -c "${CMAKE_BINARY_DIR}/postinstall.sh -t cmake")
execute_process(COMMAND sh -c "${CMAKE_BINARY_DIR}/postinstall.sh -t cmake")

View File

@ -7,7 +7,17 @@ This file contains a high-level description of this package's evolution. Release
## 4.9.3 - TBD
* Added infrastructure to allow for `CMAKE_UNITY_BUILD`, (thanks \@jschueller). See [Github #2839](https://github.com/Unidata/netcdf-c/pull/2839) for more information.
* [cmake] Move dependency management out of the root-level `CMakeLists.txt` into two different files in the `cmake/` folder, `dependencies.cmake` and `netcdf_functions_macros.cmake`. See [Github #2838](https://github.com/Unidata/netcdf-c/pull/2838/) for more information.
* Obviate a number of irrelevant warnings. See [Github #2781](https://github.com/Unidata/netcdf-c/pull/2781).
* Improve the speed and data quantity for DAP4 queries. See [Github #2765](https://github.com/Unidata/netcdf-c/pull/2765).
* Remove the use of execinfo to programmatically dump the stack; it never worked. See [Github #2789](https://github.com/Unidata/netcdf-c/pull/2789).
* Update the internal copy of tinyxml2 to latest code. See [Github #2771](https://github.com/Unidata/netcdf-c/pull/2771).
* Mitigate the problem of remote/nczarr-related test interference. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755).
* Fix DAP2 proxy problems. See [Github #2764](https://github.com/Unidata/netcdf-c/pull/2764).
* Cleanup a number of misc issues. See [Github #2763](https://github.com/Unidata/netcdf-c/pull/2763).
* Mitigate the problem of test interference. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755).
* Extend NCZarr to support unlimited dimensions. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755).
* Fix significant bug in the NCZarr cache management. See [Github #2737](https://github.com/Unidata/netcdf-c/pull/2737).
* Fix default parameters for caching of NCZarr. See [Github #2734](https://github.com/Unidata/netcdf-c/pull/2734).

55
cmake/check_hdf5.cmake Normal file
View File

@ -0,0 +1,55 @@
# Work out which HDF5 config header we can safely include
#
# We'd like to just use H5public.h, but if HDF5 was built against MPI, this
# might require us to have found MPI already. The next best file is H5pubconf.h,
# which actually has all the feature macros we want to check, but some
# distributions rename this for multiarch, so we've got to check some different
# names.
#
# HDF5_INCLUDE_DIR should already be set before calling this
# Detect the first includable HDF5 config header and cache its name in the
# INTERNAL cache variable _H5_FEATURE_HEADER. Fatal error if none of the
# candidate headers can be included. HDF5_INCLUDE_DIR must already be set.
function(check_hdf5_feature_header)
  # Already detected on a previous configure/call (INTERNAL cache variable).
  if (_H5_FEATURE_HEADER)
    return()
  endif()

  include(CheckIncludeFile)
  set(CMAKE_REQUIRED_INCLUDES ${HDF5_INCLUDE_DIR})

  message(STATUS "Checking for HDF5 config header")
  foreach(_h5_header "H5public.h" "H5pubconf.h" "H5pubconf-64.h" "H5pubconf-32.h")
    # check_include_file() caches its result variable and skips the check
    # when the variable is already set. The original reused one name
    # (_can_include_h5_header) for every iteration, so once the first
    # header failed, the cached FALSE made every fallback header appear
    # to fail without ever being tested. Use a per-header variable.
    string(MAKE_C_IDENTIFIER "_can_include_${_h5_header}" _h5_result_var)
    check_include_file(${_h5_header} ${_h5_result_var})
    if (${_h5_result_var})
      message(STATUS "Using ${_h5_header} to check for feature macros")
      set(_H5_FEATURE_HEADER ${_h5_header} CACHE INTERNAL "")
      return()
    endif()
  endforeach()

  message(FATAL_ERROR "Could not include any HDF5 config headers")
endfunction()
# Check for an HDF5 feature macro named FEATURE and store the result in VAR
#
# This just wraps `check_c_source_compiles` but ensures we use the correct header
function(check_hdf5_feature VAR FEATURE)
# Lazily work out which HDF5 config header is includable; the detected name
# is cached in _H5_FEATURE_HEADER, so the detection only runs once.
if (NOT _H5_FEATURE_HEADER)
check_hdf5_feature_header()
endif()
include(CheckCSourceCompiles)
# NOTE(review): assumes HDF5_INCLUDE_DIR is set before this file is
# included — confirm against the caller.
set(CMAKE_REQUIRED_INCLUDES ${HDF5_INCLUDE_DIR})
message(STATUS "Checking for ${FEATURE}")
# The probe program compiles only when ${FEATURE} expands to a truthy
# preprocessor value, so the compile result IS the feature-test result.
# check_c_source_compiles() caches it per feature under _has_<FEATURE>.
check_c_source_compiles("
#include <${_H5_FEATURE_HEADER}>
#if !${FEATURE}
#error
#endif
int main() {}"
_has_${FEATURE})
# Export the (cached) result to the caller under the requested name.
set(${VAR} ${_has_${FEATURE}} PARENT_SCOPE)
endfunction()

650
cmake/dependencies.cmake Normal file
View File

@ -0,0 +1,650 @@
################################
# PkgConfig
################################
find_package(PkgConfig QUIET)
################################
# MakeDist
################################
# Enable 'dist and distcheck'.
# File adapted from http://ensc.de/cmake/FindMakeDist.cmake
find_package(MakeDist)
# End 'enable dist and distcheck'
################################
# HDF4
################################
if(ENABLE_HDF4)
set(USE_HDF4 ON )
# Check for include files, libraries.
find_path(MFHDF_H_INCLUDE_DIR mfhdf.h)
if(NOT MFHDF_H_INCLUDE_DIR)
message(FATAL_ERROR "HDF4 Support specified, cannot find file mfhdf.h")
else()
include_directories(${MFHDF_H_INCLUDE_DIR})
endif()
find_library(HDF4_DF_LIB NAMES df libdf hdf)
if(NOT HDF4_DF_LIB)
message(FATAL_ERROR "Can't find or link to the hdf4 df library.")
endif()
find_library(HDF4_MFHDF_LIB NAMES mfhdf libmfhdf)
if(NOT HDF4_MFHDF_LIB)
message(FATAL_ERROR "Can't find or link to the hdf4 mfhdf library.")
endif()
set(HAVE_LIBMFHDF TRUE )
set(HDF4_LIBRARIES ${HDF4_DF_LIB} ${HDF4_MFHDF_LIB} )
# End include files, libraries.
message(STATUS "HDF4 libraries: ${HDF4_DF_LIB}, ${HDF4_MFHDF_LIB}")
message(STATUS "Seeking HDF4 jpeg dependency.")
# Look for the jpeglib.h header file.
find_path(JPEGLIB_H_INCLUDE_DIR jpeglib.h)
if(NOT JPEGLIB_H_INCLUDE_DIR)
message(FATAL_ERROR "HDF4 Support enabled but cannot find jpeglib.h")
else()
set(HAVE_JPEGLIB_H ON CACHE BOOL "")
set(HAVE_LIBJPEG TRUE )
include_directories(${JPEGLIB_H_INCLUDE_DIR})
endif()
find_library(JPEG_LIB NAMES jpeg libjpeg)
if(NOT JPEG_LIB)
message(FATAL_ERROR "HDF4 Support enabled but cannot find libjpeg")
endif()
set(HDF4_LIBRARIES ${JPEG_LIB} ${HDF4_LIBRARIES} )
message(STATUS "Found JPEG libraries: ${JPEG_LIB}")
# Option to enable HDF4 file tests.
option(ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata ftp site to test with (requires curl)." ON)
if(ENABLE_HDF4_FILE_TESTS)
find_program(PROG_CURL NAMES curl)
if(PROG_CURL)
set(USE_HDF4_FILE_TESTS ON )
else()
message(STATUS "Unable to locate 'curl'. Disabling hdf4 file tests.")
set(USE_HDF4_FILE_TESTS OFF )
endif()
set(USE_HDF4_FILE_TESTS ${USE_HDF4_FILE_TESTS} )
endif()
endif()
################################
# HDF5
################################
##
# Option to Enable HDF5
#
# The HDF5 cmake variables differ between platform (linux/osx and Windows),
# as well as between HDF5 versions. As a result, this section is a bit convoluted.
#
# Note that the behavior seems much more stable across HDF5 versions under linux,
# so we do not have to do as much version-based tweaking.
#
# At the end of it, we should have the following defined:
#
# * HDF5_C_LIBRARY
# * HDF5_HL_LIBRARY
# * HDF5_LIBRARIES
# * HDF5_INCLUDE_DIR
# *
##
if(USE_HDF5)
##
# Assert HDF5 version meets minimum required version.
##
set(HDF5_VERSION_REQUIRED 1.8.10)
##
# Accommodate developers who have hdf5 libraries and
# headers on their system, but do not have a the hdf
# .cmake files. If this is the case, they should
# specify HDF5_HL_LIBRARY, HDF5_LIBRARY, HDF5_INCLUDE_DIR manually.
#
# This script will attempt to determine the version of the HDF5 library programatically.
##
if(HDF5_C_LIBRARY AND HDF5_HL_LIBRARY AND HDF5_INCLUDE_DIR)
set(HDF5_LIBRARIES ${HDF5_C_LIBRARY} ${HDF5_HL_LIBRARY} )
set(HDF5_C_LIBRARIES ${HDF5_C_LIBRARY} )
set(HDF5_C_LIBRARY_hdf5 ${HDF5_C_LIBRARY} )
set(HDF5_HL_LIBRARIES ${HDF5_HL_LIBRARY} )
include_directories(${HDF5_INCLUDE_DIR})
message(STATUS "Using HDF5 C Library: ${HDF5_C_LIBRARY}")
message(STATUS "Using HDF5 HL LIbrary: ${HDF5_HL_LIBRARY}")
if (EXISTS "${HDF5_INCLUDE_DIR}/H5pubconf.h")
file(READ "${HDF5_INCLUDE_DIR}/H5pubconf.h" _hdf5_version_lines
REGEX "#define[ \t]+H5_VERSION")
string(REGEX REPLACE ".*H5_VERSION .*\"\(.*\)\".*" "\\1" _hdf5_version "${_hdf5_version_lines}")
set(HDF5_VERSION "${_hdf5_version}" CACHE STRING "")
set(HDF5_VERSION ${HDF5_VERSION} PARENT_SCOPE)
unset(_hdf5_version)
unset(_hdf5_version_lines)
endif ()
message(STATUS "Found HDF5 libraries version ${HDF5_VERSION}")
###
# If HDF5_VERSION is still empty, we have a problem.
# Error out.
###
if("${HDF5_VERSION}" STREQUAL "")
message(FATAL_ERR "Unable to determine HDF5 version. NetCDF requires at least version ${HDF5_VERSION_REQUIRED}. Please ensure that libhdf5 is installed and accessible.")
endif()
###
# Now that we know HDF5_VERSION isn't empty, we can check for minimum required version,
# and toggle various options.
###
if(${HDF5_VERSION} VERSION_LESS ${HDF5_VERSION_REQUIRED})
message(FATAL_ERROR "netCDF requires at least HDF5 ${HDF5_VERSION_REQUIRED}. Found ${HDF5_VERSION}.")
endif()
else(HDF5_C_LIBRARY AND HDF5_HL_LIBRARY AND HDF5_INCLUDE_DIR) # We are seeking out HDF5 with Find Package.
###
# For now we assume that if we are building netcdf
# as a shared library, we will use hdf5 as a shared
# library. If we are building netcdf statically,
# we will use a static library. This can be toggled
# by explicitly modifying NC_FIND_SHARED_LIBS.
##
if(NC_FIND_SHARED_LIBS)
set(NC_HDF5_LINK_TYPE "shared")
set(NC_HDF5_LINK_TYPE_UPPER "SHARED")
ADD_DEFINITIONS(-DH5_BUILT_AS_DYNAMIC_LIB)
else(NC_FIND_SHARED_LIBS)
set(NC_HDF5_LINK_TYPE "static")
set(NC_HDF5_LINK_TYPE_UPPER "STATIC")
ADD_DEFINITIONS(-DH5_BUILT_AS_STATIC_LIB )
endif(NC_FIND_SHARED_LIBS)
#####
# First, find the C and HL libraries.
#
# This has been updated to reflect what is in the hdf5
# examples, even though the previous version of what we
# had worked.
#####
if(MSVC)
set(SEARCH_PACKAGE_NAME ${HDF5_PACKAGE_NAME})
find_package(HDF5 NAMES ${SEARCH_PACKAGE_NAME} COMPONENTS C HL CONFIG REQUIRED ${NC_HDF5_LINK_TYPE})
else(MSVC)
find_package(HDF5 COMPONENTS C HL REQUIRED)
endif(MSVC)
##
# Next, check the HDF5 version. This will inform which
# HDF5 variables we need to munge.
##
# Some versions of HDF5 set HDF5_VERSION_STRING instead of HDF5_VERSION
if(HDF5_VERSION_STRING AND NOT HDF5_VERSION)
set(HDF5_VERSION ${HDF5_VERSION_STRING})
endif()
###
# If HDF5_VERSION is undefined, attempt to determine it programatically.
###
if("${HDF5_VERSION}" STREQUAL "")
message(STATUS "HDF5_VERSION not detected. Attempting to determine programatically.")
IF (EXISTS "${HDF5_INCLUDE_DIR}/H5pubconf.h")
file(READ "${HDF5_INCLUDE_DIR}/H5pubconf.h" _hdf5_version_lines
REGEX "#define[ \t]+H5_VERSION")
string(REGEX REPLACE ".*H5_VERSION .*\"\(.*\)\".*" "\\1" _hdf5_version "${_hdf5_version_lines}")
set(HDF5_VERSION "${_hdf5_version}" CACHE STRING "")
unset(_hdf5_version)
unset(_hdf5_version_lines)
message(STATUS "Found HDF5 libraries version ${HDF5_VERSION}")
endif()
else()
set(HDF5_VERSION ${HDF5_VERSION} CACHE STRING "")
endif()
###
# If HDF5_VERSION is still empty, we have a problem.
# Error out.
###
if("${HDF5_VERSION}" STREQUAL "")
message(FATAL_ERR "Unable to determine HDF5 version. NetCDF requires at least version ${HDF5_VERSION_REQUIRED}. Please ensure that libhdf5 is installed and accessible.")
endif()
###
# Now that we know HDF5_VERSION isn't empty, we can check for minimum required version,
# and toggle various options.
###
if(${HDF5_VERSION} VERSION_LESS ${HDF5_VERSION_REQUIRED})
message(FATAL_ERROR "netCDF requires at least HDF5 ${HDF5_VERSION_REQUIRED}. Found ${HDF5_VERSION}.")
endif()
##
# Include the HDF5 include directory.
##
if(HDF5_INCLUDE_DIRS AND NOT HDF5_INCLUDE_DIR)
set(HDF5_INCLUDE_DIR ${HDF5_INCLUDE_DIRS} )
endif()
message(STATUS "Using HDF5 include dir: ${HDF5_INCLUDE_DIR}")
include_directories(${HDF5_INCLUDE_DIR})
###
# This is the block where we figure out what the appropriate
# variables are, and we ensure that we end up with
# HDF5_C_LIBRARY, HDF5_HL_LIBRARY and HDF5_LIBRARIES.
###
if(MSVC)
####
# Environmental variables in Windows when using MSVC
# are a hot mess between versions.
####
##
# HDF5 1.8.15 defined HDF5_LIBRARIES.
##
if(${HDF5_VERSION} VERSION_LESS "1.8.16")
set(HDF5_C_LIBRARY hdf5 )
set(HDF5_C_LIBRARY_hdf5 hdf5 )
endif(${HDF5_VERSION} VERSION_LESS "1.8.16")
if(${HDF5_VERSION} VERSION_GREATER "1.8.15")
if(NOT HDF5_LIBRARIES AND HDF5_C_${NC_HDF5_LINK_TYPE_UPPER}_LIBRARY AND HDF5_HL_${NC_HDF5_LINK_TYPE_UPPER}_LIBRARY)
set(HDF5_C_LIBRARY ${HDF5_C_${NC_HDF5_LINK_TYPE_UPPER}_LIBRARY} )
set(HDF5_C_LIBRARY_hdf5 ${HDF5_C_${NC_HDF5_LINK_TYPE_UPPER}_LIBRARY} )
set(HDF5_HL_LIBRARY ${HDF5_HL_${NC_HDF5_LINK_TYPE_UPPER}_LIBRARY} )
set(HDF5_LIBRARIES ${HDF5_C_${NC_HDF5_LINK_TYPE_UPPER}_LIBRARY} ${HDF5_HL_${NC_HDF5_LINK_TYPE_UPPER}_LIBRARY} )
endif()
endif(${HDF5_VERSION} VERSION_GREATER "1.8.15")
else(MSVC)
# Depending on the install, either HDF5_hdf_library or
# HDF5_C_LIBRARIES may be defined. We must check for either.
if(HDF5_C_LIBRARIES AND NOT HDF5_hdf5_LIBRARY)
set(HDF5_hdf5_LIBRARY ${HDF5_C_LIBRARIES} )
endif()
# Some versions of find_package set HDF5_C_LIBRARIES, but not HDF5_C_LIBRARY
# We use HDF5_C_LIBRARY below, so need to make sure it is set.
if(HDF5_C_LIBRARIES AND NOT HDF5_C_LIBRARY)
set(HDF5_C_LIBRARY ${HDF5_C_LIBRARIES} )
endif()
# Same issue as above...
if(HDF5_HL_LIBRARIES AND NOT HDF5_HL_LIBRARY)
set(HDF5_HL_LIBRARY ${HDF5_HL_LIBRARIES} )
endif()
endif(MSVC)
if(NOT HDF5_C_LIBRARY)
set(HDF5_C_LIBRARY hdf5 )
endif()
endif(HDF5_C_LIBRARY AND HDF5_HL_LIBRARY AND HDF5_INCLUDE_DIR)
find_package(Threads)
# There is a missing case in the above code so default it
if(NOT HDF5_C_LIBRARY_hdf5 OR "${HDF5_C_LIBRARY_hdf5}" STREQUAL "" )
set(HDF5_C_LIBRARY_hdf5 "${HDF5_C_LIBRARY}" )
endif()
find_path(HAVE_HDF5_H hdf5.h PATHS ${HDF5_INCLUDE_DIR} NO_DEFAULT_PATH)
if(NOT HAVE_HDF5_H)
message(FATAL_ERROR "Compiling a test with hdf5 failed. Either hdf5.h cannot be found, or the log messages should be checked for another reason.")
else(NOT HAVE_HDF5_H)
include_directories(${HAVE_HDF5_H})
endif(NOT HAVE_HDF5_H)
include(cmake/check_hdf5.cmake)
# Check to ensure that HDF5 was built with zlib.
# This needs to be near the beginning since we
# need to know whether to add "-lz" to the symbol
# tests below.
check_hdf5_feature(HAVE_HDF5_ZLIB H5_HAVE_ZLIB_H)
if(NOT HAVE_HDF5_ZLIB)
message(FATAL_ERROR "HDF5 was built without zlib. Rebuild HDF5 with zlib.")
else()
# If user has specified the `ZLIB_LIBRARY`, use it; otherwise try to find...
if(NOT ZLIB_LIBRARY)
find_package(ZLIB)
if(ZLIB_FOUND)
set(ZLIB_LIBRARY ${ZLIB_LIBRARIES} )
else()
message(FATAL_ERROR "HDF5 Requires ZLIB, but cannot find libz.")
endif()
endif()
set(CMAKE_REQUIRED_LIBRARIES ${ZLIB_LIBRARY} ${CMAKE_REQUIRED_LIBRARIES} )
message(STATUS "HDF5 has zlib.")
endif()
# Check to see if H5Z_SZIP exists in HDF5_Libraries. If so, we must use szip library.
check_hdf5_feature(HAVE_H5Z_SZIP H5_HAVE_FILTER_SZIP)
####
# Check to see if HDF5 library is 1.10.6 or greater.
# Used to control path name conversion
####
if(${HDF5_VERSION} VERSION_GREATER "1.10.5")
set(HDF5_UTF8_PATHS ON )
else()
set(HDF5_UTF8_PATHS OFF )
endif()
message("-- HDF5_UTF8_PATHS (HDF5 version 1.10.6+): ${HDF5_UTF8_PATHS}")
# Find out if HDF5 was built with parallel support.
# Do that by checking for the targets H5Pget_fapl_mpiposx and
# H5Pget_fapl_mpio in ${HDF5_LIB}.
# H5Pset_fapl_mpiposix and H5Pget_fapl_mpiposix have been removed since HDF5 1.8.12.
# Use H5Pset_fapl_mpio and H5Pget_fapl_mpio, instead.
# CHECK_LIBRARY_EXISTS(${HDF5_C_LIBRARY_hdf5} H5Pget_fapl_mpiposix "" HDF5_IS_PARALLEL_MPIPOSIX)
CHECK_LIBRARY_EXISTS(${HDF5_C_LIBRARY_hdf5} H5Pget_fapl_mpio "" HDF5_IS_PARALLEL_MPIO)
if(HDF5_IS_PARALLEL_MPIO)
set(HDF5_PARALLEL ON)
else()
set(HDF5_PARALLEL OFF)
endif()
set(HDF5_PARALLEL ${HDF5_PARALLEL} )
#Check to see if HDF5 library has collective metadata APIs, (HDF5 >= 1.10.0)
CHECK_LIBRARY_EXISTS(${HDF5_C_LIBRARY_hdf5} H5Pset_all_coll_metadata_ops "" HDF5_HAS_COLL_METADATA_OPS)
if(HDF5_PARALLEL)
set(HDF5_CC h5pcc )
else()
set(HDF5_CC h5cc )
endif()
# Check to see if H5Dread_chunk is available
CHECK_LIBRARY_EXISTS(${HDF5_C_LIBRARY_hdf5} H5Dread_chunk "" HAS_READCHUNKS)
# Check to see if H5Pset_fapl_ros3 is available
CHECK_LIBRARY_EXISTS(${HDF5_C_LIBRARY_hdf5} H5Pset_fapl_ros3 "" HAS_HDF5_ROS3)
# Check to see if this is hdf5-1.10.3 or later.
if(HAS_READCHUNKS)
set(HDF5_SUPPORTS_PAR_FILTERS ON )
set(ENABLE_NCDUMPCHUNKS ON )
endif()
# Record if ROS3 Driver is available
if(HAS_HDF5_ROS3)
set(ENABLE_HDF5_ROS3 ON )
endif()
IF (HDF5_SUPPORTS_PAR_FILTERS)
set(HDF5_HAS_PAR_FILTERS TRUE CACHE BOOL "" )
set(HAS_PAR_FILTERS yes CACHE STRING "" )
else()
set(HDF5_HAS_PAR_FILTERS FALSE CACHE BOOL "" )
set(HAS_PAR_FILTERS no CACHE STRING "" )
endif()
find_path(HAVE_HDF5_H hdf5.h PATHS ${HDF5_INCLUDE_DIR} NO_DEFAULT_PATH)
if(NOT HAVE_HDF5_H)
message(FATAL_ERROR "Compiling a test with hdf5 failed. Either hdf5.h cannot be found, or the log messages should be checked for another reason.")
else(NOT HAVE_HDF5_H)
include_directories(${HAVE_HDF5_H})
endif(NOT HAVE_HDF5_H)
#option to include HDF5 High Level header file (hdf5_hl.h) in case we are not doing a make install
include_directories(${HDF5_HL_INCLUDE_DIR})
endif(USE_HDF5)
################################
# Curl
################################
# See if we have libcurl
find_package(CURL)
ADD_DEFINITIONS(-DCURL_STATICLIB=1)
include_directories(${CURL_INCLUDE_DIRS})
# Define a test flag for have curl library
if(CURL_LIBRARIES OR CURL_LIBRARY)
set(FOUND_CURL TRUE)
else()
set(FOUND_CURL FALSE)
endif()
set(FOUND_CURL ${FOUND_CURL} TRUE )
# Start disabling if curl not found
if(NOT FOUND_CURL)
message(WARNING "ENABLE_REMOTE_FUNCTIONALITY requires libcurl; disabling")
set(ENABLE_REMOTE_FUNCTIONALITY OFF CACHE BOOL "ENABLE_REMOTE_FUNCTIONALITY requires libcurl; disabling" FORCE )
endif()
# Make curl headers visible to the CHECK_C_SOURCE_COMPILES probes below.
# NOTE(review): CMAKE_REQUIRED_INCLUDES is not saved/restored here, so the
# curl include path remains in effect for any later probes — confirm that
# is intended.
set (CMAKE_REQUIRED_INCLUDES ${CURL_INCLUDE_DIRS})
# Check to see if we have libcurl 7.66 or later
# (0x074200 encodes version 7.66.0 in LIBCURL_VERSION_NUM's 0xXXYYZZ layout).
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {
#if LIBCURL_VERSION_NUM < 0x074200
choke me;
#endif
}" HAVE_LIBCURL_766)
IF (HAVE_LIBCURL_766)
# If libcurl version is >= 7.66, then can skip tests
# for these symbols which were added in an earlier version
set(HAVE_CURLOPT_USERNAME TRUE)
set(HAVE_CURLOPT_PASSWORD TRUE)
set(HAVE_CURLOPT_KEYPASSWD TRUE)
set(HAVE_CURLINFO_RESPONSE_CODE TRUE)
set(HAVE_CURLINFO_HTTP_CONNECTCODE TRUE)
set(HAVE_CURLOPT_BUFFERSIZE TRUE)
set(HAVE_CURLOPT_KEEPALIVE TRUE)
else()
# Older libcurl: probe for each optional symbol individually.
# Check to see if CURLOPT_USERNAME is defined.
# It is present starting version 7.19.1.
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {int x = CURLOPT_USERNAME;}" HAVE_CURLOPT_USERNAME)
# Check to see if CURLOPT_PASSWORD is defined.
# It is present starting version 7.19.1.
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {int x = CURLOPT_PASSWORD;}" HAVE_CURLOPT_PASSWORD)
# Check to see if CURLOPT_KEYPASSWD is defined.
# It is present starting version 7.16.4.
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {int x = CURLOPT_KEYPASSWD;}" HAVE_CURLOPT_KEYPASSWD)
# Check to see if CURLINFO_RESPONSE_CODE is defined.
# It showed up in curl 7.10.7.
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {int x = CURLINFO_RESPONSE_CODE;}" HAVE_CURLINFO_RESPONSE_CODE)
# Check to see if CURLINFO_HTTP_CONNECTCODE is defined.
# It showed up in curl 7.10.7.
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {int x = CURLINFO_HTTP_CONNECTCODE;}" HAVE_CURLINFO_HTTP_CONNECTCODE)
# Check to see if CURLOPT_BUFFERSIZE is defined.
# It is present starting version 7.59
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {int x = CURLOPT_BUFFERSIZE;}" HAVE_CURLOPT_BUFFERSIZE)
# Check to see if CURLOPT_TCP_KEEPALIVE is defined.
# It is present starting version 7.25
CHECK_C_SOURCE_COMPILES("
#include <curl/curl.h>
int main() {int x = CURLOPT_TCP_KEEPALIVE;}" HAVE_CURLOPT_KEEPALIVE)
endif()
################################
# Math
################################
# Locate the math library (libm) so it can be linked explicitly.
if(NOT WIN32)
  find_library(HAVE_LIBM NAMES math m libm)
  if(HAVE_LIBM)
    message(STATUS "Found Math library: ${HAVE_LIBM}")
  else()
    # No standalone libm was found; accept the platform if the math
    # functions are built into libc, otherwise abort configuration.
    CHECK_FUNCTION_EXISTS(exp HAVE_LIBM_FUNC)
    if(HAVE_LIBM_FUNC)
      set(HAVE_LIBM "")
    else()
      message(FATAL_ERROR "Unable to find the math library.")
    endif()
  endif()
endif()
################################
# zlib
################################
# Locate zlib and record its availability in ENABLE_ZLIB.
find_package(ZLIB)
if(ZLIB_FOUND)
  set(ENABLE_ZLIB TRUE)
  include_directories(${ZLIB_INCLUDE_DIRS})
else()
  set(ENABLE_ZLIB FALSE)
endif()
################################
# Zips
################################
# Locate the optional compression-filter libraries. Each find_package()
# sets <Pkg>_FOUND, which set_std_filter() consults further below.
# Szip is wanted both when the szip filter is enabled and when NCZarr is
# enabled, so the previously duplicated if/elseif branches (both calling
# find_package(Szip)) are collapsed into a single OR condition.
if(ENABLE_FILTER_SZIP OR ENABLE_NCZARR)
  find_package(Szip)
endif()
if(ENABLE_FILTER_BZ2)
  find_package(Bz2)
endif()
if(ENABLE_FILTER_BLOSC)
  find_package(Blosc)
endif()
if(ENABLE_FILTER_ZSTD)
  find_package(Zstd)
endif()
# Accumulate standard filters
set(STD_FILTERS "deflate") # deflate is always available (via zlib)
set_std_filter(Szip)
set(HAVE_SZ ${Szip_FOUND})
set(USE_SZIP ${HAVE_SZ})
set_std_filter(Blosc)
# Zstd participates only when its package was actually found.
if(Zstd_FOUND)
set_std_filter(Zstd)
set(HAVE_ZSTD ON)
endif()
if(Bz2_FOUND)
set_std_filter(Bz2)
else()
# The reason we use a local version is to support a more complex test case
message("libbz2 not found using built-in version")
set(HAVE_LOCAL_BZ2 ON)
set(HAVE_BZ2 ON CACHE BOOL "")
set(STD_FILTERS "${STD_FILTERS} bz2")
endif()
# NCZarr's zip-file storage backend requires libzip.
if(ENABLE_NCZARR_ZIP)
  find_package(Zip REQUIRED)
  include_directories(${Zip_INCLUDE_DIRS})
endif()
################################
# S3
################################
# Note we check for the library after checking for enable_s3
# because for some reason this screws up if we unconditionally test for sdk
# and it is not available. Fix someday
if(ENABLE_S3)
if(NOT ENABLE_S3_INTERNAL)
# See if aws-s3-sdk is available
# NOTE(review): find_package(... REQUIRED) normally aborts on failure, so
# the AWSSDK_FOUND else-branch below may be unreachable — confirm intent.
find_package(AWSSDK REQUIRED COMPONENTS s3;transfer)
if(AWSSDK_FOUND)
set(ENABLE_S3_AWS ON CACHE BOOL "S3 AWS" FORCE)
include_directories(${AWSSDK_INCLUDE_DIR})
else(AWSSDK_FOUND)
set(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
endif(AWSSDK_FOUND)
else(NOT ENABLE_S3_INTERNAL)
# Find crypto libraries required with testing with the internal s3 api.
#find_library(SSL_LIB NAMES ssl openssl)
find_package(OpenSSL REQUIRED)
# NOTE(review): REQUIRED already makes a missing OpenSSL fatal; this extra
# check is belt-and-braces.
if(NOT OpenSSL_FOUND)
message(FATAL_ERROR "Can't find an ssl library, required by S3_INTERNAL")
endif(NOT OpenSSL_FOUND)
#find_package(Crypto REQUIRED)
#if(NOT CRYPTO_LIB)
# message(FATAL_ERROR "Can't find a crypto library, required by S3_INTERNAL")
#endif(NOT CRYPTO_LIB)
endif(NOT ENABLE_S3_INTERNAL)
else()
# S3 disabled entirely: force the AWS flag off in the cache.
set(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
endif()
################################
# LibXML
################################
# Optionally use libxml2 as the XML parser.
if(ENABLE_LIBXML2)
  find_package(LibXml2)
  if(NOT LibXml2_FOUND)
    set(HAVE_LIBXML2 FALSE)
  else()
    include_directories(${LIBXML2_INCLUDE_DIRS})
    set(HAVE_LIBXML2 TRUE)
    set(XMLPARSER "libxml2")
  endif()
endif()
################################
# MPI
################################
# MPI is mandatory when parallel netCDF-4 is requested or when the
# discovered HDF5 build is itself parallel.
if(ENABLE_PARALLEL4 OR HDF5_PARALLEL)
  find_package(MPI REQUIRED)
endif()
################################
# parallel IO
################################
# Locate the PnetCDF library and its header. Previously only the library
# result was tested, so a system with libpnetcdf but no pnetcdf.h would
# keep PnetCDF enabled and fail later at compile time; require both.
if(ENABLE_PNETCDF)
  find_library(PNETCDF NAMES pnetcdf)
  find_path(PNETCDF_INCLUDE_DIR pnetcdf.h)
  if(NOT PNETCDF OR NOT PNETCDF_INCLUDE_DIR)
    message(STATUS "Cannot find PnetCDF library. Disabling PnetCDF support.")
    set(USE_PNETCDF OFF CACHE BOOL "")
  endif()
endif()
################################
# Doxygen
################################
# Documentation generation is opt-in; fail configuration if it was
# requested but doxygen is not installed.
if(ENABLE_DOXYGEN)
  find_package(Doxygen REQUIRED)
endif()
################################
# NC_DPKG
################################
# dpkg is only needed when producing a Debian package.
if(NETCDF_PACKAGE)
  find_program(NC_DPKG NAMES dpkg)
endif()

0
cmake/modules/FindBlosc.cmake Executable file → Normal file
View File

View File

@ -2,7 +2,10 @@
#
# Szip_FOUND Set to true to indicate the szip library was found
# Szip_INCLUDE_DIRS The directory containing the header file szip/szip.h
# Szip_LIBRARIES The libraries needed to use the szip library
# Szip_LIBRARIES The libraries needed to use the szip library with the word "debug" and "optimized" when both are found
# Szip_RELEASE_LIBRARY The path to the Szip release library if available
# Szip_DEBUG_LIBRARY The path to the Szip debug library if available
# Szip_LIBRARY The path to a Szip library, preferentially release but fallback to debug
#
# To specify an additional directory to search, set Szip_ROOT.
#
@ -38,13 +41,17 @@ IF(Szip_INCLUDE_DIRS)
PATH_SUFFIXES Release ${CMAKE_LIBRARY_ARCHITECTURE} ${CMAKE_LIBRARY_ARCHITECTURE}/Release
PATHS ${Szip_LIBRARY_DIRS} NO_DEFAULT_PATH)
SET(Szip_LIBRARIES )
SET(Szip_LIBRARIES)
SET(Szip_LIBRARY)
IF(Szip_DEBUG_LIBRARY AND Szip_RELEASE_LIBRARY)
SET(Szip_LIBRARIES debug ${Szip_DEBUG_LIBRARY} optimized ${Szip_RELEASE_LIBRARY})
SET(Szip_LIBRARY ${Szip_RELEASE_LIBRARY})
ELSEIF(Szip_DEBUG_LIBRARY)
SET(Szip_LIBRARIES ${Szip_DEBUG_LIBRARY})
SET(Szip_LIBRARY ${Szip_DEBUG_LIBRARY})
ELSEIF(Szip_RELEASE_LIBRARY)
SET(Szip_LIBRARIES ${Szip_RELEASE_LIBRARY})
SET(Szip_LIBRARY ${Szip_RELEASE_LIBRARY})
ENDIF(Szip_DEBUG_LIBRARY AND Szip_RELEASE_LIBRARY)
IF(Szip_LIBRARIES)

0
cmake/modules/FindZip.cmake Executable file → Normal file
View File

View File

@ -0,0 +1,329 @@
################################
# Macros
################################
# set_std_filter(filter): record the availability of one standard
# compression filter. Expects ENABLE_FILTER_<FILTER> and <filter>_FOUND
# to be set by the caller; when the filter is enabled and found, appends
# the lower-case name to STD_FILTERS and sets ENABLE_/HAVE_ flags.
# NOTE: this is a macro, so every set() here lands in the caller's scope.
macro(set_std_filter filter)
# Upper case the filter name
string(TOUPPER "${filter}" upfilter)
string(TOLOWER "${filter}" downfilter)
if(ENABLE_FILTER_${upfilter})
# Define a test flag for filter
if(${filter}_FOUND)
include_directories(${${filter}_INCLUDE_DIRS})
set(ENABLE_${upfilter} TRUE)
set(HAVE_${upfilter} ON)
set(STD_FILTERS "${STD_FILTERS} ${downfilter}")
message(">>> Standard Filter: ${downfilter}")
else()
# Filter requested but its library was not found.
set(ENABLE_${upfilter} FALSE)
set(HAVE_${upfilter} OFF)
endif()
else()
# Filter not requested at all.
set(HAVE_${upfilter} OFF)
endif()
endmacro(set_std_filter)
# getuname(name flag): run `uname <flag>` and store the trimmed output in
# the variable named <name>. Assumes UNAME holds the path to the uname
# executable, set elsewhere in the build — TODO confirm it is always defined.
macro(getuname name flag)
execute_process(COMMAND "${UNAME}" "${flag}" OUTPUT_VARIABLE "${name}" OUTPUT_STRIP_TRAILING_WHITESPACE)
endmacro(getuname)
# A macro to check if a C linker supports a particular flag.
# The answer is stored in the variable named by M_RESULT.
# CMAKE_REQUIRED_FLAGS is saved and restored so the probe flag does not
# leak into subsequent try-compiles.
macro(CHECK_C_LINKER_FLAG M_FLAG M_RESULT)
set(T_REQ_FLAG "${CMAKE_REQUIRED_FLAGS}")
set(CMAKE_REQUIRED_FLAGS "${M_FLAG}")
CHECK_C_SOURCE_COMPILES("int main() {return 0;}" ${M_RESULT})
set(CMAKE_REQUIRED_FLAGS "${T_REQ_FLAG}")
endmacro()
# Macro for replacing '/MD' with '/MT' in the compiler flag variables.
# Used only on Windows: /MD selects the shared CRT, /MT the static CRT.
#
# Taken From:
# http://www.cmake.org/Wiki/CMake_FAQ#How_can_I_build_my_MSVC_application_with_a_static_runtime.3F
#
macro(specify_static_crt_flag)
  set(vars
    CMAKE_C_FLAGS
    CMAKE_C_FLAGS_DEBUG
    CMAKE_C_FLAGS_RELEASE
    CMAKE_C_FLAGS_MINSIZEREL
    CMAKE_C_FLAGS_RELWITHDEBINFO
    CMAKE_CXX_FLAGS
    CMAKE_CXX_FLAGS_DEBUG
    CMAKE_CXX_FLAGS_RELEASE
    CMAKE_CXX_FLAGS_MINSIZEREL
    CMAKE_CXX_FLAGS_RELWITHDEBINFO)
  # Rewrite each flag variable in place, then report its final value.
  foreach(flag_var ${vars})
    if(${flag_var} MATCHES "/MD")
      string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
    endif()
    message(STATUS " '${flag_var}': ${${flag_var}}")
  endforeach()
  message(STATUS "")
endmacro()
# Macro to append files to the EXTRA_DIST files.
# Note: can only be used in subdirectories because of the use of PARENT_SCOPE
set(EXTRA_DIST "")
macro(ADD_EXTRA_DIST files)
  foreach(F ${files})
    # Accumulate each file's absolute path, then publish the updated
    # list to the parent scope.
    list(APPEND EXTRA_DIST "${CMAKE_CURRENT_SOURCE_DIR}/${F}")
    set(EXTRA_DIST ${EXTRA_DIST} PARENT_SCOPE)
  endforeach()
endmacro()
# GEN_m4(filename): arrange for <filename>.c to be generated from
# <filename>.m4 with m4. If m4 is unavailable, fall back to a
# pre-generated copy in the source tree; error out if neither exists.
# BUG FIX: the body previously contained literal "$(unknown)" placeholders
# instead of "${filename}", so the macro argument was never substituted.
macro(GEN_m4 filename)
  set(fallbackdest "${CMAKE_CURRENT_SOURCE_DIR}/${filename}.c")
  set(dest "${CMAKE_CURRENT_BINARY_DIR}/${filename}.c")
  # If m4 isn't present, and the generated file doesn't exist,
  # it cannot be generated and an error should be thrown.
  if(NOT HAVE_M4)
    if(NOT EXISTS ${fallbackdest})
      message(FATAL_ERROR "m4 is required to generate ${filename}.c. Please install m4 so that it is on the PATH and try again.")
    else()
      set(dest ${fallbackdest})
    endif()
  else()
    add_custom_command(
      OUTPUT ${dest}
      COMMAND ${NC_M4}
      ARGS ${M4FLAGS} ${CMAKE_CURRENT_SOURCE_DIR}/${filename}.m4 > ${dest}
      VERBATIM
      )
  endif()
endmacro(GEN_m4)
# Binary tests, but ones which depend on the 'TEMP_LARGE' environment
# variable being defined when the test runs.
macro(add_bin_env_temp_large_test prefix F)
  add_executable(${prefix}_${F} ${F}.c)
  target_link_libraries(${prefix}_${F} netcdf)
  IF(MSVC)
    # Keep debug builds off the default shared-CRT library.
    set_target_properties(${prefix}_${F}
      PROPERTIES LINK_FLAGS_DEBUG " /NODEFAULTLIB:MSVCRT"
      )
  endif()
  add_test(${prefix}_${F} bash "-c" "TEMP_LARGE=${CMAKE_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/${prefix}_${F}")
  if(MSVC)
    set_property(TARGET ${prefix}_${F} PROPERTY FOLDER "tests")
    # Pin the output directory for every configuration so scripts can
    # find the binary regardless of build type.
    set_target_properties(${prefix}_${F} PROPERTIES
      RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${CMAKE_CURRENT_BINARY_DIR})
  endif()
endmacro()
# Tests which are binary, but depend on the TOPSRCDIR environment
# variable being set when they run.
macro(add_bin_env_test prefix F)
  add_executable(${prefix}_${F} ${F}.c)
  target_link_libraries(${prefix}_${F} netcdf)
  if(MSVC)
    # Keep debug builds off the default shared-CRT library and group
    # the target under the IDE "tests" folder.
    set_target_properties(${prefix}_${F}
      PROPERTIES LINK_FLAGS_DEBUG " /NODEFAULTLIB:MSVCRT"
      )
    set_property(TARGET ${prefix}_${F} PROPERTY FOLDER "tests")
  endif()
  add_test(${prefix}_${F} bash "-c" "TOPSRCDIR=${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/${prefix}_${F}")
endmacro()
# Build a binary used by a script, but don't register it as a test.
macro(build_bin_test F)
  # Prefer the source-tree copy of the file; otherwise it must already
  # have been copied into the binary directory.
  if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/${F}.c")
    add_executable(${F} "${CMAKE_CURRENT_BINARY_DIR}/${F}.c" ${ARGN})
  else()
    add_executable(${F} "${CMAKE_CURRENT_SOURCE_DIR}/${F}.c" ${ARGN})
  endif()
  target_link_libraries(${F} netcdf ${ALL_TLL_LIBS})
  if(MSVC)
    set_target_properties(${F}
      PROPERTIES LINK_FLAGS_DEBUG " /NODEFAULTLIB:MSVCRT"
      )
    # Pin the output directory for the configurations scripts rely on.
    set_target_properties(${F} PROPERTIES
      RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_CURRENT_BINARY_DIR})
  endif()
endmacro()
# Binary tests which are used by a script looking for a specific name.
macro(add_bin_test_no_prefix F)
  build_bin_test(${F} ${ARGN})
  add_test(${F} ${EXECUTABLE_OUTPUT_PATH}/${F})
  if(MSVC)
    set_property(TEST ${F} PROPERTY FOLDER "tests/")
    # Pin the output directory so the driving script can find the binary.
    set_target_properties(${F} PROPERTIES
      RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_CURRENT_BINARY_DIR})
  endif()
endmacro()
# Like build_bin_test, for binaries whose name a script looks up directly;
# no CTest entry is registered.
macro(build_bin_test_no_prefix F)
  build_bin_test(${F})
  if(MSVC)
    #SET_PROPERTY(TEST ${F} PROPERTY FOLDER "tests/")
    # Pin the output directory so the driving script can find the binary.
    set_target_properties(${F} PROPERTIES
      RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_CURRENT_BINARY_DIR})
  endif()
endmacro()
# Build a test binary and register it with CTest under <prefix>_<F>.
macro(add_bin_test prefix F)
  add_executable(${prefix}_${F} ${F}.c ${ARGN})
  target_link_libraries(${prefix}_${F}
    ${ALL_TLL_LIBS}
    netcdf
    )
  if(MSVC)
    set_target_properties(${prefix}_${F}
      PROPERTIES LINK_FLAGS_DEBUG " /NODEFAULTLIB:MSVCRT"
      )
  endif()
  add_test(${prefix}_${F}
    ${EXECUTABLE_OUTPUT_PATH}/${prefix}_${F}
    )
  if(MSVC)
    set_property(TEST ${prefix}_${F} PROPERTY FOLDER "tests/")
    # Pin the output directory for the configurations scripts rely on.
    set_target_properties(${prefix}_${F} PROPERTIES
      RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_CURRENT_BINARY_DIR}
      RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_CURRENT_BINARY_DIR})
  endif()
endmacro()
# A cmake script to print out information at the end of the configuration step.
# Purely informational: reads feature/status variables set earlier in the
# configure run and echoes them; changes nothing.
macro(print_conf_summary)
message("")
message("")
message("Configuration Summary:")
message("")
message(STATUS "Building Shared Libraries: ${BUILD_SHARED_LIBS}")
message(STATUS "Building netCDF-4: ${ENABLE_NETCDF_4}")
message(STATUS "Building DAP2 Support: ${ENABLE_DAP2}")
message(STATUS "Building DAP4 Support: ${ENABLE_DAP4}")
message(STATUS "Building Byte-range Support: ${ENABLE_BYTERANGE}")
message(STATUS "Building Utilities: ${BUILD_UTILITIES}")
if(CMAKE_PREFIX_PATH)
message(STATUS "CMake Prefix Path: ${CMAKE_PREFIX_PATH}")
endif()
message("")
# NOTE(review): the explicit ${...} expansions mean this if() fails if
# STATUS_PNETCDF/STATUS_PARALLEL are unset — confirm they are always defined.
if(${STATUS_PNETCDF} OR ${STATUS_PARALLEL})
message("Building Parallel NetCDF")
message(STATUS "Using PnetCDF: ${STATUS_PNETCDF}")
message(STATUS "Using Parallel IO: ${STATUS_PARALLEL}")
message("")
endif()
message("Tests Enabled: ${ENABLE_TESTS}")
if(ENABLE_TESTS)
message(STATUS "DAP Remote Tests: ${ENABLE_DAP_REMOTE_TESTS}")
message(STATUS "Extra Tests: ${ENABLE_EXTRA_TESTS}")
message(STATUS "Coverage Tests: ${ENABLE_COVERAGE_TESTS}")
message(STATUS "Parallel Tests: ${ENABLE_PARALLEL_TESTS}")
message(STATUS "Large File Tests: ${ENABLE_LARGE_FILE_TESTS}")
message(STATUS "Extreme Numbers: ${ENABLE_EXTREME_NUMBERS}")
message(STATUS "Unit Tests: ${ENABLE_UNIT_TESTS}")
endif()
message("")
message("Compiler:")
message("")
message(STATUS "Build Type: ${CMAKE_BUILD_TYPE}")
message(STATUS "CMAKE_C_COMPILER: ${CMAKE_C_COMPILER}")
message(STATUS "CMAKE_C_FLAGS: ${CMAKE_C_FLAGS}")
if("${CMAKE_BUILD_TYPE}" STREQUAL "DEBUG")
message(STATUS "CMAKE_C_FLAGS_DEBUG: ${CMAKE_C_FLAGS_DEBUG}")
endif()
if("${CMAKE_BUILD_TYPE}" STREQUAL "RELEASE")
message(STATUS "CMAKE_C_FLAGS_RELEASE: ${CMAKE_C_FLAGS_RELEASE}")
endif()
message(STATUS "Linking against: ${ALL_TLL_LIBS}")
message("")
endmacro()
# add_sh_test(prefix F [args...]): register shell-script test <prefix>_<F>,
# running <F>.sh from the binary dir with srcdir/TOPSRCDIR exported.
# Skipped entirely when bash is unavailable (HAVE_BASH set elsewhere).
macro(add_sh_test prefix F)
if(HAVE_BASH)
add_test(${prefix}_${F} bash "-c" "export srcdir=${CMAKE_CURRENT_SOURCE_DIR};export TOPSRCDIR=${CMAKE_SOURCE_DIR};${CMAKE_CURRENT_BINARY_DIR}/${F}.sh ${ARGN}")
endif()
endmacro()
# getdpkg_arch(arch): store `dpkg --print-architecture` output (trimmed)
# in the variable named <arch>. Assumes NC_DPKG was located by
# find_program earlier — TODO confirm it is set before use.
macro(getdpkg_arch arch)
execute_process(COMMAND "${NC_DPKG}" "--print-architecture" OUTPUT_VARIABLE "${arch}" OUTPUT_STRIP_TRAILING_WHITESPACE)
endmacro(getdpkg_arch)
################################
# Functions
################################
# booleanize(VALUE RETVAR): normalize an arbitrary value into TRUE, FALSE
# or NOTFOUND, stored in RETVAR in the caller's scope.
# Relies on TRUELIST/FALSELIST (lists of accepted true/false spellings)
# being defined elsewhere in the file — TODO confirm they are in scope.
function(booleanize VALUE RETVAR)
# force case
string(TOLOWER "${VALUE}" LCVALUE)
# Now do all the comparisons
if(LCVALUE IN_LIST TRUELIST OR LCVALUE GREATER 0)
set(${RETVAR} TRUE PARENT_SCOPE)
elseif(LCVALUE IN_LIST FALSELIST OR LCVALUE MATCHES ".*-notfound" OR LCVALUE STREQUAL "")
set(${RETVAR} FALSE PARENT_SCOPE)
else()
# Neither recognizably true nor false.
set(${RETVAR} NOTFOUND PARENT_SCOPE)
endif()
endfunction()
# A function used to create autotools-style 'yes/no' definitions:
# if the named feature variable is set, 'yes' is stored in ret_val,
# otherwise 'no'.
#
# Also creates a version of the ret_val prepended with 'NC' (1 when the
# feature is true, 0 otherwise), used to generate netcdf_meta.h.
function(is_enabled feature ret_val)
  if(NOT ${feature})
    set(${ret_val} "no" PARENT_SCOPE)
    set("NC_${ret_val}" 0 PARENT_SCOPE)
  else()
    set(${ret_val} "yes" PARENT_SCOPE)
    set("NC_${ret_val}" 1 PARENT_SCOPE)
  endif()
endfunction()
# A function used to create autotools-style 'yes/no' definitions.
# If a variable is set, 'no' is returned. Otherwise, 'yes' is
# returned (the inverse of is_enabled).
#
# Also creates a version of the ret_val prepended with 'NC',
# when the feature is DISABLED, which is used to generate netcdf_meta.h.
# NOTE(review): unlike is_enabled, NC_${ret_val} is left unset (not 0)
# when the feature is enabled — confirm this asymmetry is intentional.
function(is_disabled feature ret_val)
if(${feature})
set(${ret_val} "no" PARENT_SCOPE)
else()
set(${ret_val} "yes" PARENT_SCOPE)
set("NC_${ret_val}" 1 PARENT_SCOPE)
endif(${feature})
endfunction()

View File

@ -118,6 +118,12 @@ are set when opening a binary file on Windows. */
/* if true, build byte-range Client */
#cmakedefine ENABLE_BYTERANGE 1
/* if true, enable ERANGE fill */
#cmakedefine ENABLE_ERANGE_FILL 1
#ifdef ENABLE_ERANGE_FILL
#define ERANGE_FILL 1
#endif
/* if true, use hdf5 S3 virtual file reader */
#cmakedefine ENABLE_HDF5_ROS3 1
@ -462,9 +468,6 @@ with zip */
/* if true, HDF5 is at least version 1.10.5 and supports UTF8 paths */
#cmakedefine HDF5_UTF8_PATHS 1
/* if true, backtrace support will be used. */
#cmakedefine HAVE_EXECINFO_H 1
/* if true, include JNA bug fix */
#cmakedefine JNA 1

View File

@ -19,9 +19,9 @@ AC_INIT([netCDF],[4.9.3-development],[support-netcdf@unidata.ucar.edu],[netcdf-c
##
: ${CFLAGS=""}
AC_SUBST([NC_VERSION_MAJOR]) NC_VERSION_MAJOR=4
AC_SUBST([NC_VERSION_MINOR]) NC_VERSION_MINOR=9
AC_SUBST([NC_VERSION_PATCH]) NC_VERSION_PATCH=3
AC_SUBST([netCDF_VERSION_MAJOR]) netCDF_VERSION_MAJOR=4
AC_SUBST([netCDF_VERSION_MINOR]) netCDF_VERSION_MINOR=9
AC_SUBST([netCDF_VERSION_PATCH]) netCDF_VERSION_PATCH=3
AC_SUBST([NC_VERSION_NOTE]) NC_VERSION_NOTE="-development"
##
@ -857,129 +857,136 @@ AC_MSG_RESULT([${have_sz}])
##########
# See if we have libzip for NCZarr
AC_SEARCH_LIBS([zip_open],[zip zip.dll cygzip.dll],[have_zip=yes],[have_zip=no])
AC_MSG_CHECKING([whether libzip library is available])
AC_MSG_RESULT([${have_zip}])
enable_nczarr_zip=${have_zip} # alias
##
# Check to see if we're using NCZarr. If not, we don't need to check for dependencies and such.
##
if test "x$enable_nczarr" = xno ; then
enable_nczarr_zip=no
fi
AC_MSG_CHECKING([whether nczarr zip support is enabled])
AC_MSG_RESULT([${enable_nczarr_zip}])
if test "x$enable_nczarr_zip" = xyes ; then
AC_DEFINE([ENABLE_NCZARR_ZIP], [1], [If true, then libzip found])
fi
# Check for enabling of S3 support
AC_MSG_CHECKING([whether netcdf S3 support should be enabled])
AC_ARG_ENABLE([s3],
[AS_HELP_STRING([--enable-s3],
[enable netcdf S3 support])])
test "x$enable_s3" = xyes || enable_s3=no
AC_MSG_RESULT($enable_s3)
if test "x$enable_remote_functionality" = xno ; then
AC_MSG_WARN([--disable-remote-functionality => --disable-s3])
enable_s3=no
fi
# --enable-nczarr-s3 is synonym for --enable-s3 (but...)
AC_MSG_CHECKING([whether netcdf NCZarr S3 support should be enabled])
AC_ARG_ENABLE([nczarr-s3],
[AS_HELP_STRING([--enable-nczarr-s3],
[(Deprecated) enable netcdf NCZarr S3 support; Deprecated in favor of --enable-s3])])
AC_MSG_RESULT([$enable_nczarr_s3 (Deprecated) Please use --enable-s3)])
# Set enable_s3 instead of enable_nczarr_s3
if test "x$enable_s3" = xno && test "x$enable_nczarr_s3" = xyes && test "x$enable_remote_functionality" = xyes; then
enable_s3=yes # back compatibility
fi
unset enable_nczarr_s3
# Note we check for the library after checking for enable_s3
# because for some reason this fails if we unconditionally test for sdk
# and it is not available. Fix someday
S3LIBS=""
if test "x$enable_s3" = xyes ; then
# See if we have the s3 aws library
# Check for the AWS S3 SDK library
AC_LANG_PUSH([C++])
AC_CHECK_LIB([aws-c-common], [aws_string_destroy], [enable_s3_aws=yes],[enable_s3_aws=no])
if test "x$enable_s3_aws" = "xyes" ; then
S3LIBS="-laws-cpp-sdk-core -laws-cpp-sdk-s3"
fi
AC_LANG_POP
else
# See if we have libzip for NCZarr
AC_SEARCH_LIBS([zip_open],[zip zip.dll cygzip.dll],[have_zip=yes],[have_zip=no])
AC_MSG_CHECKING([whether libzip library is available])
AC_MSG_RESULT([${have_zip}])
enable_nczarr_zip=${have_zip} # alias
AC_MSG_CHECKING([whether nczarr zip support is enabled])
AC_MSG_RESULT([${enable_nczarr_zip}])
if test "x$enable_nczarr_zip" = xyes ; then
AC_DEFINE([ENABLE_NCZARR_ZIP], [1], [If true, then libzip found])
fi
# Check for enabling of S3 support
AC_MSG_CHECKING([whether netcdf S3 support should be enabled])
AC_ARG_ENABLE([s3],
[AS_HELP_STRING([--enable-s3],
[enable netcdf S3 support])])
test "x$enable_s3" = xyes || enable_s3=no
AC_MSG_RESULT($enable_s3)
if test "x$enable_remote_functionality" = xno ; then
AC_MSG_WARN([--disable-remote-functionality => --disable-s3])
enable_s3=no
fi
# --enable-nczarr-s3 is synonym for --enable-s3 (but...)
AC_MSG_CHECKING([whether netcdf NCZarr S3 support should be enabled])
AC_ARG_ENABLE([nczarr-s3],
[AS_HELP_STRING([--enable-nczarr-s3],
[(Deprecated) enable netcdf NCZarr S3 support; Deprecated in favor of --enable-s3])])
AC_MSG_RESULT([$enable_nczarr_s3 (Deprecated) Please use --enable-s3)])
# Set enable_s3 instead of enable_nczarr_s3
if test "x$enable_s3" = xno && test "x$enable_nczarr_s3" = xyes && test "x$enable_remote_functionality" = xyes; then
enable_s3=yes # back compatibility
fi
unset enable_nczarr_s3
# Note we check for the library after checking for enable_s3
# because for some reason this fails if we unconditionally test for sdk
# and it is not available. Fix someday
S3LIBS=""
if test "x$enable_s3" = xyes ; then
# See if we have the s3 aws library
# Check for the AWS S3 SDK library
AC_LANG_PUSH([C++])
AC_CHECK_LIB([aws-c-common], [aws_string_destroy], [enable_s3_aws=yes],[enable_s3_aws=no])
if test "x$enable_s3_aws" = "xyes" ; then
S3LIBS="-laws-cpp-sdk-core -laws-cpp-sdk-s3"
fi
AC_LANG_POP
else
enable_s3_aws=no
fi
AC_MSG_CHECKING([whether AWS S3 SDK library is available])
AC_MSG_RESULT([$enable_s3_aws])
# Check for enabling forced use of Internal S3 library
AC_MSG_CHECKING([whether internal S3 support should be used])
AC_ARG_ENABLE([s3-internal],
[AS_HELP_STRING([--enable-s3-internal],
[enable internal S3 support])])
test "x$enable_s3_internal" = xyes || enable_s3_internal=no
AC_MSG_RESULT($enable_s3_internal)
if test "x$enable_s3_aws" = xno && test "x$enable_s3_internal" = xno ; then
AC_MSG_WARN([No S3 library available => S3 support disabled])
enable_s3=no
fi
if test "x$enable_s3_aws" = xyes && test "x$enable_s3_internal" = xyes ; then
AC_MSG_WARN([Both aws-sdk-cpp and s3-internal enabled => use s3-internal.])
enable_s3_aws=no
fi
fi
AC_MSG_CHECKING([whether AWS S3 SDK library is available])
AC_MSG_RESULT([$enable_s3_aws])
if test "x$enable_s3_internal" = xyes ; then
if test "x$ISOSX" != xyes && test "x$ISMINGW" != xyes && test "x$ISMSVC" != xyes ; then
# Find crypto libraries if using ssl
AC_CHECK_LIB([ssl],[ssl_create_cipher_list])
AC_CHECK_LIB([crypto],[SHA256])
fi
fi
# Check for enabling forced use of Internal S3 library
AC_MSG_CHECKING([whether internal S3 support should be used])
AC_ARG_ENABLE([s3-internal],
[AS_HELP_STRING([--enable-s3-internal],
[enable internal S3 support])])
test "x$enable_s3_internal" = xyes || enable_s3_internal=no
AC_MSG_RESULT($enable_s3_internal)
# Check for enabling S3 testing
AC_MSG_CHECKING([what level of netcdf S3 testing should be enabled])
AC_ARG_WITH([s3-testing],
[AS_HELP_STRING([--with-s3-testing=yes|no|public],
[control netcdf S3 testing])],
[], [with_s3_testing=public])
AC_MSG_RESULT($with_s3_testing)
if test "x$enable_s3_aws" = xno && test "x$enable_s3_internal" = xno ; then
AC_MSG_WARN([No S3 library available => S3 support disabled])
enable_s3=no
fi
# Disable S3 tests if S3 support is disabled
if test "x$enable_s3" = xno ; then
if test "x$with_s3_testing" != xno ; then
AC_MSG_WARN([S3 support is disabled => no testing])
with_s3_testing=no
fi
fi
if test "x$enable_s3_aws" = xyes && test "x$enable_s3_internal" = xyes ; then
AC_MSG_WARN([Both aws-sdk-cpp and s3-internal enabled => use s3-internal.])
enable_s3_aws=no
fi
if test "x$enable_s3" = xyes ; then
AC_DEFINE([ENABLE_S3], [1], [if true, build netcdf-c with S3 support enabled])
fi
if test "x$enable_s3_internal" = xyes ; then
if test "x$ISOSX" != xyes && test "x$ISMINGW" != xyes && test "x$ISMSVC" != xyes ; then
# Find crypto libraries if using ssl
AC_CHECK_LIB([ssl],[ssl_create_cipher_list])
AC_CHECK_LIB([crypto],[SHA256])
fi
fi
if test "x$enable_s3_aws" = xyes ; then
LIBS="$LIBS$S3LIBS"
AC_DEFINE([ENABLE_S3_AWS], [1], [If true, then use aws S3 library])
fi
# Check for enabling S3 testing
AC_MSG_CHECKING([what level of netcdf S3 testing should be enabled])
AC_ARG_WITH([s3-testing],
[AS_HELP_STRING([--with-s3-testing=yes|no|public],
[control netcdf S3 testing])],
[], [with_s3_testing=public])
AC_MSG_RESULT($with_s3_testing)
if test "x$enable_s3_internal" = xyes ; then
AC_DEFINE([ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library])
fi
# Disable S3 tests if S3 support is disabled
if test "x$enable_s3" = xno ; then
if test "x$with_s3_testing" != xno ; then
AC_MSG_WARN([S3 support is disabled => no testing])
with_s3_testing=no
fi
fi
AC_DEFINE_UNQUOTED([WITH_S3_TESTING], [$with_s3_testing], [control S3 testing.])
if test "x$enable_s3" = xyes ; then
AC_DEFINE([ENABLE_S3], [1], [if true, build netcdf-c with S3 support enabled])
fi
if test "x$enable_s3_aws" = xyes ; then
LIBS="$LIBS$S3LIBS"
AC_DEFINE([ENABLE_S3_AWS], [1], [If true, then use aws S3 library])
fi
if test "x$enable_s3_internal" = xyes ; then
AC_DEFINE([ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library])
fi
AC_DEFINE_UNQUOTED([WITH_S3_TESTING], [$with_s3_testing], [control S3 testing.])
if test "x$with_s3_testing" = xyes ; then
AC_MSG_WARN([*** DO NOT SPECIFY WITH_S3_TESTING=YES UNLESS YOU HAVE ACCESS TO THE UNIDATA S3 BUCKET! ***])
AC_DEFINE([ENABLE_S3_TESTALL], [yes], [control S3 testing.])
if test "x$with_s3_testing" = xyes ; then
AC_MSG_WARN([*** DO NOT SPECIFY WITH_S3_TESTING=YES UNLESS YOU HAVE ACCESS TO THE UNIDATA S3 BUCKET! ***])
AC_DEFINE([ENABLE_S3_TESTALL], [yes], [control S3 testing.])
fi
fi
# Check whether we want to enable strict null byte header padding.
@ -1305,9 +1312,6 @@ AC_CHECK_HEADERS([sys/resource.h])
# See if we have ftw.h to walk directory trees
AC_CHECK_HEADERS([ftw.h])
# See if we can do stack tracing programmatically
AC_CHECK_HEADERS([execinfo.h])
# Check for these functions...
AC_CHECK_FUNCS([strlcat snprintf strcasecmp fileno \
strdup strtoll strtoull \

View File

@ -10,7 +10,11 @@ SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")
set(CDASH_TOKEN $ENV{CDASH_TOKEN})
MESSAGE("Using cdash token: ${CDASH_TOKEN}")
IF (CDASH_TOKEN)
MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)
SITE_NAME(local_site_name)

View File

@ -10,9 +10,13 @@ SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")
set(CDASH_TOKEN $ENV{CDASH_TOKEN})
MESSAGE("Using cdash token: ${CDASH_TOKEN}")
IF (CDASH_TOKEN)
MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)
SITE_NAME(local_site_name)
set(CTEST_SITE ${local_site_name})
@ -29,7 +33,7 @@ find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=TRUE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=FALSE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_DROP_METHOD https)
@ -42,7 +46,6 @@ ctest_start("Experimental")
ctest_configure()
ctest_build()
ctest_test()
ctest_coverage()
if (NOT "${CDASH_TOKEN}" STREQUAL "")
ctest_submit(HTTPHEADER "Authorization: Bearer ${CDASH_TOKEN}")
endif()

View File

@ -0,0 +1,52 @@
###
# Standard CTest Script for testing netCDF.
# Requires a CDash Token.
#
# Set the CDASH_TOKEN environmental variable.
#
###
SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")
set(CDASH_TOKEN $ENV{CDASH_TOKEN})
IF (CDASH_TOKEN)
MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)
SITE_NAME(local_site_name)
set(CTEST_SITE ${local_site_name})
set(CTEST_BUILD_CONFIGURATION "Profiling")
set(CTEST_CMAKE_GENERATOR "Unix Makefiles")
set(CTEST_BUILD_NAME "default")
set(CTEST_BUILD_CONFIGURATION "Profiling")
set(CTEST_DROP_METHOD "https")
set(CTEST_DROP_SITE_CDASH TRUE)
set(CTEST_PROJECT_NAME netcdf-c)
find_program(CMAKE_COMMAND cmake)
find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=TRUE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_DROP_METHOD https)
set(CTEST_DROP_SITE "cdash.unidata.ucar.edu:443")
set(CTEST_DROP_LOCATION "/submit.php?project=netcdf-c")
set(CTEST_CONFIGURE_COMMAND "${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE:STRING=${CTEST_BUILD_CONFIGURATION} ${CTEST_BUILD_OPTIONS} ${CTEST_SOURCE_DIRECTORY}")
ctest_start("Experimental")
ctest_configure()
ctest_build()
ctest_test()
ctest_coverage()
if (NOT "${CDASH_TOKEN}" STREQUAL "")
ctest_submit(HTTPHEADER "Authorization: Bearer ${CDASH_TOKEN}")
endif()

View File

@ -10,7 +10,11 @@ SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")
set(CDASH_TOKEN $ENV{CDASH_TOKEN})
MESSAGE("Using cdash token: ${CDASH_TOKEN}")
IF (CDASH_TOKEN)
MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)
SITE_NAME(local_site_name)

View File

@ -21,6 +21,7 @@ typedef int TDMR;
static NCbytes* input = NULL;
static NCbytes* output = NULL;
static NCD4meta* metadata = NULL;
static NCD4response* resp = NULL;
static char* infile = NULL;
static char* outfile = NULL;
static int ncid = 0;
@ -85,16 +86,21 @@ setup(int tdmr, int argc, char** argv)
if(translatenc4)
controller->controls.translation = NCD4_TRANSNC4;
NCD4_applyclientfragmentcontrols(controller);
if((metadata=NCD4_newmeta(controller))==NULL)
fail(NC_ENOMEM);
metadata->mode = mode;
NCD4_attachraw(metadata, ncbyteslength(input),ncbytescontents(input));
if((ret=NCD4_dechunk(metadata))) /* ok for mode == DMR or mode == DAP */
if((ret=NCD4_newMeta(controller,&metadata)))
fail(ret);
if((ret=NCD4_newResponse(controller,&resp)))
fail(ret);
resp->raw.size = ncbyteslength(input);
resp->raw.memory = ncbytescontents(input);
resp->mode = mode;
if((ret=NCD4_dechunk(resp))) /* ok for mode == DMR or mode == DAP */
fail(ret);
#ifdef DEBUG
{
int swap = (metadata->serial.hostbigendian != metadata->serial.remotebigendian);
int swap = (controller->platform.hostlittleendian != resp->remotelittleendian);
void* d = metadata->serial.dap;
size_t sz = metadata->serial.dapsize;
fprintf(stderr,"====================\n");

View File

@ -20,7 +20,7 @@ main(int argc, char** argv)
fprintf(stderr,"t_dmrmeta %s -> %s\n",infile,outfile);
#endif
if((ret = NCD4_parse(metadata))) goto done;
if((ret = NCD4_parse(metadata,resp,0))) goto done;
if((ret = NCD4_metabuild(metadata,ncid))) goto done;
done:

View File

@ -17,7 +17,7 @@ main(int argc, char** argv)
setup(TDMR_PARSE,argc,argv);
if((ret = NCD4_parse(metadata))) goto done;
if((ret = NCD4_parse(metadata,resp,0))) goto done;
ret = NCD4_print(metadata,output);
ncbytesnull(output);
if(ret == NC_NOERR) {

View File

@ -1269,15 +1269,6 @@ HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = NO
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
@ -1865,14 +1856,6 @@ LATEX_HIDE_INDICES = NO
LATEX_BIB_STYLE = plain
# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_TIMESTAMP = NO
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the

View File

@ -67,9 +67,9 @@ https://thredds-test.unidata.ucar.edu/thredds/fileServer/irma/metar/files/METAR_
# References {#nccloud_bib}
<a name="ref_aws">[1]</a> [Amazon Simple Storage Service Documentation](https://docs.aws.amazon.com/s3/index.html)<br>
<a name="ref_awssdk">[2]</a> [Amazon Simple Storage Service Library](https://github.com/aws/aws-sdk-cpp)<br>
<a name="ref_awssdk_conda">[11]</a> [Conda-forge / packages / aws-sdk-cpp](https://anaconda.org/conda-forge/aws-sdk-cpp)<br>
<a name="cloud_ref_aws">[1]</a> [Amazon Simple Storage Service Documentation](https://docs.aws.amazon.com/s3/index.html)<br>
<a name="cloud_ref_awssdk">[2]</a> [Amazon Simple Storage Service Library](https://github.com/aws/aws-sdk-cpp)<br>
<a name="cloud_ref_awssdk_conda">[11]</a> [Conda-forge / packages / aws-sdk-cpp](https://anaconda.org/conda-forge/aws-sdk-cpp)<br>
# Appendix A. S3 Build Support {#nccloud_s3build}

View File

@ -520,7 +520,7 @@ Modifying the dispatch version requires two steps:
The two should agree in value.
### NC_DISPATCH_VERSION Incompatibility
## NC_DISPATCH_VERSION Incompatibility
When dynamically adding a dispatch table
-- in nc_def_user_format (see libdispatch/dfile.c) --

View File

@ -65,6 +65,28 @@ The concept of a variable-sized type is defined as follows:
then that compound type is variable-sized.
4. All other types are fixed-size.
## A Warning on Backward Compatibility {#filters_compatibility}
The API defined in this document should accurately reflect the
current state of filters in the netCDF-c library. Be aware that
there was a short period in which the filter code was undergoing
some revision and extension. Those extensions have largely been
reverted. Unfortunately, some users may experience some
compilation problems for previously working code because of
these reversions. In that case, please revise your code to
adhere to this document. Apologies are extended for any
inconvenience.
A user may encounter an incompatibility if any of the following appears in user code.
* The function *\_nc\_inq\_var\_filter* was returning the error value NC\_ENOFILTER if a variable had no associated filters.
It has been reverted to the previous case where it returns NC\_NOERR and the returned filter id was set to zero if the variable had no filters.
* The function *nc\_inq\_var\_filterids* was renamed to *nc\_inq\_var\_filter\_ids*.
* Some auxiliary functions for parsing textual filter specifications have been moved to the file *netcdf\_aux.h*. See [Appendix A](#filters_appendixa).
* All of the "filterx" functions have been removed. This is unlikely to cause problems because they had limited visibility.
For additional information, see [Appendix B](#filters_appendixb).
## Enabling A HDF5 Compression Filter {#filters_enable}
HDF5 supports dynamic loading of compression filters using the
@ -611,7 +633,99 @@ As part of its testing, the NetCDF build process creates a number of shared libr
If you need a filter from that set, you may be able to set *HDF5\_PLUGIN\_PATH*
to point to that directory or you may be able to copy the shared libraries out of that directory to your own location.
## Debugging {#filters_debug}
# Lossy One-Way Filters
As of NetCDF version 4.8.2, the netcdf-c library supports
bit-grooming filters.
````
Bit-grooming is a lossy compression algorithm that removes the
bloat due to false-precision, those bits and bytes beyond the
meaningful precision of the data. Bit Grooming is statistically
unbiased, applies to all floating point numbers, and is easy to
use. Bit-Grooming reduces data storage requirements by
25-80%. Unlike its best-known competitor Linear Packing, Bit
Grooming imposes no software overhead on users, and guarantees
its precision throughout the whole floating point range
[https://doi.org/10.5194/gmd-9-3199-2016].
````
The generic term "quantize" is used to refer collectively to the various
precision-trimming algorithms. The key thing to note about quantization is that
it occurs at the point of writing of data only. Since its output is
legal data, it does not need to be "de-quantized" when the data is read.
Because of this, quantization is not part of the standard filter
mechanism and has a separate API.
The API for bit-groom is currently as follows.
````
int nc_def_var_quantize(int ncid, int varid, int quantize_mode, int nsd);
int nc_inq_var_quantize(int ncid, int varid, int *quantize_modep, int *nsdp);
````
The *quantize_mode* argument specifies the particular algorithm.
Currently, three are supported: NC_QUANTIZE_BITGROOM, NC_QUANTIZE_GRANULARBR,
and NC_QUANTIZE_BITROUND. In addition quantization can be disabled using
the value NC_NOQUANTIZE.
The input to ncgen or the output from ncdump supports special attributes
to indicate if quantization was applied to a given variable.
These attributes have the following form.
````
_QuantizeBitGroomNumberOfSignificantDigits = <NSD>
or
_QuantizeGranularBitRoundNumberOfSignificantDigits = <NSD>
or
_QuantizeBitRoundNumberOfSignificantBits = <NSB>
````
The value NSD is the number of significant (decimal) digits to keep.
The value NSB is the number of bits to keep in the fraction part of an
IEEE754 floating-point number. Note that NSB of QuantizeBitRound is the same as
"number of explicit mantissa bits" (https://doi.org/10.5194/gmd-9-3199-2016) and same as
the number of "keep-bits" (https://doi.org/10.5194/gmd-14-377-2021), but is not
one less than the number of significant binary figures:
`_QuantizeBitRoundNumberOfSignificantBits = 0` means one significant binary figure,
`_QuantizeBitRoundNumberOfSignificantBits = 1` means two significant binary figures etc.
## Distortions introduced by lossy filters
Any lossy filter introduces distortions to data.
The lossy filters implemented in netcdf-c introduce a distortion
that can be quantified in terms of a _relative_ error. The magnitude of
distortion introduced to every single value V is guaranteed to be within
a certain fraction of V, expressed as 0.5 * V * 2**{-NSB}:
i.e. it is 0.5V for NSB=0, 0.25V for NSB=1, 0.125V for NSB=2 etc.
Two other methods use different definitions of _decimal precision_, though both
are guaranteed to reproduce NSD decimals when printed.
The margins for the relative error introduced by these methods are summarised in the table
```
NSD 1 2 3 4 5 6 7
BitGroom
Error Margin 3.1e-2 3.9e-3 4.9e-4 3.1e-5 3.8e-6 4.7e-7 -
GranularBitRound
Error Margin 1.4e-1 1.9e-2 2.2e-3 1.4e-4 1.8e-5 2.2e-6 -
```
If one defines decimal precision as in BitGroom, i.e. the introduced relative
error must not exceed half of the unit at the decimal place NSD in the
worst-case scenario, the following values of NSB should be used for BitRound:
```
NSD 1 2 3 4 5 6 7
NSB 3 6 9 13 16 19 23
```
The resulting application of BitRound is as fast as BitGroom, and is free from
artifacts in multipoint statistics introduced by BitGroom
(see https://doi.org/10.5194/gmd-14-377-2021).
# Debugging {#filters_debug}
Depending on the debugger one uses, debugging plugins can be very difficult.
It may be necessary to use the old printf approach for debugging the filter itself.
@ -625,7 +739,7 @@ This can be accomplished using this command.
Since ncdump is not being asked to access the data (the -h flag), it can obtain the filter information without failures.
Then it can print out the filter id and the parameters as well as the Codecs (via the -s flag).
### Test Cases {#filters_TestCase}
## Test Cases {#filters_TestCase}
Within the netcdf-c source tree, two directories contain test cases for testing dynamic filter operation.

View File

@ -8,13 +8,13 @@ This document attempts to record important information about
the internal architecture and operation of the netcdf-c library.
It covers the following issues.
* [Including C++ Code in the netcdf-c Library](#intern_c++)
* [Including C++ Code in the netcdf-c Library](#intern_cpp)
* [Managing instances of variable-length data types](#intern_vlens)
* [Inferring File Types](#intern_infer)
* [Adding a Standard Filter](#intern_filters)
* [Test Interference](#intern_isolation)
# 1. Including C++ Code in the netcdf-c Library {#intern_c++}
# 1. Including C++ Code in the netcdf-c Library {#intern_cpp}
The state of C compiler technology has reached the point where
it is possible to include C++ code into the netcdf-c library

View File

@ -13,7 +13,7 @@ The NetCDF homepage may be found at <a href="https://www.unidata.ucar.edu/softwa
You can find the documentation for netCDF-Fortran here:
- <a href="https://www.unidata.ucar.edu/software/netcdf/fortran/docs"> The NetCDF-Fortran Developer's Guide</a>
- <a href="https://docs.unidata.ucar.edu/netcdf-fortran/current/"> The NetCDF-Fortran Developer's Guide</a>
\section this_release Learn more about the current NetCDF-C Release

View File

@ -417,7 +417,7 @@ In order to accomodate existing implementations, certain mode tags are provided
## XArray
The Xarray <a href="#ref_xarray">[7]</a> Zarr implementation uses its own mechanism for specifying shared dimensions.
The Xarray [XArray Zarr Encoding Specification](http://xarray.pydata.org/en/latest/internals.html#zarr-encoding-specification) Zarr implementation uses its own mechanism for specifying shared dimensions.
It uses a special attribute named ''_ARRAY_DIMENSIONS''.
The value of this attribute is a list of dimension names (strings).
An example might be ````["time", "lon", "lat"]````.
@ -449,16 +449,16 @@ Here are a couple of examples using the _ncgen_ and _ncdump_ utilities.
```
4. Create an nczarr file using S3 as storage and keeping to the pure zarr format.
```
ncgen -4 -lb -o "s3://s3.uswest-1.amazonaws.com/datasetbucket#mode=zarr" dataset.cdl
ncgen -4 -lb -o 's3://s3.uswest-1.amazonaws.com/datasetbucket\#mode=zarr' dataset.cdl
```
5. Create an nczarr file using the s3 protocol with a specific profile
```
ncgen -4 -lb -o "s3://datasetbucket/rootkey#mode=nczarr,awsprofile=unidata" dataset.cdl
ncgen -4 -lb -o 's3://datasetbucket/rootkey\#mode=nczarr,awsprofile=unidata' dataset.cdl
```
Note that the URL is internally translated to this
````
https://s2.&lt;region&gt.amazonaws.com/datasetbucket/rootkey#mode=nczarr,awsprofile=unidata" dataset.cdl
````
```
'https://s2.&lt;region&gt.amazonaws.com/datasetbucket/rootkey#mode=nczarr,awsprofile=unidata' dataset.cdl
```
# References {#nczarr_bib}
@ -473,7 +473,7 @@ collections — High-performance dataset datatypes](https://docs.python.org/2/li
<a name="dynamic_filter_loading">[8]</a> [Dynamic Filter Loading](https://support.hdfgroup.org/HDF5/doc/Advanced/DynamicallyLoadedFilters/HDF5DynamicallyLoadedFilters.pdf)<br>
<a name="official_hdf5_filters">[9]</a> [Officially Registered Custom HDF5 Filters](https://portal.hdfgroup.org/display/support/Registered+Filter+Plugins)<br>
<a name="blosc-c-impl">[10]</a> [C-Blosc Compressor Implementation](https://github.com/Blosc/c-blosc)<br>
<a name="ref_awssdk_conda">[11]</a> [Conda-forge / packages / aws-sdk-cpp](https://anaconda.org/conda-forge/aws-sdk-cpp)<br>
<a name="ref_awssdk_conda">[11]</a> [Conda-forge packages / aws-sdk-cpp](https://anaconda.org/conda-forge/aws-sdk-cpp)<br>
<a name="ref_gdal">[12]</a> [GDAL Zarr](https://gdal.org/drivers/raster/zarr.html)<br>
# Appendix A. Building NCZarr Support {#nczarr_build}
@ -539,7 +539,7 @@ PATH="$PATH:${AWSSDKBIN}"
Then the following options must be specified for cmake.
````
-DAWSSDK_ROOT_DIR=${AWSSDK_ROOT_DIR}
-DAWSSDK_DIR=${AWSSDK_ROOT_DIR}/lib/cmake/AWSSDK"
-DAWSSDK_DIR=${AWSSDK_ROOT_DIR}/lib/cmake/AWSSDK
````
# Appendix B. Amazon S3 Imposed Limits {#nczarr_s3limits}
@ -578,7 +578,7 @@ can in fact be any legal JSON expression.
This "convention" is currently used routinely to help support various
attributes created by other packages where the attribute is a
complex JSON expression. An example is the GDAL Driver
convention <a href="#ref_gdal">[12]</a>, where the value is a complex
convention <a href='#ref_gdal'>[12]</a>, where the value is a complex
JSON dictionary.
In order for NCZarr to be as consistent as possible with Zarr Version 2,

0
docs/testserver.dox Executable file → Normal file
View File

View File

@ -45,7 +45,7 @@ main()
/* Create a bunch of phoney data so we have something to write in
the example file. */
for (fp=(float *)temp, i=0; i<LAT_LEN*LON_LEN; i++)
*fp++ = 10. + i/10.;
*fp++ = 10.f + (float)i/10.f;
/* Now create the file in both formats with the same code. */
for (i=0; i<2; i++)

View File

@ -40,10 +40,10 @@
#define DEGREES_NORTH "degrees_north"
/* These are used to construct some example data. */
#define SAMPLE_PRESSURE 900
#define SAMPLE_TEMP 9.0
#define START_LAT 25.0
#define START_LON -125.0
#define SAMPLE_PRESSURE 900.0f
#define SAMPLE_TEMP 9.0f
#define START_LAT 25.0f
#define START_LON -125.0f
/* For the units attributes. */
#define UNITS "units"
@ -87,16 +87,16 @@ main()
* would have some real data to write, for example, model
* output. */
for (lat = 0; lat < NLAT; lat++)
lats[lat] = START_LAT + 5.*lat;
lats[lat] = START_LAT + 5.f*(float)lat;
for (lon = 0; lon < NLON; lon++)
lons[lon] = START_LON + 5.*lon;
lons[lon] = START_LON + 5.f*(float)lon;
for (lvl = 0; lvl < NLVL; lvl++)
for (lat = 0; lat < NLAT; lat++)
for (lon = 0; lon < NLON; lon++)
{
pres_out[lvl][lat][lon] = SAMPLE_PRESSURE + i;
temp_out[lvl][lat][lon] = SAMPLE_TEMP + i++;
pres_out[lvl][lat][lon] = SAMPLE_PRESSURE + (float)i;
temp_out[lvl][lat][lon] = SAMPLE_TEMP + (float)i++;
}
/* Create the file. */

View File

@ -35,10 +35,10 @@
#define DEGREES_NORTH "degrees_north"
/* These are used to construct some example data. */
#define SAMPLE_PRESSURE 900
#define SAMPLE_TEMP 9.0
#define START_LAT 25.0
#define START_LON -125.0
#define SAMPLE_PRESSURE 900.0f
#define SAMPLE_TEMP 9.0f
#define START_LAT 25.0f
#define START_LON -125.0f
/* Handle errors by printing an error message and exiting with a
* non-zero status. */
@ -78,15 +78,15 @@ main()
* would have some real data to write, for example, model
* output. */
for (lat = 0; lat < NLAT; lat++)
lats[lat] = START_LAT + 5.*lat;
lats[lat] = START_LAT + 5.f*(float)lat;
for (lon = 0; lon < NLON; lon++)
lons[lon] = START_LON + 5.*lon;
lons[lon] = START_LON + 5.f*(float)lon;
for (lat = 0; lat < NLAT; lat++)
for (lon = 0; lon < NLON; lon++)
{
pres_out[lat][lon] = SAMPLE_PRESSURE + (lon * NLAT + lat);
temp_out[lat][lon] = SAMPLE_TEMP + .25 * (lon * NLAT + lat);
pres_out[lat][lon] = SAMPLE_PRESSURE + (float)(lon * NLAT + lat);
temp_out[lat][lon] = SAMPLE_TEMP + .25f * (float)(lon * NLAT + lat);
}
/* Create the file. */

View File

@ -75,7 +75,7 @@ main()
if (!(cvc_out[i].data[j].p = calloc(sizeof(struct s1), cvc_out[i].data[j].len))) ERR;
for (k = 0; k < cvc_out[i].data[j].len; k++)
{
((struct s1 *)cvc_out[i].data[j].p)[k].x = 42.42;
((struct s1 *)cvc_out[i].data[j].p)[k].x = 42.42f;
((struct s1 *)cvc_out[i].data[j].p)[k].y = 2.0;
}
}

View File

@ -75,7 +75,7 @@ main()
if (!(vc_out[i].p = calloc(sizeof(struct s1), vc_out[i].len))) ERR;
for (k = 0; k < vc_out[i].len; k++)
{
((struct s1 *)vc_out[i].p)[k].x = 42.42;
((struct s1 *)vc_out[i].p)[k].x = 42.42f;
((struct s1 *)vc_out[i].p)[k].y = 2.0;
}
}

View File

@ -301,10 +301,10 @@ main()
/* Initialize data. */
for (i = 0; i < DIM6_LEN; i++)
{
obsdata[i].day = 15 * i + 1;
obsdata[i].elev = 2 * i + 1;
obsdata[i].day = 15 * (char)i + 1;
obsdata[i].elev = 2 * (short)i + 1;
obsdata[i].count = 2 * i + 1;
obsdata[i].relhum = 2.0 * i + 1;
obsdata[i].relhum = 2.0f * (float)i + 1;
obsdata[i].time = 2.0 * i + 1;
}
@ -452,9 +452,9 @@ main()
hr_data_out[i].starfleet_id = i;
hr_data_out[i].svc_rec.i1 = 95;
hr_data_out[i].svc_rec.i2 = 90;
if (sprintf(hr_data_out[i].name, "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99;
hr_data_out[i].min_temp = -9.99;
if (snprintf(hr_data_out[i].name, sizeof(hr_data_out[i].name), "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99f;
hr_data_out[i].min_temp = -9.99f;
hr_data_out[i].percent_transporter_errosion = .1;
}
@ -557,9 +557,9 @@ main()
hr_data_out[i].starfleet_id = i;
hr_data_out[i].svc_rec.i1 = 95;
hr_data_out[i].svc_rec.i2 = 90;
if (sprintf(hr_data_out[i].name, "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99;
hr_data_out[i].min_temp = -9.99;
if (snprintf(hr_data_out[i].name, sizeof(hr_data_out[i].name), "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99f;
hr_data_out[i].min_temp = -9.99f;
hr_data_out[i].percent_transporter_errosion = .1;
}
@ -665,9 +665,9 @@ main()
hr_data_out[i].starfleet_id = i;
hr_data_out[i].svc_rec.i1 = 95;
hr_data_out[i].svc_rec.i2 = 90;
if (sprintf(hr_data_out[i].name, "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99;
hr_data_out[i].min_temp = -9.99;
if (snprintf(hr_data_out[i].name, sizeof(hr_data_out[i].name), "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99f;
hr_data_out[i].min_temp = -9.99f;
hr_data_out[i].percent_transporter_errosion = .1;
}
@ -756,8 +756,8 @@ main()
/* Create some phony data. */
for (i = 0; i < DIM1_LEN; i++)
{
if (sprintf(hr_data_out[i].name, "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99;
if (snprintf(hr_data_out[i].name, sizeof(hr_data_out[i].name), "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99f;
}
/* Open file and get root group. */
@ -841,8 +841,8 @@ main()
/* Create some phony data. */
for (i = 0; i < DIM2_LEN; i++)
{
if (sprintf(hr_data_out[i].name, "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99;
if (snprintf(hr_data_out[i].name, sizeof(hr_data_out[i].name), "alien_%d", i) < 0) ERR;
hr_data_out[i].max_temp = 99.99f;
}
/* Open file and get root group. */
@ -921,7 +921,7 @@ main()
{
for (j = 0; j < STR_LEN + 1; j++)
data_out[i].x[j] = 4;
data_out[i].y = 99.99;
data_out[i].y = 99.99f;
}
/* Set latest_format in access propertly list and

View File

@ -308,7 +308,7 @@ main()
if ((var1_spaceid = H5Screate_simple(1, dims, dims)) < 0) ERR;
for (v = 0; v < NUM_DATASETS; v++)
{
sprintf(var_name, "var_%d", v);
snprintf(var_name, sizeof(var_name), "var_%d", v);
if ((var1_datasetid[v] = H5Dcreate1(grpid, var_name, H5T_NATIVE_INT,
var1_spaceid, H5P_DEFAULT)) < 0) ERR;
if (H5DSattach_scale(var1_datasetid[v], dimscaleid, 0) < 0) ERR;

View File

@ -633,6 +633,7 @@ main()
hsize_t h5dimlen[DIMS2], h5dimlenmax[DIMS2], xtend_size[DIMS2] = {1, NUM_VALS};
hsize_t start[DIMS2] = {0, 0};
hsize_t count[DIMS2] = {1, NUM_VALS};
hsize_t ones[DIMS2] = {1, 1};
double value[NUM_VALS];
int dataset_ndims;
int i;
@ -661,7 +662,7 @@ main()
/* Set up the file and memory spaces. */
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
if ((mem_spaceid = H5Screate_simple(DIMS2, count, NULL)) < 0) ERR;
/* Write a slice of data. */
@ -683,7 +684,7 @@ main()
/* Set up the file and memory spaces for a second slice. */
start[0]++;
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
if ((mem_spaceid = H5Screate_simple(DIMS2, count, NULL)) < 0) ERR;
/* Write a second slice of data. */

View File

@ -93,7 +93,7 @@ main()
spaceid, create_propid)) < 0) ERR;
if (H5Sclose(spaceid) < 0) ERR;
if (H5Pclose(create_propid) < 0) ERR;
sprintf(dimscale_wo_var, "%s%10d", DIM_WITHOUT_VARIABLE, DIM_LEN);
snprintf(dimscale_wo_var, sizeof(dimscale_wo_var), "%s%10d", DIM_WITHOUT_VARIABLE, DIM_LEN);
if (H5DSset_scale(dimscaleid, dimscale_wo_var) < 0) ERR;
/* Create a variable that uses this dimension scale. */

View File

@ -154,7 +154,7 @@ main()
char lang[NUM_LANG][STR_LEN + 1] = {"C", "Fortran", "C++", "MISSING"};
enum langs {CLANG=0, Fortran=1, CPP=2, MISSING=255};
short the_value, fill_value = MISSING, data_point = CLANG;
hsize_t start[1] = {1}, count[1] = {1};
hsize_t start[1] = {1}, count[1] = {1}, one[1] = {1};
int num_members;
size_t size;
hid_t base_hdf_typeid;
@ -197,7 +197,7 @@ main()
if ((mem_spaceid = H5Screate(H5S_SCALAR)) < 0) ERR;
if ((file_spaceid = H5Screate_simple(1, dims, NULL)) < 0) ERR;
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, one, count) < 0) ERR;
if (H5Dwrite(datasetid, typeid, mem_spaceid, file_spaceid,
H5P_DEFAULT, &data_point) < 0) ERR;

View File

@ -174,7 +174,7 @@ main()
#define MILLION 1000000
hid_t fileid, write_spaceid, datasetid, mem_spaceid;
hsize_t start[NDIMS], count[NDIMS];
hsize_t start[NDIMS], count[NDIMS], ones[NDIMS];
hsize_t dims[1];
int *data;
int num_steps;
@ -210,8 +210,9 @@ main()
{
/* Select hyperslab for write of one slice. */
start[0] = s * SC;
ones[0] = 1;
if (H5Sselect_hyperslab(write_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
if (H5Dwrite(datasetid, H5T_NATIVE_INT, mem_spaceid, write_spaceid,
H5P_DEFAULT, data) < 0) ERR;
@ -242,13 +243,13 @@ main()
hid_t mem_spaceid, xfer_plistid, native_typeid;
hsize_t *chunksize, dims[1], maxdims[1], *dimsize, *maxdimsize;
hsize_t fdims[MAX_DIMS], fmaxdims[MAX_DIMS];
hsize_t start[MAX_DIMS], count[MAX_DIMS];
hsize_t start[MAX_DIMS], count[MAX_DIMS], ones[MAX_DIMS];
char file_name[STR_LEN + 1];
char dimscale_wo_var[STR_LEN];
void *bufr;
void *fillp = NULL;
sprintf(file_name, "%s/%s", TEMP_LARGE, FILE_NAME);
snprintf(file_name, sizeof(file_name), "%s/%s", TEMP_LARGE, FILE_NAME);
/* Create file access and create property lists. */
if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) ERR;
@ -285,7 +286,7 @@ main()
spaceid, plistid)) < 0) ERR;
if (H5Sclose(spaceid) < 0) ERR;
if (H5Pclose(plistid) < 0) ERR;
sprintf(dimscale_wo_var, "%s%10d", DIM_WITHOUT_VARIABLE, DIM1);
snprintf(dimscale_wo_var, sizeof(dimscale_wo_var), "%s%10d", DIM_WITHOUT_VARIABLE, DIM1);
if (H5DSset_scale(dim1_dimscaleid, dimscale_wo_var) < 0) ERR;
/* Create the dim2 dimscale. */
@ -299,7 +300,7 @@ main()
spaceid, plistid)) < 0) ERR;
if (H5Sclose(spaceid) < 0) ERR;
if (H5Pclose(plistid) < 0) ERR;
sprintf(dimscale_wo_var, "%s%10d", DIM_WITHOUT_VARIABLE, DIM2);
snprintf(dimscale_wo_var, sizeof(dimscale_wo_var), "%s%10d", DIM_WITHOUT_VARIABLE, DIM2);
if (H5DSset_scale(dim2_dimscaleid, dimscale_wo_var) < 0) ERR;
/* Now create the 2D dataset. */
@ -342,7 +343,8 @@ main()
start[1] = 0;
count[0] = 1;
count[1] = 2097153;
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, start, NULL, count, NULL) < 0) ERR;
ones[0] = ones[1] = 1;
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, start, NULL, ones, count) < 0) ERR;
if ((mem_spaceid = H5Screate_simple(NDIMS2, count, NULL)) < 0) ERR;
if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0) ERR;
if ((native_typeid = H5Tget_native_type(H5T_NATIVE_SCHAR, H5T_DIR_DEFAULT)) < 0) ERR;

View File

@ -67,7 +67,7 @@ main()
/* Create the variables. */
for (v = 0; v < NUM_DATASETS; v++)
{
sprintf(var_name, "var_%d", v);
snprintf(var_name, sizeof(var_name), "var_%d", v);
/* printf("creating var %s\n", var_name);*/
if ((datasetid[v] = H5Dcreate1(grpid, var_name, H5T_NATIVE_INT,
spaceid, plistid)) < 0) ERR_RET;

View File

@ -66,7 +66,7 @@ main(int argc, char **argv)
printf("*** Creating file for parallel I/O read, and rereading it...");
{
hid_t fapl_id, fileid, whole_spaceid, dsid, slice_spaceid, whole_spaceid1, xferid;
hsize_t start[NDIMS], count[NDIMS];
hsize_t start[NDIMS], count[NDIMS], ones[NDIMS];
hsize_t dims[1];
int data[SC1], data_in[SC1];
int num_steps;
@ -126,8 +126,9 @@ main(int argc, char **argv)
/* Select hyperslab for write of one slice. */
start[0] = s * SC1 * p + my_rank * SC1;
count[0] = SC1;
ones[0] = 1;
if (H5Sselect_hyperslab(whole_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
if (H5Dwrite(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid,
xferid, data) < 0) ERR;
@ -185,8 +186,9 @@ main(int argc, char **argv)
/* Select hyperslab for read of one slice. */
start[0] = s * SC1 * p + my_rank * SC1;
count[0] = SC1;
ones[0] = 1;
if (H5Sselect_hyperslab(whole_spaceid1, H5S_SELECT_SET,
start, NULL, count, NULL) < 0)
start, NULL, ones, count) < 0)
{
ERR;
return 2;

View File

@ -51,7 +51,7 @@ main(int argc, char **argv)
{
hid_t fapl_id, fileid, whole_spaceid, dsid, slice_spaceid, whole_spaceid1, xferid;
hid_t plistid;
hsize_t start[NDIMS], count[NDIMS];
hsize_t start[NDIMS], count[NDIMS], ones[NDIMS];
hsize_t dims[1], chunksize = SC1;
int data[SC1], data_in[SC1];
int num_steps;
@ -120,8 +120,9 @@ main(int argc, char **argv)
/* Select hyperslab for write of one slice. */
start[0] = s * SC1 * p + my_rank * SC1;
count[0] = SC1;
ones[0] = 1;
if (H5Sselect_hyperslab(whole_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
if (H5Dwrite(dsid, H5T_NATIVE_INT, slice_spaceid, whole_spaceid,
xferid, data) < 0) ERR;
@ -160,8 +161,9 @@ main(int argc, char **argv)
/* Select hyperslab for read of one slice. */
start[0] = s * SC1 * p + my_rank * SC1;
count[0] = SC1;
ones[0] = 1;
if (H5Sselect_hyperslab(whole_spaceid1, H5S_SELECT_SET,
start, NULL, count, NULL) < 0)
start, NULL, ones, count) < 0)
{
ERR;
return 2;

View File

@ -30,6 +30,7 @@ main()
hid_t file_spaceid, mem_spaceid;
hsize_t dims[1] = {0}, max_dims[1] = {H5S_UNLIMITED}, chunk_dims[1] = {1};
hsize_t xtend_size[NDIMS] = {2}, start[NDIMS] = {1}, count[NDIMS] = {1};
hsize_t ones[NDIMS] = {1};
/* void *fillp;*/
char *data = "A man who carries a cat by the tail learns "
"something he can learn in no other way.";
@ -91,7 +92,7 @@ main()
/* Select space in file to write a record. */
if ((file_spaceid = H5Dget_space(datasetid)) < 0) ERR;
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
/* Select space in memory to read from. */
if ((mem_spaceid = H5Screate_simple(NDIMS, count, NULL)) < 0) ERR;
@ -126,7 +127,7 @@ main()
hid_t typeid, datasetid, plistid;
hid_t file_spaceid, mem_spaceid;
hsize_t dims[1] = {2}, chunk_dims[1] = {1};
hsize_t start[NDIMS] = {1}, count[NDIMS] = {1};
hsize_t start[NDIMS] = {1}, count[NDIMS] = {1}, ones[NDIMS] = {1};
/* void *fillp;*/
char *data = "A man who carries a cat by the tail learns "
"something he can learn in no other way.";
@ -179,7 +180,7 @@ To be good is noble; but to show others how to be good is nobler and no trouble.
/* Select space in file to write a record. */
if ((file_spaceid = H5Dget_space(datasetid)) < 0) ERR;
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
/* Select space in memory to read from. */
if ((mem_spaceid = H5Screate_simple(NDIMS, count, NULL)) < 0)

View File

@ -69,14 +69,14 @@ main()
float float_data_out[LAT_LEN][LON_LEN];
hsize_t dims[NDIMS], max_dims[NDIMS];
hsize_t dims_in[NDIMS], max_dims_in[NDIMS];
hsize_t start[MAX_DIMS], count[MAX_DIMS];
hsize_t start[MAX_DIMS], count[MAX_DIMS], ones[MAX_DIMS];
int lat, lon;
/* Set up some phoney data, 1 record's worth. In C, first
* dimension varies most slowly. */
for (lat = 0; lat < LAT_LEN; lat++)
for (lon = 0; lon < LON_LEN; lon++)
float_data_out[lat][lon] = -666.666;
float_data_out[lat][lon] = -666.666f;
/* Create file and group. */
if ((fileid = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT,
@ -153,8 +153,9 @@ main()
start[0] = 1;
start[1] = 0;
start[2] = 0;
ones[0] = ones[1] = ones[2] = 1;
if (H5Sselect_hyperslab(write_spaceid, H5S_SELECT_SET,
start, NULL, count, NULL) < 0) ERR;
start, NULL, ones, count) < 0) ERR;
/* Write second record of data to each dataset. */
if (H5Dwrite(pres_dsid, H5T_IEEE_F32LE, mem_spaceid, write_spaceid,
@ -517,7 +518,7 @@ main()
"szip_and_zlib"};
/* Open file and create group. */
sprintf(file_name, "%s_%s.h5", TEST_NAME, desc[f]);
snprintf(file_name, sizeof(file_name), "%s_%s.h5", TEST_NAME, desc[f]);
if ((fileid = H5Fcreate(file_name, H5F_ACC_TRUNC, H5P_DEFAULT,
H5P_DEFAULT)) < 0) ERR;
if ((grpid = H5Gcreate1(fileid, GRP_NAME, 0)) < 0) ERR;

View File

@ -199,7 +199,7 @@ main()
hid_t did, fapl_id, fcpl_id, gcpl_id, attid;
hsize_t num_obj;
hid_t fileid, grpid, spaceid;
float val = 3.1495;
float val = 3.1495f;
#if H5_VERSION_GE(1,12,0)
H5O_info2_t obj_info;
#else
@ -478,7 +478,7 @@ main()
/* Create the variables. */
for (v = 0; v < NUM_DATASETS; v++)
{
sprintf(var_name, "var_%d", v);
snprintf(var_name, sizeof(var_name), "var_%d", v);
/* printf("creating var %s\n", var_name);*/
if ((datasetid[v] = H5Dcreate1(grpid, var_name, H5T_NATIVE_INT,
spaceid, plistid)) < 0) ERR_RET;

View File

@ -7,6 +7,7 @@
#include "h5_err_macros.h"
#include <hdf5.h>
#include <stddef.h>
#define FILE_NAME "tst_h_vl.h5"
#define DIM1_LEN 3
@ -23,7 +24,7 @@ main()
hsize_t dims[1] = {DIM1_LEN};
hvl_t data[DIM1_LEN], data_in[DIM1_LEN];
int *phoney;
int i, j;
size_t i, j;
size_t size;
/* Create some phoney data, an array of struct s1, which holds a

View File

@ -141,7 +141,7 @@ main(int argc, char **argv)
/* Create some HDF4 datasets. */
for (t = 0; t < NUM_TYPES; t++)
{
sprintf(tmp_name, "hdf4_dataset_type_%d", t);
snprintf(tmp_name, sizeof(tmp_name), "hdf4_dataset_type_%d", t);
if ((sds_id = SDcreate(sd_id, tmp_name, hdf4_type[t],
DIMS_3, dim_size)) == FAIL) ERR;
/* Set up dimensions. By giving them the same names for each

View File

@ -131,7 +131,7 @@ typedef struct NC_OBJ
{
NC_SORT sort; /**< Type of object. */
char* name; /**< Name, assumed to be null terminated. */
size_t id; /**< This objects ID. */
int id; /**< This objects ID. */
} NC_OBJ;
/**

View File

@ -61,7 +61,7 @@ extern "C" {
#endif
/* WARNING: in some systems, these functions may be defined as macros, so check */
#ifndef HAVE_STRDUP
#if ! defined(HAVE_STRDUP) || defined(__CYGWIN__)
#ifndef strdup
char* strdup(const char*);
#endif

View File

@ -4,6 +4,7 @@
#define NCLIST_H 1
#include "ncexternl.h"
#include <stddef.h>
/* Define the type of the elements in the list*/

View File

@ -14,6 +14,9 @@
#ifdef HAVE_DIRENT_H
#include <dirent.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

View File

@ -53,6 +53,9 @@ typedef struct NCRCinfo {
NClist* s3profiles; /* NClist<struct AWSprofile*> */
} NCRCinfo;
/* Opaque structures */
struct NCS3INFO;
#if defined(__cplusplus)
extern "C" {
#endif
@ -94,7 +97,7 @@ EXTERNL int NC_getactives3profile(NCURI* uri, const char** profilep);
EXTERNL int NC_s3profilelookup(const char* profile, const char* key, const char** valuep);
EXTERNL int NC_authgets3profile(const char* profile, struct AWSprofile** profilep);
EXTERNL int NC_iss3(NCURI* uri);
EXTERNL int NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** newurlp);
EXTERNL int NC_s3urlrebuild(NCURI* url, struct NCS3INFO* s3, NCURI** newurlp);
#if defined(__cplusplus)
}

View File

@ -6,6 +6,14 @@
#ifndef NCS3SDK_H
#define NCS3SDK_H 1
#define AWSHOST ".amazonaws.com"
#define GOOGLEHOST "storage.googleapis.com"
/* Track the server type, if known */
typedef enum NCS3SVC {NCS3UNK=0, /* unknown */
NCS3=1, /* s3.amazon.aws */
NCS3GS=0 /* storage.googleapis.com */
} NCS3SVC;
typedef struct NCS3INFO {
char* host; /* non-null if other*/
@ -13,6 +21,7 @@ typedef struct NCS3INFO {
char* bucket; /* bucket name */
char* rootkey;
char* profile;
NCS3SVC svc;
} NCS3INFO;
#ifdef __cplusplus

View File

@ -52,7 +52,7 @@ parseServers(const char* remotetestservers)
/* Keep LGTM quiet */
if(rtslen > MAXREMOTETESTSERVERS) goto done;
list = (char**)malloc(sizeof(char*) * (int)(rtslen/2));
list = (char**)malloc(sizeof(char*) * (rtslen/2));
if(list == NULL) return NULL;
rts = strdup(remotetestservers);
if(rts == NULL) goto done;

View File

@ -150,14 +150,14 @@ typedef struct timeinfo_t {
# define MSC_NCTIME_EXTRA __declspec(dllimport)
# endif
MSC_NCTIME_EXTRA extern void cdRel2Iso(cdCalenType timetype, char* relunits, int separator, double reltime, char* chartime);
MSC_NCTIME_EXTRA extern void cdRel2Iso(cdCalenType timetype, char* relunits, int separator, double reltime, char* chartime, size_t chartime_size);
MSC_NCTIME_EXTRA extern void cdChar2Comp(cdCalenType timetype, char* chartime, cdCompTime* comptime);
MSC_NCTIME_EXTRA extern void Cdh2e(CdTime *htime, double *etime);
MSC_NCTIME_EXTRA extern void Cde2h(double etime, CdTimeType timeType, long baseYear, CdTime *htime);
MSC_NCTIME_EXTRA extern int cdParseRelunits(cdCalenType timetype, char* relunits, cdUnitTime* unit, cdCompTime* base_comptime);
MSC_NCTIME_EXTRA extern int cdSetErrOpts(int opts);
#else
extern void cdRel2Iso(cdCalenType timetype, char* relunits, int separator, double reltime, char* chartime);
extern void cdRel2Iso(cdCalenType timetype, char* relunits, int separator, double reltime, char* chartime, size_t chartime_size);
extern void cdChar2Comp(cdCalenType timetype, char* chartime, cdCompTime* comptime);
extern void Cdh2e(CdTime *htime, double *etime);
extern void Cde2h(double etime, CdTimeType timeType, long baseYear, CdTime *htime);

View File

@ -35,12 +35,8 @@ typedef struct NCURI {
char* path; /*!< path */
char* query; /*!< query */
char* fragment; /*!< fragment */
char** fraglist; /* envv style list of decomposed fragment*/
char** querylist; /* envv style list of decomposed query*/
#if 0
char* projection; /*!< without leading '?'*/
char* selection; /*!< with leading '&'*/
#endif
void* fraglist; /* some representation of the decomposed fragment string */
void* querylist; /* some representation of the decomposed query string */
} NCURI;
#if 0
@ -90,6 +86,18 @@ EXTERNL int ncurisetfragmentkey(NCURI* duri,const char* key, const char* value);
/* append a specific &key=...& in uri fragment */
EXTERNL int ncuriappendfragmentkey(NCURI* duri,const char* key, const char* value);
/* Replace a specific &key=...& in uri query */
EXTERNL int ncurisetquerykey(NCURI* duri,const char* key, const char* value);
/* append a specific &key=...& in uri query */
EXTERNL int ncuriappendquerykey(NCURI* duri,const char* key, const char* value);
/* Get the actual list of queryies */
EXTERNL void* ncuriqueryparams(NCURI* uri);
/* Get the actual list of frags */
EXTERNL void* ncurifragmentparams(NCURI* uri);
/* Construct a complete NC URI; caller frees returned string */
EXTERNL char* ncuribuild(NCURI*,const char* prefix, const char* suffix, int flags);
@ -105,12 +113,6 @@ EXTERNL const char* ncurifragmentlookup(NCURI*, const char* param);
*/
EXTERNL const char* ncuriquerylookup(NCURI*, const char* param);
/* Obtain the complete list of fragment pairs in envv format */
EXTERNL const char** ncurifragmentparams(NCURI*);
/* Obtain the complete list of query pairs in envv format */
EXTERNL const char** ncuriqueryparams(NCURI*);
/* URL Encode/Decode */
EXTERNL char* ncuridecode(const char* s);
/* Partial decode */

View File

@ -23,9 +23,9 @@
#ifndef NETCDF_META_H
#define NETCDF_META_H
#define NC_VERSION_MAJOR @NC_VERSION_MAJOR@ /*!< netcdf-c major version. */
#define NC_VERSION_MINOR @NC_VERSION_MINOR@ /*!< netcdf-c minor version. */
#define NC_VERSION_PATCH @NC_VERSION_PATCH@ /*!< netcdf-c patch version. */
#define NC_VERSION_MAJOR @netCDF_VERSION_MAJOR@ /*!< netcdf-c major version. */
#define NC_VERSION_MINOR @netCDF_VERSION_MINOR@ /*!< netcdf-c minor version. */
#define NC_VERSION_PATCH @netCDF_VERSION_PATCH@ /*!< netcdf-c patch version. */
#define NC_VERSION_NOTE "@NC_VERSION_NOTE@" /*!< netcdf-c note. May be blank. */
/*! netcdf-c version string.

View File

@ -6,6 +6,24 @@
# See netcdf-c/COPYRIGHT file for more info.
SET(dap2_SOURCES constraints.c dapcvt.c dapodom.c daputil.c ncdaperr.c cdf.c cache.c dapdump.c dapdebug.c dapattr.c ncd2dispatch.c getvara.c dceconstraints.c dcetab.c dceparse.c dcelex.c)
##
# Turn off inclusion of particular files when using the cmake-native
# option to turn on Unity Builds.
#
# For more information, see:
# * https://github.com/Unidata/netcdf-c/pull/2839/
# * https://cmake.org/cmake/help/latest/prop_tgt/UNITY_BUILD.html
# * https://cmake.org/cmake/help/latest/prop_tgt/UNITY_BUILD_MODE.html#prop_tgt:UNITY_BUILD_MODE
##
set_property(SOURCE ncd2dispatch.c
PROPERTY
SKIP_UNITY_BUILD_INCLUSION ON)
set_property(SOURCE ncd2dispatch.c
PROPERTY
SKIP_UNITY_BUILD_INCLUSION ON)
add_library(dap2 OBJECT ${dap2_SOURCES})
IF(STATUS_PARALLEL)

View File

@ -5,6 +5,7 @@
#include "dapincludes.h"
#include "dapdump.h"
#include <stddef.h>
/*
Grads servers always require a constraint,
@ -24,7 +25,9 @@ static int iscacheableconstraint(DCEconstraint* con);
int
iscached(NCDAPCOMMON* nccomm, CDFnode* target, NCcachenode** cachenodep)
{
int i,j,found,index;
int i, found;
size_t j;
size_t index;
NCcache* cache;
NCcachenode* cachenode;
@ -92,7 +95,7 @@ else
NCerror
prefetchdata(NCDAPCOMMON* nccomm)
{
int i;
size_t i;
NCFLAGS flags;
NCerror ncstat = NC_NOERR;
NClist* allvars = nccomm->cdf.ddsroot->tree->varnodes;
@ -341,7 +344,7 @@ fprintf(stderr,"freecachenode: %s\n",
void
freenccache(NCDAPCOMMON* nccomm, NCcache* cache)
{
int i;
size_t i;
if(cache == NULL) return;
freenccachenode(nccomm,cache->prefetch);
for(i=0;i<nclistlength(cache->nodes);i++) {
@ -367,7 +370,8 @@ createnccache(void)
static int
iscacheableprojection(DCEprojection* proj)
{
int i,cacheable;
size_t i;
int cacheable;
if(proj->discrim != CES_VAR) return 0;
cacheable = 1; /* assume so */
for(i=0;i<nclistlength(proj->var->segments);i++) {
@ -380,7 +384,7 @@ iscacheableprojection(DCEprojection* proj)
static int
iscacheableconstraint(DCEconstraint* con)
{
int i;
size_t i;
if(con == NULL) return 1;
if(con->selections != NULL && nclistlength(con->selections) > 0)
return 0; /* can't deal with selections */
@ -400,7 +404,7 @@ A variable is prefetchable if
NCerror
markprefetch(NCDAPCOMMON* nccomm)
{
int i,j;
size_t i,j;
NClist* allvars = nccomm->cdf.fullddsroot->tree->varnodes;
assert(allvars != NULL);
/* mark those variables of sufficiently small size */

View File

@ -6,6 +6,7 @@
#include "dapincludes.h"
#include "daputil.h"
#include "dapdump.h"
#include <stddef.h>
#ifdef DAPDEBUG
extern char* ocfqn(OCddsnode);
@ -70,7 +71,7 @@ computecdfnodesets(NCDAPCOMMON* nccomm, CDFtree* tree)
NCerror
computevarnodes(NCDAPCOMMON* nccomm, NClist* allnodes, NClist* varnodes)
{
unsigned int i,len;
size_t i, len;
NClist* allvarnodes = nclistnew();
for(i=0;i<nclistlength(allnodes);i++) {
CDFnode* node = (CDFnode*)nclistget(allnodes,i);
@ -433,7 +434,7 @@ we expected a grid.
static int
restructr(NCDAPCOMMON* ncc, CDFnode* dxdparent, CDFnode* patternparent, NClist* repairlist)
{
int index, i, j, match;
size_t index, i, j, match;
#ifdef DEBUG
fprintf(stderr,"restruct: matched: %s -> %s\n",
@ -501,7 +502,7 @@ static NCerror
repairgrids(NCDAPCOMMON* ncc, NClist* repairlist)
{
NCerror ncstat = NC_NOERR;
int i;
size_t i;
assert(nclistlength(repairlist) % 2 == 0);
for(i=0;i<nclistlength(repairlist);i+=2) {
CDFnode* node = (CDFnode*)nclistget(repairlist,i);
@ -541,7 +542,7 @@ structwrap(NCDAPCOMMON* ncc, CDFnode* node, CDFnode* parent, int parentindex,
static int
findin(CDFnode* parent, CDFnode* child)
{
int i;
size_t i;
NClist* subnodes = parent->subnodes;
for(i=0;i<nclistlength(subnodes);i++) {
if(nclistget(subnodes,i) == child)
@ -674,13 +675,13 @@ dimimprint(NCDAPCOMMON* nccomm)
{
NCerror ncstat = NC_NOERR;
NClist* allnodes;
int i,j;
size_t i,j;
CDFnode* basenode;
allnodes = nccomm->cdf.ddsroot->tree->nodes;
for(i=0;i<nclistlength(allnodes);i++) {
CDFnode* node = (CDFnode*)nclistget(allnodes,i);
int noderank, baserank;
size_t noderank, baserank;
/* Do dimension imprinting */
basenode = node->basenode;
if(basenode == NULL) continue;
@ -689,7 +690,7 @@ dimimprint(NCDAPCOMMON* nccomm)
if(noderank == 0) continue;
ASSERT(noderank == baserank);
#ifdef DEBUG
fprintf(stderr,"dimimprint %s/%d -> %s/%d\n",
fprintf(stderr,"dimimprint %s/%zu -> %s/%zu\n",
makecdfpathstring(basenode,"."),
noderank,
makecdfpathstring(node,"."),
@ -725,7 +726,7 @@ static NClist*
clonedimset(NCDAPCOMMON* nccomm, NClist* dimset, CDFnode* var)
{
NClist* result = NULL;
int i;
size_t i;
for(i=0;i<nclistlength(dimset);i++) {
CDFnode *dim = NULL;
@ -768,7 +769,7 @@ definedimsetplus(NCDAPCOMMON* nccomm/*notused*/, CDFnode* node)
static NCerror
definedimsetall(NCDAPCOMMON* nccomm/*notused*/, CDFnode* node)
{
int i;
size_t i;
int ncstat = NC_NOERR;
NClist* dimsetall = NULL;
@ -795,7 +796,7 @@ fprintf(stderr,"dimsetall: |%s|=%d\n",node->ocname,(int)nclistlength(dimsetall))
static NCerror
definetransdimset(NCDAPCOMMON* nccomm/*notused*/, CDFnode* node)
{
int i;
size_t i;
int ncstat = NC_NOERR;
NClist* dimsettrans = NULL;
@ -842,7 +843,7 @@ Recursive helper for definedimsettrans3
static NCerror
definedimsettransR(NCDAPCOMMON* nccomm, CDFnode* node)
{
int i;
size_t i;
int ncstat = NC_NOERR;
definetransdimset(nccomm,node);
@ -882,7 +883,7 @@ Recursive helper
static NCerror
definedimsetsR(NCDAPCOMMON* nccomm, CDFnode* node)
{
int i;
size_t i;
int ncstat = NC_NOERR;
definedimsetplus(nccomm,node);
@ -1057,7 +1058,7 @@ buildcdftreer(NCDAPCOMMON* nccomm, OCddsnode ocnode, CDFnode* container,
void
freecdfroot(CDFnode* root)
{
int i;
size_t i;
CDFtree* tree;
NCDAPCOMMON* nccomm;
if(root == NULL) return;
@ -1187,7 +1188,7 @@ fix1node(NCDAPCOMMON* nccomm, CDFnode* node)
static NCerror
fixnodes(NCDAPCOMMON* nccomm, NClist* cdfnodes)
{
int i;
size_t i;
for(i=0;i<nclistlength(cdfnodes);i++) {
CDFnode* node = (CDFnode*)nclistget(cdfnodes,i);
NCerror err = fix1node(nccomm,node);

View File

@ -7,6 +7,7 @@
#include "dceparselex.h"
#include "dceconstraints.h"
#include "dapdump.h"
#include <stddef.h>
static void completesegments(NClist* fullpath, NClist* segments);
static NCerror qualifyprojectionnames(DCEprojection* proj);
@ -295,7 +296,7 @@ matchnode->ncfullname,dumpsegments(segments));
break;
default: {
CDFnode* minnode = NULL;
int minpath = 0;
size_t minpath = 0;
int nmin = 0; /* to catch multiple ones with same short path */
/* ok, see if one of the matches has a path that is shorter
then all the others */
@ -338,7 +339,8 @@ done:
static int
matchsuffix(NClist* matchpath, NClist* segments)
{
int i,pathstart;
size_t i;
int pathstart;
int nsegs = nclistlength(segments);
int pathlen = nclistlength(matchpath);
int segmatch;
@ -356,7 +358,7 @@ matchsuffix(NClist* matchpath, NClist* segments)
matching as we go
*/
for(i=0;i<nsegs;i++) {
CDFnode* node = (CDFnode*)nclistget(matchpath,pathstart+i);
CDFnode* node = (CDFnode*)nclistget(matchpath, (size_t)pathstart+i);
DCEsegment* seg = (DCEsegment*)nclistget(segments,i);
int rank = seg->rank;
segmatch = 1; /* until proven otherwise */
@ -386,12 +388,12 @@ dapbuildvaraprojection(CDFnode* var,
const size_t* startp, const size_t* countp, const ptrdiff_t* stridep,
DCEprojection** projectionp)
{
int i,j;
size_t i,j;
NCerror ncstat = NC_NOERR;
DCEprojection* projection = NULL;
NClist* path = nclistnew();
NClist* segments = NULL;
int dimindex;
size_t dimindex;
/* Build a skeleton projection that has 1 segment for
every cdfnode from root to the variable of interest.
@ -463,9 +465,10 @@ dapiswholeslice(DCEslice* slice, CDFnode* dim)
int
dapiswholesegment(DCEsegment* seg)
{
int i,whole;
size_t i;
int whole;
NClist* dimset = NULL;
unsigned int rank;
size_t rank;
if(seg->rank == 0) return 1;
if(!seg->slicesdefined) return 0;
@ -483,7 +486,8 @@ dapiswholesegment(DCEsegment* seg)
int
dapiswholeprojection(DCEprojection* proj)
{
int i,whole;
size_t i;
int whole;
ASSERT((proj->discrim == CES_VAR));
@ -498,7 +502,7 @@ dapiswholeprojection(DCEprojection* proj)
int
dapiswholeconstraint(DCEconstraint* con)
{
int i;
size_t i;
if(con == NULL) return 1;
if(con->projections != NULL) {
for(i=0;i<nclistlength(con->projections);i++) {
@ -528,7 +532,7 @@ The term "expanded" means
NCerror
dapfixprojections(NClist* list)
{
int i,j,k;
size_t i,j,k;
NCerror ncstat = NC_NOERR;
NClist* tmp = nclistnew(); /* misc. uses */
@ -619,12 +623,12 @@ next: continue;
} /*for(;;)*/
/* remove all NULL elements */
for(i=nclistlength(list)-1;i>=0;i--) {
DCEprojection* target = (DCEprojection*)nclistget(list,i);
int n;
for(n=nclistlength(list)-1;n>=0;n--) {
DCEprojection* target = (DCEprojection*)nclistget(list,n);
if(target == NULL)
nclistremove(list,i);
nclistremove(list,n);
}
done:
#ifdef DEBUG
fprintf(stderr,"fixprojection: exploded = %s\n",dumpprojections(list));
@ -661,7 +665,7 @@ projectify(CDFnode* field, DCEprojection* container)
static int
slicematch(NClist* seglist1, NClist* seglist2)
{
int i,j;
size_t i,j;
if((seglist1 == NULL || seglist2 == NULL) && seglist1 != seglist2)
return 0;
if(nclistlength(seglist1) != nclistlength(seglist2))
@ -691,7 +695,7 @@ slicematch(NClist* seglist1, NClist* seglist2)
int
dapvar2projection(CDFnode* var, DCEprojection** projectionp)
{
int i,j;
size_t i,j;
int ncstat = NC_NOERR;
NClist* path = nclistnew();
NClist* segments;
@ -707,7 +711,7 @@ dapvar2projection(CDFnode* var, DCEprojection** projectionp)
for(i=0;i<nclistlength(path);i++) {
DCEsegment* segment = (DCEsegment*)dcecreate(CES_SEGMENT);
CDFnode* n = (CDFnode*)nclistget(path,i);
int localrank;
size_t localrank;
NClist* dimset;
segment->annotation = (void*)n;
@ -757,7 +761,7 @@ int
daprestrictprojection(NClist* projections, DCEprojection* var, DCEprojection** resultp)
{
int ncstat = NC_NOERR;
int i;
size_t i;
DCEprojection* result = NULL;
#ifdef DEBUG1
fprintf(stderr,"restrictprojection.before: constraints=|%s| vara=|%s|\n",
@ -817,7 +821,7 @@ int
dapshiftprojection(DCEprojection* projection)
{
int ncstat = NC_NOERR;
int i,j;
size_t i,j;
NClist* segments;
#ifdef DEBUG1
@ -849,7 +853,7 @@ dapcomputeprojectedvars(NCDAPCOMMON* dapcomm, DCEconstraint* constraint)
{
NCerror ncstat = NC_NOERR;
NClist* vars = NULL;
int i;
size_t i;
vars = nclistnew();

View File

@ -4,6 +4,7 @@
*********************************************************************/
#include "dapincludes.h"
#include <stddef.h>
#define OCCHECK(exp) if((ocstat = (exp))) {THROWCHK(ocstat); goto done;}
@ -18,7 +19,7 @@ and stuff from DODS_EXTRA.
int
dapmerge(NCDAPCOMMON* nccomm, CDFnode* ddsroot, OCddsnode dasroot)
{
int i,j;
size_t i,j;
NCerror ncstat = NC_NOERR;
OCerror ocstat = OC_NOERR;
NClist* allnodes;

View File

@ -4,6 +4,7 @@
*********************************************************************/
#include "config.h"
#include "dapincludes.h"
#include <stddef.h>
#ifdef _WIN32
#include <crtdbg.h>
@ -215,10 +216,10 @@ If we need an int and the string value is out of range, return NC_ERANGE.
NCerror
dapcvtattrval(nc_type etype, void* dst, NClist* src, NCattribute* att)
{
int i;
size_t i;
NCerror ncstat = NC_NOERR;
unsigned int memsize = nctypesizeof(etype);
unsigned int nvalues = nclistlength(src);
size_t memsize = nctypesizeof(etype);
size_t nvalues = nclistlength(src);
char* dstmem = (char*)dst;
for(i=0;i<nvalues;i++) {

View File

@ -5,6 +5,7 @@
#include "config.h"
#include "netcdf.h"
#include <stddef.h>
#ifdef USE_PARALLEL
#include "netcdf_par.h"
#endif
@ -38,7 +39,7 @@ dumpmetadata(int ncid, NChdr** hdrp)
fprintf(stdout,"ncid=%d ngatts=%d ndims=%d nvars=%d unlimid=%d\n",
hdr->ncid,hdr->ngatts,hdr->ndims,hdr->nvars,hdr->unlimid);
#endif
hdr->gatts = (NCattribute*)calloc(1,hdr->ngatts*sizeof(NCattribute));
hdr->gatts = (NCattribute*)calloc(1, (size_t)hdr->ngatts*sizeof(NCattribute));
MEMCHECK(hdr->gatts,NC_ENOMEM);
if(hdr->ngatts > 0)
fprintf(stdout,"global attributes:\n");
@ -81,7 +82,7 @@ dumpmetadata(int ncid, NChdr** hdrp)
fprintf(stdout,"\n");
}
hdr->dims = (Dim*)malloc(hdr->ndims*sizeof(Dim));
hdr->dims = (Dim*)malloc((size_t)hdr->ndims*sizeof(Dim));
MEMCHECK(hdr->dims,NC_ENOMEM);
for(i=0;i<hdr->ndims;i++) {
hdr->dims[i].dimid = i;
@ -93,7 +94,7 @@ dumpmetadata(int ncid, NChdr** hdrp)
fprintf(stdout,"dim[%d]: name=%s size=%lu\n",
i,hdr->dims[i].name,(unsigned long)hdr->dims[i].size);
}
hdr->vars = (Var*)malloc(hdr->nvars*sizeof(Var));
hdr->vars = (Var*)malloc((size_t)hdr->nvars*sizeof(Var));
MEMCHECK(hdr->vars,NC_ENOMEM);
for(i=0;i<hdr->nvars;i++) {
Var* var = &hdr->vars[i];
@ -118,7 +119,7 @@ dumpmetadata(int ncid, NChdr** hdrp)
fprintf(stdout," %d",var->dimids[j]);
}
fprintf(stdout,"}\n");
var->atts = (NCattribute*)malloc(var->natts*sizeof(NCattribute));
var->atts = (NCattribute*)malloc((size_t)var->natts*sizeof(NCattribute));
MEMCHECK(var->atts,NC_ENOMEM);
for(j=0;j<var->natts;j++) {
NCattribute* att = &var->atts[j];
@ -247,7 +248,7 @@ dumppath(CDFnode* leaf)
NClist* path = nclistnew();
NCbytes* buf = ncbytesnew();
char* result;
int i;
size_t i;
if(leaf == NULL) return nulldup("");
collectnodepath(leaf,path,!WITHDATASET);
@ -272,7 +273,7 @@ dumpindent(int indent, NCbytes* buf)
static void
dumptreer1(CDFnode* root, NCbytes* buf, int indent, char* tag, int visible)
{
int i;
size_t i;
dumpindent(indent,buf);
ncbytescat(buf,tag);
ncbytescat(buf," {\n");
@ -300,7 +301,7 @@ dumptreer1(CDFnode* root, NCbytes* buf, int indent, char* tag, int visible)
static void
dumptreer(CDFnode* root, NCbytes* buf, int indent, int visible)
{
int i;
size_t i;
char* primtype = NULL;
NClist* dimset = NULL;
@ -389,7 +390,7 @@ dumpnode(CDFnode* node)
{
NCbytes* buf = ncbytesnew();
char* result;
int i;
size_t i;
char* nctype = NULL;
char* primtype = NULL;
char tmp[1024];
@ -456,7 +457,7 @@ dumpnode(CDFnode* node)
ncbytescat(buf,tmp);
for(i=0;i<nclistlength(node->array.dimset0);i++) {
CDFnode* dim = (CDFnode*)nclistget(node->array.dimset0,i);
snprintf(tmp,sizeof(tmp),"dims[%d]={\n",i);
snprintf(tmp,sizeof(tmp),"dims[%zu]={\n",i);
ncbytescat(buf,tmp);
snprintf(tmp,sizeof(tmp)," ocname=%s\n",dim->ocname);
ncbytescat(buf,tmp);
@ -497,7 +498,7 @@ dumpcachenode(NCcachenode* node)
{
char* result = NULL;
char tmp[8192];
int i;
size_t i;
NCbytes* buf;
if(node == NULL) return strdup("cachenode{null}");
@ -527,7 +528,7 @@ dumpcache(NCcache* cache)
{
char* result = NULL;
char tmp[8192];
int i;
size_t i;
NCbytes* buf;
if(cache == NULL) return strdup("cache{null}");
@ -619,10 +620,10 @@ dumplistraw(NClist* l)
void
dumpstringlist(NClist* l)
{
int i;
size_t i;
for(i=0;i<nclistlength(l);i++) {
const char* s = (const char*)nclistget(l,i);
fprintf(stderr,"[%d]: |%s|\n",i,s);
fprintf(stderr,"[%zu]: |%s|\n",i,s);
}
fflush(stderr);
}

View File

@ -81,7 +81,7 @@ dapodom_print(Dapodometer* odom)
if(odom->rank == 0) {
strlcat(line,"[]",sizeof(line));
} else for(i=0;i<odom->rank;i++) {
sprintf(tmp,"[%lu/%lu:%lu:%lu]",
snprintf(tmp,sizeof(tmp),"[%lu/%lu:%lu:%lu]",
(size_t)odom->index[i],
(size_t)odom->start[i],
(size_t)odom->stride[i],

View File

@ -4,6 +4,7 @@
*********************************************************************/
#include "config.h"
#include <stddef.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
@ -254,7 +255,8 @@ nclistconcat(NClist* l1, NClist* l2)
int
nclistminus(NClist* l1, NClist* l2)
{
unsigned int i,len,found;
size_t i, len;
int found;
len = nclistlength(l2);
found = 0;
for(i=0;i<len;i++) {
@ -312,7 +314,7 @@ collectocpath(OClink conn, OCddsnode node, NClist* path)
char*
makeocpathstring(OClink conn, OCddsnode node, const char* sep)
{
int i,len,first;
size_t i,len,first;
char* result;
char* name;
OCtype octype;
@ -353,7 +355,7 @@ makeocpathstring(OClink conn, OCddsnode node, const char* sep)
char*
makepathstring(NClist* path, const char* separator, int flags)
{
int i,len,first;
size_t i,len,first;
NCbytes* pathname = NULL;
char* result;
CDFnode* node;
@ -412,7 +414,7 @@ clonenodenamepath(CDFnode* node, NClist* path, int withdataset)
char*
simplepathstring(NClist* names, char* separator)
{
int i;
size_t i;
size_t len;
char* result;
if(nclistlength(names) == 0) return nulldup("");
@ -763,7 +765,7 @@ repairname(const char* name, const char* badchars)
const char *p;
char *q;
int c;
int nnlen = 0;
size_t nnlen = 0;
if(name == NULL) return NULL;
nnlen = (3*strlen(name)); /* max needed */

View File

@ -5,6 +5,7 @@
#include "config.h"
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
@ -218,7 +219,7 @@ Dst will be modified.
int
dcemergeprojectionlists(NClist* dst, NClist* src)
{
int i;
size_t i;
NClist* cat = nclistnew();
int ncstat = NC_NOERR;
@ -272,7 +273,7 @@ int
dcemergeprojections(DCEprojection* merged, DCEprojection* addition)
{
int ncstat = NC_NOERR;
int i,j;
size_t i,j;
ASSERT((merged->discrim == CES_VAR && addition->discrim == CES_VAR));
ASSERT((nclistlength(merged->var->segments) == nclistlength(addition->var->segments)));
@ -432,7 +433,7 @@ done:
NClist*
dceclonelist(NClist* list)
{
int i;
size_t i;
NClist* clone;
if(list == NULL) return NULL;
clone = nclistnew();
@ -519,7 +520,7 @@ dcefree(DCEnode* node)
void
dcefreelist(NClist* list)
{
int i;
size_t i;
if(list == NULL) return;
for(i=0;i<nclistlength(list);i++) {
DCEnode* node = (DCEnode*)nclistget(list,i);
@ -717,7 +718,7 @@ dcelisttostring(NClist* list, char* sep)
void
dcelisttobuffer(NClist* list, NCbytes* buf, char* sep)
{
int i;
size_t i;
if(list == NULL || buf == NULL) return;
if(sep == NULL) sep = ",";
for(i=0;i<nclistlength(list);i++) {
@ -741,7 +742,7 @@ dceallnodes(DCEnode* node, CEsort which)
static void
ceallnodesr(DCEnode* node, NClist* allnodes, CEsort which)
{
int i;
size_t i;
if(node == NULL) return;
if(nclistcontains(allnodes,(void*)node)) return;
if(which == CES_NIL || node->sort == which)
@ -920,8 +921,8 @@ dcemakewholeprojection(DCEprojection* p)
int
dcesamepath(NClist* list1, NClist* list2)
{
int i;
int len = nclistlength(list1);
size_t i;
size_t len = nclistlength(list1);
if(len != nclistlength(list2)) return 0;
for(i=0;i<len;i++) {
DCEsegment* s1 = (DCEsegment*)nclistget(list1,i);
@ -1162,7 +1163,7 @@ dcedumpraw(DCEnode* node, NCbytes* buf)
static void
dcedumprawlist(NClist* list, NCbytes* buf)
{
int i;
size_t i;
if(list == NULL || buf == NULL) return;
ncbytescat(buf,"(");
for(i=0;i<nclistlength(list);i++) {

View File

@ -8,6 +8,7 @@
*/
#include "config.h"
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
@ -91,7 +92,7 @@ segmentlist(DCEparsestate* state, Object var0, Object decl)
Object
segment(DCEparsestate* state, Object name, Object slices0)
{
int i;
size_t i;
DCEsegment* segment = (DCEsegment*)dcecreate(CES_SEGMENT);
NClist* slices = (NClist*)slices0;
segment->name = strdup((char*)name);
@ -222,7 +223,7 @@ array_indices(DCEparsestate* state, Object list0, Object indexno)
Object
indexer(DCEparsestate* state, Object name, Object indices)
{
int i;
size_t i;
NClist* list = (NClist*)indices;
DCEsegment* seg = (DCEsegment*)dcecreate(CES_SEGMENT);
seg->name = strdup((char*)name);

View File

@ -978,7 +978,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
return 1;
}
/* Avoid sprintf, as that infringes on the user's name space.
/* Avoid snprintf, as that infringes on the user's name space.
Don't have undefined behavior even if the translation
produced a string with the wrong number of "%s"s. */
{

View File

@ -9,6 +9,7 @@
#include "dapdump.h"
#include "ncd2dispatch.h"
#include "ocx.h"
#include <stddef.h>
#define NEWVARM
@ -95,7 +96,7 @@ nc3d_getvarx(int ncid, int varid,
{
NCerror ncstat = NC_NOERR;
OCerror ocstat = OC_NOERR;
int i;
size_t i;
NC* drno;
NC* substrate;
NCDAPCOMMON* dapcomm;
@ -426,7 +427,7 @@ fail:
static NCerror
removepseudodims(DCEprojection* proj)
{
int i;
size_t i;
#ifdef DEBUG1
fprintf(stderr,"removesequencedims.before: %s\n",dumpprojection(proj));
#endif
@ -935,7 +936,7 @@ extractstring(
{
NCerror ncstat = NC_NOERR;
OCerror ocstat = OC_NOERR;
int i;
size_t i;
size_t rank0;
NClist* strings = NULL;
Dapodometer* odom = NULL;
@ -1056,9 +1057,9 @@ It is assumed that both trees have been re-struct'ed if necessary.
*/
static NCerror
attachr(CDFnode* xnode, NClist* patternpath, int depth)
attachr(CDFnode* xnode, NClist* patternpath, size_t depth)
{
unsigned int i,plen,lastnode,gridable;
size_t i,plen,lastnode,gridable;
NCerror ncstat = NC_NOERR;
CDFnode* patternpathnode;
CDFnode* patternpathnext;
@ -1129,7 +1130,7 @@ attachsubsetr(CDFnode* target, CDFnode* pattern)
{
unsigned int i;
NCerror ncstat = NC_NOERR;
int fieldindex;
size_t fieldindex;
#ifdef DEBUG2
fprintf(stderr,"attachsubsetr: attach: target=%s pattern=%s\n",

View File

@ -8,6 +8,7 @@
#include "ncrc.h"
#include "ncoffsets.h"
#include "netcdf_dispatch.h"
#include <stddef.h>
#ifdef DEBUG2
#include "dapdump.h"
#endif
@ -728,7 +729,7 @@ done:
static NCerror
buildvars(NCDAPCOMMON* dapcomm)
{
int i,j;
size_t i,j;
NCerror ncstat = NC_NOERR;
int varid;
NClist* varnodes = dapcomm->cdf.ddsroot->tree->varnodes;
@ -738,7 +739,7 @@ buildvars(NCDAPCOMMON* dapcomm)
for(i=0;i<nclistlength(varnodes);i++) {
CDFnode* var = (CDFnode*)nclistget(varnodes,i);
int dimids[NC_MAX_VAR_DIMS];
unsigned int ncrank;
size_t ncrank;
NClist* vardims = NULL;
if(var->invisible) continue;
@ -773,7 +774,7 @@ fprintf(stderr,"\n");
ncstat = nc_def_var(dapcomm->substrate.nc3id,
definename,
var->externaltype,
ncrank,
(int)ncrank,
(ncrank==0?NULL:dimids),
&varid);
nullfree(definename);
@ -849,7 +850,7 @@ done:
static NCerror
buildglobalattrs(NCDAPCOMMON* dapcomm, CDFnode* root)
{
int i;
size_t i;
NCerror ncstat = NC_NOERR;
const char* txt;
char *nltxt, *p;
@ -930,9 +931,9 @@ done:
static NCerror
buildattribute(NCDAPCOMMON* dapcomm, CDFnode* var, NCattribute* att)
{
int i;
size_t i;
NCerror ncstat = NC_NOERR;
unsigned int nvalues = nclistlength(att->values);
size_t nvalues = nclistlength(att->values);
int varid = (var == NULL ? NC_GLOBAL : var->ncid);
void* mem = NULL;
@ -966,7 +967,7 @@ buildattribute(NCDAPCOMMON* dapcomm, CDFnode* var, NCattribute* att)
if(ncstat) goto done;
} else {
nc_type atype;
unsigned int typesize;
size_t typesize;
atype = nctypeconvert(dapcomm,att->etype);
typesize = nctypesizeof(atype);
if (nvalues > 0) {
@ -1040,7 +1041,7 @@ NCD2_inq_format_extended(int ncid, int* formatp, int* modep)
NCerror
computecdfdimnames(NCDAPCOMMON* nccomm)
{
int i,j;
size_t i,j;
char tmp[NC_MAX_NAME*2];
NClist* conflicts = nclistnew();
NClist* varnodes = nccomm->cdf.ddsroot->tree->varnodes;
@ -1116,7 +1117,7 @@ fprintf(stderr,"conflict: %s[%lu] %s[%lu]\n",
/* Give all the conflicting dimensions an index */
for(j=0;j<nclistlength(conflicts);j++) {
CDFnode* dim = (CDFnode*)nclistget(conflicts,j);
dim->dim.index1 = j+1;
dim->dim.index1 = (int)j+1;
}
}
nclistfree(conflicts);
@ -1240,7 +1241,8 @@ paramlookup(NCDAPCOMMON* state, const char* key)
static NCerror
applyclientparams(NCDAPCOMMON* nccomm)
{
int i,len;
size_t i;
int len;
int dfaltstrlen = DEFAULTSTRINGLENGTH;
int dfaltseqlim = DEFAULTSEQLIMIT;
const char* value;
@ -1364,12 +1366,12 @@ applyclientparams(NCDAPCOMMON* nccomm)
static void
computedimindexanon(CDFnode* dim, CDFnode* var)
{
int i;
size_t i;
NClist* dimset = var->array.dimsetall;
for(i=0;i<nclistlength(dimset);i++) {
CDFnode* candidate = (CDFnode*)nclistget(dimset,i);
if(dim == candidate) {
dim->dim.index1=i+1;
dim->dim.index1 = (int)i+1;
return;
}
}
@ -1379,7 +1381,7 @@ computedimindexanon(CDFnode* dim, CDFnode* var)
static void
replacedims(NClist* dims)
{
int i;
size_t i;
for(i=0;i<nclistlength(dims);i++) {
CDFnode* dim = (CDFnode*)nclistget(dims,i);
CDFnode* basedim = dim->dim.basedim;
@ -1407,7 +1409,7 @@ equivalentdim(CDFnode* basedim, CDFnode* dupdim)
static void
getalldimsa(NClist* dimset, NClist* alldims)
{
int i;
size_t i;
for(i=0;i<nclistlength(dimset);i++) {
CDFnode* dim = (CDFnode*)nclistget(dimset,i);
if(!nclistcontains(alldims,(void*)dim)) {
@ -1426,7 +1428,7 @@ fprintf(stderr,"getalldims: %s[%lu]\n",
NClist*
getalldims(NCDAPCOMMON* nccomm, int visibleonly)
{
int i;
size_t i;
NClist* alldims = nclistnew();
NClist* varnodes = nccomm->cdf.ddsroot->tree->varnodes;
@ -1450,7 +1452,7 @@ addstringdims(NCDAPCOMMON* dapcomm)
in DODS{...} attribute set or defaulting to the variable name.
All such dimensions are global.
*/
int i;
size_t i;
NClist* varnodes = dapcomm->cdf.ddsroot->tree->varnodes;
CDFnode* globalsdim = NULL;
char dimname[4096];
@ -1514,7 +1516,7 @@ addstringdims(NCDAPCOMMON* dapcomm)
static NCerror
defrecorddim(NCDAPCOMMON* dapcomm)
{
unsigned int i;
size_t i;
NCerror ncstat = NC_NOERR;
NClist* basedims;
@ -1590,7 +1592,7 @@ fail:
static NCerror
showprojection(NCDAPCOMMON* dapcomm, CDFnode* var)
{
int i,rank;
size_t i,rank;
NCerror ncstat = NC_NOERR;
NCbytes* projection = ncbytesnew();
NClist* path = nclistnew();
@ -1848,7 +1850,7 @@ make sure we always have a constraint.
static NCerror
computeseqcountconstraints(NCDAPCOMMON* dapcomm, CDFnode* seq, NCbytes* seqcountconstraints)
{
int i,j;
size_t i,j;
NClist* path = NULL;
CDFnode* var = NULL;
@ -1875,7 +1877,7 @@ computeseqcountconstraints(NCDAPCOMMON* dapcomm, CDFnode* seq, NCbytes* seqcount
ncbytescat(seqcountconstraints,tmp);
}
} else if(nclistlength(node->array.dimset0) > 0) {
int ndims = nclistlength(node->array.dimset0);
size_t ndims = nclistlength(node->array.dimset0);
for(j=0;j<ndims;j++) {
CDFnode* dim = (CDFnode*)nclistget(node->array.dimset0,j);
if(DIMFLAG(dim,CDFDIMSTRING)) {
@ -1986,8 +1988,8 @@ cdftotalsize(NClist* dimensions)
static void
estimatevarsizes(NCDAPCOMMON* dapcomm)
{
int ivar;
unsigned int rank;
size_t ivar;
size_t rank;
size_t totalsize = 0;
for(ivar=0;ivar<nclistlength(dapcomm->cdf.ddsroot->tree->varnodes);ivar++) {
@ -2183,7 +2185,7 @@ make them invisible.
static NCerror
fixzerodims(NCDAPCOMMON* dapcomm)
{
int i,j;
size_t i,j;
for(i=0;i<nclistlength(dapcomm->cdf.ddsroot->tree->varnodes);i++) {
CDFnode* var = (CDFnode*)nclistget(dapcomm->cdf.ddsroot->tree->varnodes,i);
NClist* ncdims = var->array.dimsetplus;
@ -2249,7 +2251,7 @@ applyclientparamcontrols(NCDAPCOMMON* dapcomm)
CLRFLAG(dapcomm->controls,NCF_FILLMISMATCH);
if((value=dapparamvalue(dapcomm,"encode")) != NULL) {
int i;
size_t i;
NClist* encode = nclistnew();
if(dapparamparselist(value,',',encode))
nclog(NCLOGERR,"Malformed encode parameter: %s",value);

View File

@ -6,6 +6,19 @@
# See netcdf-c/COPYRIGHT file for more info.
SET(dap4_SOURCES d4curlfunctions.c d4fix.c d4data.c d4file.c d4parser.c d4meta.c d4varx.c d4dump.c d4swap.c d4chunk.c d4printer.c d4read.c d4http.c d4util.c d4odom.c d4cvt.c d4debug.c ncd4dispatch.c)
##
# Turn off inclusion of particular files when using the cmake-native
# option to turn on Unity Builds.
#
# For more information, see:
# * https://github.com/Unidata/netcdf-c/pull/2839/
# * https://cmake.org/cmake/help/latest/prop_tgt/UNITY_BUILD.html
# * https://cmake.org/cmake/help/latest/prop_tgt/UNITY_BUILD_MODE.html#prop_tgt:UNITY_BUILD_MODE
##
set_property(SOURCE d4meta.c
PROPERTY
SKIP_UNITY_BUILD_INCLUSION ON)
add_library(dap4 OBJECT ${dap4_SOURCES})
IF(STATUS_PARALLEL)

View File

@ -22,133 +22,128 @@ Notes:
*/
/* Forward */
static int processerrchunk(NCD4meta* metadata, void* errchunk, unsigned int count);
static int processerrchunk(NCD4response*, void* errchunk, unsigned int count);
/**************************************************/
void
NCD4_resetSerial(NCD4serial* serial, size_t rawsize, void* rawdata)
{
nullfree(serial->errdata);
nullfree(serial->dmr);
nullfree(serial->dap);
nullfree(serial->rawdata);
/* clear all fields */
memset(serial,0,sizeof(NCD4serial));
/* Reset fields */
serial->hostlittleendian = NCD4_isLittleEndian();
serial->rawsize = rawsize;
serial->rawdata = rawdata;
}
int
NCD4_dechunk(NCD4meta* metadata)
NCD4_dechunk(NCD4response* resp)
{
unsigned char *praw, *phdr, *pdap;
unsigned char *praw, *pdmr, *phdr, *pdap, *pappend, *pchunk;
NCD4HDR hdr;
int firstchunk;
#ifdef D4DUMPRAW
NCD4_tagdump(metadata->serial.rawsize,metadata->serial.rawdata,0,"RAW");
NCD4_tagdump(resp->serial.raw.size,resp->serial.raw.data,0,"RAW");
#endif
/* Access the returned raw data */
praw = metadata->serial.rawdata;
praw = (unsigned char*)resp->raw.memory;
if(metadata->mode == NCD4_DSR) {
if(resp->mode == NCD4_DSR) {
return THROW(NC_EDMR);
} else if(metadata->mode == NCD4_DMR) {
} else if(resp->mode == NCD4_DMR) {
/* Verify the mode; assume that the <?xml...?> is optional */
if(memcmp(praw,"<?xml",strlen("<?xml"))==0
|| memcmp(praw,"<Dataset",strlen("<Dataset"))==0) {
size_t len = 0;
/* setup as dmr only */
/* Avoid strdup since rawdata might contain nul chars */
len = metadata->serial.rawsize;
if((metadata->serial.dmr = malloc(len+1)) == NULL)
len = resp->raw.size;
if((resp->serial.dmr = malloc(len+1)) == NULL)
return THROW(NC_ENOMEM);
memcpy(metadata->serial.dmr,praw,len);
metadata->serial.dmr[len] = '\0';
memcpy(resp->serial.dmr,praw,len);
resp->serial.dmr[len] = '\0';
/* Suppress nuls */
(void)NCD4_elidenuls(metadata->serial.dmr,len);
(void)NCD4_elidenuls(resp->serial.dmr,len);
return THROW(NC_NOERR);
}
} else if(metadata->mode != NCD4_DAP)
} else if(resp->mode != NCD4_DAP)
return THROW(NC_EDAP);
/* We must be processing a DAP mode packet */
praw = (metadata->serial.dap = metadata->serial.rawdata);
metadata->serial.rawdata = NULL;
praw = resp->raw.memory;
/* If the raw data looks like xml, then we almost certainly have an error */
if(memcmp(praw,"<?xml",strlen("<?xml"))==0
|| memcmp(praw,"<!doctype",strlen("<!doctype"))==0) {
/* Set up to report the error */
int stat = NCD4_seterrormessage(metadata, metadata->serial.rawsize, metadata->serial.rawdata);
int stat = NCD4_seterrormessage(resp, resp->raw.size, resp->raw.memory);
return THROW(stat); /* slight lie */
}
/* Get the DMR chunk header*/
phdr = NCD4_getheader(praw,&hdr,metadata->serial.hostlittleendian);
/* Get the first header to get dmr content and endian flags*/
pdmr = NCD4_getheader(praw,&hdr,resp->controller->platform.hostlittleendian);
if(hdr.count == 0)
return THROW(NC_EDMR);
if(hdr.flags & NCD4_ERR_CHUNK) {
return processerrchunk(metadata, (void*)phdr, hdr.count);
}
if(hdr.flags & NCD4_ERR_CHUNK)
return processerrchunk(resp, (void*)pdmr, hdr.count);
resp->remotelittleendian = ((hdr.flags & NCD4_LITTLE_ENDIAN_CHUNK) ? 1 : 0);
metadata->serial.remotelittleendian = ((hdr.flags & NCD4_LITTLE_ENDIAN_CHUNK) ? 1 : 0);
/* Again, avoid strxxx operations on dmr */
if((metadata->serial.dmr = malloc(hdr.count+1)) == NULL)
/* avoid strxxx operations on dmr */
if((resp->serial.dmr = malloc(hdr.count+1)) == NULL)
return THROW(NC_ENOMEM);
memcpy(metadata->serial.dmr,phdr,hdr.count);
metadata->serial.dmr[hdr.count-1] = '\0';
memcpy(resp->serial.dmr,pdmr,hdr.count);
resp->serial.dmr[hdr.count-1] = '\0';
/* Suppress nuls */
(void)NCD4_elidenuls(metadata->serial.dmr,hdr.count);
(void)NCD4_elidenuls(resp->serial.dmr,hdr.count);
/* See if there is any data after the DMR */
if(hdr.flags & NCD4_LAST_CHUNK)
return THROW(NC_ENODATA);
/* Read and concat together the data chunks */
phdr = phdr + hdr.count; /* point to data chunk header */
phdr = pdmr + hdr.count; /* point to data chunk header */
/* Do a sanity check in case the server has shorted us with no data */
if((hdr.count + CHUNKHDRSIZE) >= metadata->serial.rawsize) {
if((hdr.count + CHUNKHDRSIZE) >= resp->raw.size) {
/* Server only sent the DMR part */
metadata->serial.dapsize = 0;
resp->serial.dapsize = 0;
return THROW(NC_EDATADDS);
}
pdap = metadata->serial.dap;
for(;;) {
phdr = NCD4_getheader(phdr,&hdr,metadata->serial.hostlittleendian);
if(hdr.flags & NCD4_ERR_CHUNK) {
return processerrchunk(metadata, (void*)phdr, hdr.count);
/* walk all the data chunks */
/* invariants:
praw -- beginning of the raw response
pdmr -- beginning of the dmr in the raw data
pdap -- beginning of the dechunked dap data
phdr -- pointer to the hdr of the current chunk
pchunk -- pointer to the data part of the current chunk
pappend -- where to append next chunk to the growing dechunked data
*/
for(firstchunk=1;;firstchunk=0) {
pchunk = NCD4_getheader(phdr,&hdr,resp->controller->platform.hostlittleendian); /* Process first data chunk header */
if(firstchunk) {
pdap = phdr; /* remember start point of the dechunked data */
pappend = phdr; /* start appending here */
}
if(hdr.flags & NCD4_ERR_CHUNK)
return processerrchunk(resp, (void*)pchunk, hdr.count);
/* data chunk; possibly last; possibly empty */
if(hdr.count > 0) {
d4memmove(pdap,phdr,hdr.count); /* will overwrite the header */
phdr += hdr.count;
pdap += hdr.count;
}
if(hdr.count > 0)
d4memmove(pappend,pchunk,hdr.count); /* overwrite the header; this the heart of dechunking */
pappend += hdr.count; /* next append point */
phdr = pchunk + hdr.count; /* point to header of next chunk */
if(hdr.flags & NCD4_LAST_CHUNK) break;
}
metadata->serial.dapsize = (size_t)DELTA(pdap,metadata->serial.dap);
resp->serial.dap = pdap; /* start of dechunked data */
resp->serial.dapsize = (size_t)DELTA(pappend,pdap);
#ifdef D4DUMPDMR
fprintf(stderr,"%s\n",metadata->serial.dmr);
fprintf(stderr,"%s\n",resp->serial.dmr);
fflush(stderr);
#endif
#ifdef D4DUMPDAP
NCD4_tagdump(metadata->serial.dapsize,metadata->serial.dap,0,"DAP");
NCD4_tagdump(resp->serial.dapsize,resp->serial.dap,0,"DAP");
#endif
return THROW(NC_NOERR);
}
static int
processerrchunk(NCD4meta* metadata, void* errchunk, unsigned int count)
processerrchunk(NCD4response* resp, void* errchunk, unsigned int count)
{
metadata->serial.errdata = (char*)d4alloc(count+1);
if(metadata->serial.errdata == NULL)
resp->serial.errdata = (char*)d4alloc(count+1);
if(resp->serial.errdata == NULL)
return THROW(NC_ENOMEM);
memcpy(metadata->serial.errdata,errchunk,count);
metadata->serial.errdata[count] = '\0';
memcpy(resp->serial.errdata,errchunk,count);
resp->serial.errdata[count] = '\0';
return THROW(NC_ENODATA); /* slight lie */
}
@ -157,26 +152,26 @@ Given a raw response, attempt to infer the mode: DMR, DAP, DSR.
Since DSR is not standardizes, it becomes the default.
*/
int
NCD4_infermode(NCD4meta* meta)
NCD4_infermode(NCD4response* resp)
{
d4size_t size = meta->serial.rawsize;
char* raw = meta->serial.rawdata;
d4size_t size = resp->raw.size;
char* raw = resp->raw.memory;
if(size < 16)
return THROW(NC_EDAP); /* must have at least this to hold a hdr + partial dmr*/
if(memcmp(raw,"<?xml",strlen("<?xml"))==0
|| memcmp(raw,"<Dataset",strlen("<Dataset"))==0) {
meta->mode = NCD4_DMR;
resp->mode = NCD4_DMR;
goto done;
}
raw += 4; /* Pretend we have a DAP hdr */
if(memcmp(raw,"<?xml",strlen("<?xml"))==0
|| memcmp(raw,"<Dataset",strlen("<Dataset"))==0) {
meta->mode = NCD4_DAP;
resp->mode = NCD4_DAP;
goto done;
}
/* Default to DSR */
meta->mode = NCD4_DSR;
resp->mode = NCD4_DSR;
done:
return NC_NOERR;

View File

@ -3,6 +3,11 @@
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
*********************************************************************/
/* WARNING: oc2/occurlfunctions.c and libdap4/d4curlfunctions.c
should be merged since they are essentially the same file.
In the meantime, changes to one should be propagated to the other.
*/
#include "d4includes.h"
#include "d4curlfunctions.h"
@ -123,33 +128,43 @@ set_curlflag(NCD4INFO* state, int flag)
}
}
break;
case CURLOPT_USE_SSL:
case CURLOPT_SSLCERT: case CURLOPT_SSLKEY:
case CURLOPT_SSL_VERIFYPEER: case CURLOPT_SSL_VERIFYHOST:
{
struct ssl* ssl = &state->auth->ssl;
case CURLOPT_SSL_VERIFYPEER:
/* VERIFYPEER == 0 => VERIFYHOST == 0 */
/* We need to have 2 states: default and a set value */
/* So -1 => default, >= 0 => use value; */
if(ssl->verifypeer >= 0)
SETCURLOPT(state, CURLOPT_SSL_VERIFYPEER, (OPTARG)(ssl->verifypeer));
/* So -1 => default >= 0 => use value */
if(state->auth->ssl.verifypeer >= 0) {
SETCURLOPT(state, CURLOPT_SSL_VERIFYPEER, (OPTARG)(state->auth->ssl.verifypeer));
if(state->auth->ssl.verifypeer == 0) state->auth->ssl.verifyhost = 0;
}
break;
case CURLOPT_SSL_VERIFYHOST:
#ifdef HAVE_LIBCURL_766
if(ssl->verifyhost >= 0)
SETCURLOPT(state, CURLOPT_SSL_VERIFYHOST, (OPTARG)(ssl->verifyhost));
if(state->auth->ssl.verifyhost >= 0) {
SETCURLOPT(state, CURLOPT_SSL_VERIFYHOST, (OPTARG)(state->auth->ssl.verifyhost));
}
#endif
if(ssl->certificate)
SETCURLOPT(state, CURLOPT_SSLCERT, ssl->certificate);
if(ssl->key)
SETCURLOPT(state, CURLOPT_SSLKEY, ssl->key);
if(ssl->keypasswd)
break;
case CURLOPT_SSLCERT:
if(state->auth->ssl.certificate)
SETCURLOPT(state, CURLOPT_SSLCERT, state->auth->ssl.certificate);
break;
case CURLOPT_SSLKEY:
if(state->auth->ssl.key)
SETCURLOPT(state, CURLOPT_SSLKEY, state->auth->ssl.key);
if(state->auth->ssl.keypasswd)
/* libcurl prior to 7.16.4 used 'CURLOPT_SSLKEYPASSWD' */
SETCURLOPT(state, CURLOPT_KEYPASSWD, ssl->keypasswd);
if(ssl->cainfo)
SETCURLOPT(state, CURLOPT_CAINFO, ssl->cainfo);
if(ssl->capath)
SETCURLOPT(state, CURLOPT_CAPATH, ssl->capath);
}
break;
SETCURLOPT(state, CURLOPT_SSLKEYPASSWD, state->auth->ssl.keypasswd);
break;
case CURLOPT_CAINFO:
if(state->auth->ssl.cainfo)
SETCURLOPT(state, CURLOPT_CAINFO, state->auth->ssl.cainfo);
break;
case CURLOPT_CAPATH:
if(state->auth->ssl.capath)
SETCURLOPT(state, CURLOPT_CAPATH, state->auth->ssl.capath);
break;
case CURLOPT_USE_SSL:
break;
#ifdef HAVE_CURLOPT_BUFFERSIZE
case CURLOPT_BUFFERSIZE:
@ -200,6 +215,12 @@ NCD4_set_flags_perlink(NCD4INFO* state)
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_COOKIEJAR);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_USERPWD);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_PROXY);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSL_VERIFYPEER);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSL_VERIFYHOST);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSLCERT);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSLKEY);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_CAINFO);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_CAPATH);
if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_USE_SSL);
if(ret == NC_NOERR) ret = set_curlflag(state, CURLOPT_FOLLOWLOCATION);
if(ret == NC_NOERR) ret = set_curlflag(state, CURLOPT_MAXREDIRS);
@ -319,7 +340,7 @@ NCD4_get_rcproperties(NCD4INFO* state)
ncerror err = NC_NOERR;
char* option = NULL;
#ifdef HAVE_CURLOPT_BUFFERSIZE
option = NC_rclookup(D4BUFFERSIZE,state->uri->uri,NULL);
option = NC_rclookup(D4BUFFERSIZE,state->dmruri->uri,NULL);
if(option != NULL && strlen(option) != 0) {
long bufsize;
if(strcasecmp(option,"max")==0)
@ -330,7 +351,7 @@ NCD4_get_rcproperties(NCD4INFO* state)
}
#endif
#ifdef HAVE_CURLOPT_KEEPALIVE
option = NC_rclookup(D4KEEPALIVE,state->uri->uri,NULL);
option = NC_rclookup(D4KEEPALIVE,state->dmruri->uri,NULL);
if(option != NULL && strlen(option) != 0) {
/* The keepalive value is of the form 0 or n/m,
where n is the idle time and m is the interval time;

View File

@ -6,6 +6,7 @@
#include "d4includes.h"
#include <stdarg.h>
#include <assert.h>
#include <stddef.h>
#include "d4includes.h"
#include "d4odom.h"
#include "nccrc.h"
@ -16,7 +17,7 @@ This code serves two purposes
(NCD4_processdata)
2. Walk a specified variable instance to convert to netcdf4
memory representation.
(NCD4_fillinstance)
(NCD4_movetoinstance)
*/
@ -29,7 +30,6 @@ static int fillopfixed(NCD4meta*, d4size_t opaquesize, NCD4offset* offset, void*
static int fillopvar(NCD4meta*, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs);
static int fillstruct(NCD4meta*, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs);
static int fillseq(NCD4meta*, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs);
static int NCD4_inferChecksums(NCD4meta* meta, NClist* toplevel);
static unsigned NCD4_computeChecksum(NCD4meta* meta, NCD4node* topvar);
/***************************************************/
@ -54,11 +54,12 @@ static unsigned int debugcrc32(unsigned int crc, const void *buf, size_t size)
/***************************************************/
/* API */
/* Parcel out the dechunked data to the corresponding vars */
int
NCD4_processdata(NCD4meta* meta)
NCD4_parcelvars(NCD4meta* meta, NCD4response* resp)
{
int ret = NC_NOERR;
int i;
size_t i;
NClist* toplevel = NULL;
NCD4node* root = meta->root;
NCD4offset* offset = NULL;
@ -68,35 +69,57 @@ NCD4_processdata(NCD4meta* meta)
toplevel = nclistnew();
NCD4_getToplevelVars(meta,root,toplevel);
/* Otherwise */
NCD4_inferChecksums(meta,toplevel);
/* If necessary, byte swap the serialized data */
/* Do we need to swap the dap4 data? */
meta->swap = (meta->serial.hostlittleendian != meta->serial.remotelittleendian);
/* Compute the offset and size of the toplevel vars in the raw dap data. */
/* Also extract remote checksums */
offset = BUILDOFFSET(meta->serial.dap,meta->serial.dapsize);
offset = BUILDOFFSET(resp->serial.dap,resp->serial.dapsize);
for(i=0;i<nclistlength(toplevel);i++) {
NCD4node* var = (NCD4node*)nclistget(toplevel,i);
if((ret=NCD4_delimit(meta,var,offset)))
if((ret=NCD4_delimit(meta,var,offset,resp->inferredchecksumming))) {
FAIL(ret,"delimit failure");
if(meta->controller->data.inferredchecksumming) {
/* Compute remote checksum: must occur before any byte swapping */
}
var->data.response = resp; /* cross link */
}
done:
nclistfree(toplevel);
nullfree(offset);
return THROW(ret);
}
/* Process top level vars wrt checksums and swapping */
int
NCD4_processdata(NCD4meta* meta, NCD4response* resp)
{
int ret = NC_NOERR;
size_t i;
NClist* toplevel = NULL;
NCD4node* root = meta->root;
NCD4offset* offset = NULL;
/* Do we need to swap the dap4 data? */
meta->swap = (meta->controller->platform.hostlittleendian != resp->remotelittleendian);
/* Recursively walk the tree in prefix order
to get the top-level variables; also mark as unvisited */
toplevel = nclistnew();
NCD4_getToplevelVars(meta,root,toplevel);
/* Extract remote checksums */
for(i=0;i<nclistlength(toplevel);i++) {
NCD4node* var = (NCD4node*)nclistget(toplevel,i);
if(resp->inferredchecksumming) {
/* Compute checksum of response data: must occur before any byte swapping and after delimiting */
var->data.localchecksum = NCD4_computeChecksum(meta,var);
#ifdef DUMPCHECKSUM
fprintf(stderr,"var %s: remote-checksum = 0x%x\n",var->name,var->data.remotechecksum);
#endif
/* verify checksums */
if(!meta->controller->data.checksumignore) {
if(!resp->checksumignore) {
if(var->data.localchecksum != var->data.remotechecksum) {
nclog(NCLOGERR,"Checksum mismatch: %s\n",var->name);
ret = NC_EDAP;
goto done;
}
/* Also verify checksum attribute */
if(meta->controller->data.attrchecksumming) {
if(resp->attrchecksumming) {
if(var->data.attrchecksum != var->data.remotechecksum) {
nclog(NCLOGERR,"Attribute Checksum mismatch: %s\n",var->name);
ret = NC_EDAP;
@ -105,13 +128,11 @@ NCD4_processdata(NCD4meta* meta)
}
}
}
}
/* Swap the data for each top level variable,
*/
if(meta->swap) {
if((ret=NCD4_swapdata(meta,toplevel)))
FAIL(ret,"byte swapping failed");
if(meta->swap) {
if((ret=NCD4_swapdata(resp,var,meta->swap)))
FAIL(ret,"byte swapping failed");
}
var->data.valid = 1; /* Everything should be in place */
}
done:
@ -133,7 +154,7 @@ Assumes that NCD4_processdata has been called.
*/
int
NCD4_fillinstance(NCD4meta* meta, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs)
NCD4_movetoinstance(NCD4meta* meta, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs)
{
int ret = NC_NOERR;
void* dst = *dstp;
@ -149,30 +170,30 @@ NCD4_fillinstance(NCD4meta* meta, NCD4node* type, NCD4offset* offset, void** dst
} else switch(type->subsort) {
case NC_STRING: /* oob strings */
if((ret=fillstring(meta,offset,&dst,blobs)))
FAIL(ret,"fillinstance");
FAIL(ret,"movetoinstance");
break;
case NC_OPAQUE:
if(type->opaque.size > 0) {
/* We know the size and its the same for all instances */
if((ret=fillopfixed(meta,type->opaque.size,offset,&dst)))
FAIL(ret,"fillinstance");
FAIL(ret,"movetoinstance");
} else {
/* Size differs per instance, so we need to convert each opaque to a vlen */
if((ret=fillopvar(meta,type,offset,&dst,blobs)))
FAIL(ret,"fillinstance");
FAIL(ret,"movetoinstance");
}
break;
case NC_STRUCT:
if((ret=fillstruct(meta,type,offset,&dst,blobs)))
FAIL(ret,"fillinstance");
FAIL(ret,"movetoinstance");
break;
case NC_SEQ:
if((ret=fillseq(meta,type,offset,&dst,blobs)))
FAIL(ret,"fillinstance");
FAIL(ret,"movetoinstance");
break;
default:
ret = NC_EINVAL;
FAIL(ret,"fillinstance");
FAIL(ret,"movetoinstance");
}
*dstp = dst;
@ -183,7 +204,8 @@ done:
static int
fillstruct(NCD4meta* meta, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs)
{
int i,ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
void* dst = *dstp;
#ifdef CLEARSTRUCT
@ -196,7 +218,7 @@ fillstruct(NCD4meta* meta, NCD4node* type, NCD4offset* offset, void** dstp, NCli
NCD4node* field = nclistget(type->vars,i);
NCD4node* ftype = field->basetype;
void* fdst = (((char*)dst) + field->meta.offset);
if((ret=NCD4_fillinstance(meta,ftype,offset,&fdst,blobs)))
if((ret=NCD4_movetoinstance(meta,ftype,offset,&fdst,blobs)))
FAIL(ret,"fillstruct");
}
dst = ((char*)dst) + type->meta.memsize;
@ -231,7 +253,7 @@ fillseq(NCD4meta* meta, NCD4node* type, NCD4offset* offset, void** dstp, NClist*
for(i=0;i<recordcount;i++) {
/* Read each record instance */
void* recdst = ((char*)(dst->p))+(recordsize * i);
if((ret=NCD4_fillinstance(meta,vlentype,offset,&recdst,blobs)))
if((ret=NCD4_movetoinstance(meta,vlentype,offset,&recdst,blobs)))
FAIL(ret,"fillseq");
}
dst++;
@ -350,7 +372,7 @@ int
NCD4_getToplevelVars(NCD4meta* meta, NCD4node* group, NClist* toplevel)
{
int ret = NC_NOERR;
int i;
size_t i;
if(group == NULL)
group = meta->root;
@ -373,17 +395,22 @@ done:
return THROW(ret);
}
static int
NCD4_inferChecksums(NCD4meta* meta, NClist* toplevel)
int
NCD4_inferChecksums(NCD4meta* meta, NCD4response* resp)
{
int ret = NC_NOERR;
int i, attrfound;
NCD4INFO* info = meta->controller;
size_t i;
int attrfound;
NClist* toplevel = NULL;
/* Get the toplevel vars */
toplevel = nclistnew();
NCD4_getToplevelVars(meta,meta->root,toplevel);
/* First, look thru the DMR to see if there is a checksum attribute */
attrfound = 0;
for(i=0;i<nclistlength(toplevel);i++) {
int a;
size_t a;
NCD4node* node = (NCD4node*)nclistget(toplevel,i);
for(a=0;a<nclistlength(node->attributes);a++) {
NCD4node* attr = (NCD4node*)nclistget(node->attributes,a);
@ -399,9 +426,10 @@ NCD4_inferChecksums(NCD4meta* meta, NClist* toplevel)
}
}
}
info->data.attrchecksumming = (attrfound ? 1 : 0);
nclistfree(toplevel);
resp->attrchecksumming = (attrfound ? 1 : 0);
/* Infer checksums */
info->data.inferredchecksumming = ((info->data.attrchecksumming || info->data.querychecksumming) ? 1 : 0);
resp->inferredchecksumming = ((resp->attrchecksumming || resp->querychecksumming) ? 1 : 0);
return THROW(ret);
}

View File

@ -4,6 +4,7 @@
*********************************************************************/
#include "config.h"
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include "d4includes.h"
@ -96,8 +97,9 @@ bv inserting the data into the substrate and then writing it out.
int
NCD4_debugcopy(NCD4INFO* info)
{
int i,ret=NC_NOERR;
NCD4meta* meta = info->substrate.metadata;
size_t i;
int ret = NC_NOERR;
NCD4meta* meta = info->dmrmetadata;
NClist* topvars = nclistnew();
NC* ncp = info->controller;
void* memory = NULL;
@ -134,7 +136,7 @@ NCD4_debugcopy(NCD4INFO* info)
*/
{
size_t edges[NC_MAX_VAR_DIMS];
int d;
size_t d;
for(d=0;d<nclistlength(var->dims);d++) {
NCD4node* dim = (NCD4node*)nclistget(var->dims,d);
edges[d] = (size_t)dim->dim.size;

View File

@ -3,6 +3,7 @@
*/
#include "d4includes.h"
#include <stddef.h>
/*
Provide a simple dump of binary data
@ -87,7 +88,7 @@ NCD4_tagdump(size_t size, const void* data0, int swap, const char* tag)
void
NCD4_dumpvars(NCD4node* group)
{
int i;
size_t i;
fprintf(stderr,"%s.vars:\n",group->name);
for(i=0;i<nclistlength(group->vars);i++) {
NCD4node* var = (NCD4node*)nclistget(group->vars,i);

View File

@ -6,8 +6,8 @@
#include "ncdispatch.h"
#include "ncd4dispatch.h"
#include "d4includes.h"
#include "d4read.h"
#include "d4curlfunctions.h"
#include <stddef.h>
#ifdef _MSC_VER
#include <process.h>
@ -23,19 +23,20 @@
static int constrainable(NCURI*);
static void freeCurl(NCD4curl*);
static void freeInfo(NCD4INFO*);
static int fragmentcheck(NCD4INFO*, const char* key, const char* subkey);
static const char* getfragment(NCD4INFO* info, const char* key);
static const char* getquery(NCD4INFO* info, const char* key);
static int set_curl_properties(NCD4INFO*);
static int makesubstrate(NCD4INFO* d4info);
static void resetInfoforRead(NCD4INFO* d4info);
/**************************************************/
/* Constants */
static const char* checkseps = "+,:;";
/*Define the set of protocols known to be constrainable */
static const char* constrainableprotocols[] = {"http", "https",NULL};
/**************************************************/
int
NCD4_open(const char * path, int mode,
@ -46,10 +47,10 @@ NCD4_open(const char * path, int mode,
NCD4INFO* d4info = NULL;
const char* value;
NC* nc;
NCD4meta* meta = NULL;
size_t len = 0;
void* contents = NULL;
NCD4response* dmrresp = NULL;
if(path == NULL)
return THROW(NC_EDAPURL);
@ -61,29 +62,27 @@ NCD4_open(const char * path, int mode,
/* Setup our NC and NCDAPCOMMON state*/
d4info = (NCD4INFO*)calloc(1,sizeof(NCD4INFO));
if(d4info == NULL) {ret = NC_ENOMEM; goto done;}
if((ret=NCD4_newInfo(&d4info))) goto done;
nc->dispatchdata = d4info;
nc->int_ncid = nc__pseudofd(); /* create a unique id */
d4info->controller = (NC*)nc;
/* Parse url and params */
if(ncuriparse(nc->path,&d4info->uri))
if(ncuriparse(nc->path,&d4info->dmruri))
{ret = NC_EDAPURL; goto done;}
/* Load auth info from rc file */
if((ret = NC_authsetup(&d4info->auth, d4info->uri)))
if((ret = NC_authsetup(&d4info->auth, d4info->dmruri)))
goto done;
NCD4_curl_protocols(d4info);
if(!constrainable(d4info->uri))
if(!constrainable(d4info->dmruri))
SETFLAG(d4info->controls.flags,NCF_UNCONSTRAINABLE);
/* fail if we are unconstrainable but have constraints */
if(FLAGSET(d4info->controls.flags,NCF_UNCONSTRAINABLE)) {
if(d4info->uri != NULL) {
const char* ce = ncuriquerylookup(d4info->uri,DAP4CE); /* Look for dap4.ce */
if(d4info->dmruri != NULL) {
const char* ce = ncuriquerylookup(d4info->dmruri,DAP4CE); /* Look for dap4.ce */
if(ce != NULL) {
nclog(NCLOGWARN,"Attempt to constrain an unconstrainable data source: %s=%s",
DAP4CE,ce);
@ -115,7 +114,7 @@ NCD4_open(const char * path, int mode,
}
/* Turn on logging; only do this after oc_open*/
if((value = ncurifragmentlookup(d4info->uri,"log")) != NULL) {
if((value = ncurifragmentlookup(d4info->dmruri,"log")) != NULL) {
ncloginit();
ncsetloglevel(NCLOGNOTE);
}
@ -150,30 +149,34 @@ NCD4_open(const char * path, int mode,
/* Reset the substrate */
if((ret=makesubstrate(d4info))) goto done;
/* Always start by reading the DMR only */
/* Always start by reading the whole DMR only */
/* reclaim substrate.metadata */
resetInfoforRead(d4info);
NCD4_resetInfoForRead(d4info);
/* Rebuild metadata */
if((d4info->substrate.metadata=NCD4_newmeta(d4info))==NULL)
{ret = NC_ENOMEM; goto done;}
if((ret = NCD4_newMeta(d4info,&d4info->dmrmetadata))) goto done;
if((ret=NCD4_readDMR(d4info, d4info->controls.flags.flags))) goto done;
/* Capture response */
if((dmrresp = (NCD4response*)calloc(1,sizeof(NCD4response)))==NULL)
{ret = NC_ENOMEM; goto done;}
dmrresp->controller = d4info;
if((ret=NCD4_readDMR(d4info, d4info->controls.flags.flags, d4info->dmruri, dmrresp))) goto done;
/* set serial.rawdata */
len = ncbyteslength(d4info->curl->packet);
contents = ncbytesextract(d4info->curl->packet);
NCD4_attachraw(d4info->substrate.metadata, len, contents);
assert(dmrresp != NULL);
dmrresp->raw.size = len;
dmrresp->raw.memory = contents;
/* process query parameters */
NCD4_applyclientquerycontrols(d4info);
meta = d4info->substrate.metadata;
/* process checksum parameters */
NCD4_applychecksumcontrols(d4info,dmrresp);
/* Infer the mode */
if((ret=NCD4_infermode(meta))) goto done;
if((ret=NCD4_infermode(dmrresp))) goto done;
/* Process the dmr part */
if((ret=NCD4_dechunk(meta))) goto done;
if((ret=NCD4_dechunk(dmrresp))) goto done;
#ifdef D4DUMPDMR
{
@ -184,13 +187,14 @@ NCD4_open(const char * path, int mode,
}
#endif
if((ret = NCD4_parse(d4info->substrate.metadata))) goto done;
if((ret = NCD4_parse(d4info->dmrmetadata,dmrresp,0))) goto done;
#ifdef D4DEBUGMETA
{
meta = d4info->dmrmetadata;
fprintf(stderr,"\n/////////////\n");
NCbytes* buf = ncbytesnew();
NCD4_print(d4info->substrate.metadata,buf);
NCD4_print(meta,buf);
ncbytesnull(buf);
fputs(ncbytescontents(buf),stderr);
ncbytesfree(buf);
@ -200,12 +204,20 @@ NCD4_open(const char * path, int mode,
#endif
/* Build the substrate metadata */
ret = NCD4_metabuild(d4info->substrate.metadata,d4info->substrate.metadata->ncid);
ret = NCD4_metabuild(d4info->dmrmetadata,d4info->dmrmetadata->ncid);
if(ret != NC_NOERR && ret != NC_EVARSIZE) goto done;
/* Remember the response */
nclistpush(d4info->responses,dmrresp);
/* Avoid duplicate reclaims */
dmrresp = NULL;
d4info = NULL;
done:
NCD4_reclaimResponse(dmrresp);
NCD4_reclaimInfo(d4info);
if(ret) {
freeInfo(d4info);
nc->dispatchdata = NULL;
}
return THROW(ret);
@ -236,7 +248,7 @@ NCD4_close(int ncid, void* ignore)
ret = nc_abort(substrateid);
}
freeInfo(d4info);
NCD4_reclaimInfo(d4info);
done:
return THROW(ret);
@ -248,82 +260,6 @@ NCD4_abort(int ncid)
return NCD4_close(ncid,NULL);
}
/**************************************************/
/* Reclaim an NCD4INFO instance */
/* Release all resources owned by an NCD4INFO instance (pre-refactor
   version). NULL-safe. Assumes the caller has already done
   nc_close|nc_abort on the substrate ncid. */
static void
freeInfo(NCD4INFO* d4info)
{
    if(d4info == NULL) return;
    d4info->controller = NULL; /* break link; the NC* is owned elsewhere */
    nullfree(d4info->rawurltext);
    nullfree(d4info->urltext);
    ncurifree(d4info->uri);
    freeCurl(d4info->curl);
    nullfree(d4info->data.memory);
    nullfree(d4info->data.ondiskfilename);
    if(d4info->data.ondiskfile != NULL)
        fclose(d4info->data.ondiskfile);
    nullfree(d4info->fileproto.filename);
    if(d4info->substrate.realfile
        && !FLAGSET(d4info->controls.debugflags,NCF_DEBUG_COPY)) {
        /* We used a real file, so we need to delete the temp file
           unless we are debugging.
           Assume caller has done nc_close|nc_abort on the ncid.
           Note that in theory, this should not be necessary since
           AFAIK the substrate file is still in def mode, and
           when aborted, it should be deleted. But that is not working
           for some reason, so we delete it ourselves. */
        if(d4info->substrate.filename != NULL) {
            unlink(d4info->substrate.filename);
        }
    }
    nullfree(d4info->substrate.filename); /* always reclaim the name itself */
    NCD4_reclaimMeta(d4info->substrate.metadata);
    NC_authfree(d4info->auth);
    nclistfree(d4info->blobs);
    free(d4info);
}
/* Reset NCD4INFO instance for new read request */
/* Reset an NCD4INFO instance so it can service a new read request:
   delete any temporary substrate file (unless debugging) and drop the
   substrate metadata tree. The NCD4INFO itself stays alive. NULL-safe. */
static void
resetInfoforRead(NCD4INFO* d4info)
{
    if(d4info == NULL) return;
    if(d4info->substrate.realfile
        && !FLAGSET(d4info->controls.debugflags,NCF_DEBUG_COPY)) {
        /* We used a real file, so we need to delete the temp file
           unless we are debugging.
           Assume caller has done nc_close|nc_abort on the ncid.
           Note that in theory, this should not be necessary since
           AFAIK the substrate file is still in def mode, and
           when aborted, it should be deleted. But that is not working
           for some reason, so we delete it ourselves. */
        if(d4info->substrate.filename != NULL) {
            unlink(d4info->substrate.filename);
        }
    }
    NCD4_resetMeta(d4info->substrate.metadata);
    /* NOTE(review): the nullfree here implies NCD4_resetMeta leaves the
       struct allocated — confirm against its definition */
    nullfree(d4info->substrate.metadata);
    d4info->substrate.metadata = NULL; /* avoid dangling pointer */
}
/* Release an NCD4curl wrapper: close the underlying curl handle, free
   the response packet buffer and any saved error strings, then the
   struct itself. NULL-safe. */
static void
freeCurl(NCD4curl* curl)
{
    if(curl == NULL) return;
    NCD4_curlclose(curl->curl);     /* close the underlying curl handle */
    ncbytesfree(curl->packet);      /* response accumulation buffer */
    nullfree(curl->errdata.code);
    nullfree(curl->errdata.message);
    free(curl);
}
/* Define the set of protocols known to be constrainable */
static const char* constrainableprotocols[] = {"http", "https",NULL};
static int
constrainable(NCURI* durl)
{
@ -366,7 +302,7 @@ set_curl_properties(NCD4INFO* d4info)
/* If no cookie file was defined, define a default */
char* path = NULL;
char* newpath = NULL;
int len;
size_t len;
errno = 0;
NCglobalstate* globalstate = NC_getglobalstate();
@ -449,11 +385,6 @@ NCD4_applyclientfragmentcontrols(NCD4INFO* info)
if(value != NULL)
strncpy(info->controls.substratename,value,(NC_MAX_NAME-1));
value = getfragment(info,"hyrax");
if(value != NULL) {
info->data.checksumignore = 1; /* Assume checksum, but ignore */
}
info->controls.opaquesize = DFALTOPAQUESIZE;
value = getfragment(info,"opaquesize");
if(value != NULL) {
@ -476,22 +407,29 @@ NCD4_applyclientfragmentcontrols(NCD4INFO* info)
}
}
/* Checksum controls are found both in the query and fragment
parts of a URL.
*/
void
NCD4_applyclientquerycontrols(NCD4INFO* info)
NCD4_applychecksumcontrols(NCD4INFO* info, NCD4response* resp)
{
const char* value = getquery(info,DAP4CSUM);
if(value == NULL) {
info->data.querychecksumming = DEFAULT_CHECKSUM_STATE;
resp->querychecksumming = DEFAULT_CHECKSUM_STATE;
} else {
if(strcasecmp(value,"false")==0) {
info->data.querychecksumming = 0;
resp->querychecksumming = 0;
} else if(strcasecmp(value,"true")==0) {
info->data.querychecksumming = 1;
resp->querychecksumming = 1;
} else {
nclog(NCLOGWARN,"Unknown checksum mode: %s ; using default",value);
info->data.querychecksumming = DEFAULT_CHECKSUM_STATE;
resp->querychecksumming = DEFAULT_CHECKSUM_STATE;
}
}
value = getfragment(info,"hyrax");
if(value != NULL) {
resp->checksumignore = 1; /* Assume checksum, but ignore */
}
}
/* Search for substring in value of param. If substring == NULL; then just
@ -523,7 +461,7 @@ getfragment(NCD4INFO* info, const char* key)
const char* value;
if(info == NULL || key == NULL) return NULL;
if((value=ncurifragmentlookup(info->uri,key)) == NULL)
if((value=ncurifragmentlookup(info->dmruri,key)) == NULL)
return NULL;
return value;
}
@ -537,7 +475,7 @@ getquery(NCD4INFO* info, const char* key)
const char* value;
if(info == NULL || key == NULL) return NULL;
if((value=ncuriquerylookup(info->uri,key)) == NULL)
if((value=ncuriquerylookup(info->dmruri,key)) == NULL)
return NULL;
return value;
}
@ -596,3 +534,175 @@ NCD4_get_substrate(NC* nc)
} else subnc = nc;
return subnc;
}
/**************************************************/
/* Allocate/Free for various structures */
/* Allocate and initialize a fresh NCD4INFO instance.
 * On success *d4infop receives (and owns) the new state; if d4infop is
 * NULL the instance is reclaimed immediately so nothing leaks.
 * Returns: NC_NOERR, or NC_ENOMEM on allocation failure. */
int
NCD4_newInfo(NCD4INFO** d4infop)
{
    NCD4INFO* state = (NCD4INFO*)calloc(1,sizeof(NCD4INFO));
    if(state == NULL)
        return THROW(NC_ENOMEM);
    /* Record the host byte order once for later swap decisions */
    state->platform.hostlittleendian = NCD4_isLittleEndian();
    state->responses = nclistnew();
    if(d4infop != NULL) {
        *d4infop = state; /* transfer ownership to caller */
    } else {
        NCD4_reclaimInfo(state); /* no receiver: reclaim immediately */
    }
    return THROW(NC_NOERR);
}
/* Reclaim an NCD4INFO instance */
/* Reclaim an NCD4INFO instance and everything it owns: URL state, the
   curl wrapper, the DMR metadata tree, and all captured responses.
   NULL-safe. */
void
NCD4_reclaimInfo(NCD4INFO* d4info)
{
    size_t i;
    if(d4info == NULL) return;
    d4info->controller = NULL; /* break link; the NC* is owned elsewhere */
    nullfree(d4info->rawdmrurltext);
    nullfree(d4info->dmrurltext);
    ncurifree(d4info->dmruri);
    freeCurl(d4info->curl);
    nullfree(d4info->fileproto.filename);
    /* Deletes any temp substrate file and reclaims dmrmetadata
       (which it also sets to NULL) */
    NCD4_resetInfoForRead(d4info);
    nullfree(d4info->substrate.filename); /* always reclaim */
    NC_authfree(d4info->auth);
    nclistfree(d4info->blobs);
    /* Reclaim dmr node tree; dmrmetadata is already NULL after
       NCD4_resetInfoForRead above, so this is a no-op safeguard */
    NCD4_reclaimMeta(d4info->dmrmetadata);
    /* Reclaim all responses */
    for(i=0;i<nclistlength(d4info->responses);i++) {
        NCD4response* resp = nclistget(d4info->responses,i);
        NCD4_reclaimResponse(resp);
    }
    nclistfree(d4info->responses);
    free(d4info);
}
/* Reset NCD4INFO instance for new read request */
/* Reset an NCD4INFO instance for a new read request: delete any
   temporary substrate file (unless debugging) and reclaim the DMR
   metadata tree. The NCD4INFO itself remains usable. NULL-safe. */
void
NCD4_resetInfoForRead(NCD4INFO* d4info)
{
    if(d4info == NULL) return;
    if(d4info->substrate.realfile
        && !FLAGSET(d4info->controls.debugflags,NCF_DEBUG_COPY)) {
        /* We used a real file, so we need to delete the temp file
           unless we are debugging.
           Assume caller has done nc_close|nc_abort on the ncid.
           Note that in theory, this should not be necessary since
           AFAIK the substrate file is still in def mode, and
           when aborted, it should be deleted. But that is not working
           for some reason, so we delete it ourselves. */
        if(d4info->substrate.filename != NULL) {
            unlink(d4info->substrate.filename);
        }
    }
    NCD4_reclaimMeta(d4info->dmrmetadata);
    d4info->dmrmetadata = NULL; /* avoid dangling pointer / double reclaim */
}
/* Release an NCD4curl wrapper: close the libcurl handle, free the
 * packet buffer and error strings, then free the wrapper itself.
 * NULL is a no-op. */
static void
freeCurl(NCD4curl* state)
{
    if(state != NULL) {
        NCD4_curlclose(state->curl);
        ncbytesfree(state->packet);
        nullfree(state->errdata.code);
        nullfree(state->errdata.message);
        free(state);
    }
}
/* Allocate a new NCD4response and link it back to its owning NCD4INFO.
 * If respp is non-NULL it receives the instance (ownership to caller);
 * otherwise the instance is reclaimed before returning.
 * Returns: NC_NOERR on success, NC_ENOMEM if allocation fails. */
int
NCD4_newResponse(NCD4INFO* info, NCD4response** respp)
{
    int ret = NC_NOERR;
    NCD4response* resp = NULL;
    /* NOTE: removed stale NC_UNUSED(info); info IS used just below */
    if((resp = calloc(1,sizeof(NCD4response)))==NULL)
        {ret = NC_ENOMEM; goto done;}
    resp->controller = info; /* back link to owner; not owned by resp */
    if(respp) {*respp = resp; resp = NULL;} /* transfer ownership */
done:
    if(resp) NCD4_reclaimResponse(resp); /* not transferred */
    return THROW(ret);
}
/* Reclaim an NCD4response instance and everything it owns.
   NULL is a no-op. Fields are freed first, then the containing
   structs are memset to zero so no dangling pointers survive
   (ordering matters: the memsets must follow the nullfrees). */
void
NCD4_reclaimResponse(NCD4response* d4resp)
{
struct NCD4serial* serial = NULL;
if(d4resp == NULL) return;
serial = &d4resp->serial;
d4resp->controller = NULL; /* break link; controller is not owned here */
nullfree(d4resp->raw.memory);
nullfree(serial->dmr);
nullfree(serial->errdata);
/* clear all fields */
memset(serial,0,sizeof(struct NCD4serial));
nullfree(d4resp->error.parseerror);
nullfree(d4resp->error.message);
nullfree(d4resp->error.context);
nullfree(d4resp->error.otherinfo);
memset(&d4resp->error,0,sizeof(d4resp->error));
free(d4resp);
}
/* Create an empty NCD4meta object for use in subsequent calls.
   (Is this the right src file to hold this?)
   If metap is non-NULL it receives the instance (ownership to caller);
   otherwise the instance is reclaimed before returning — this mirrors
   NCD4_newInfo/NCD4_newResponse and fixes a leak when metap == NULL.
   Returns: NC_NOERR on success, NC_ENOMEM if allocation fails. */
int
NCD4_newMeta(NCD4INFO* info, NCD4meta** metap)
{
    int ret = NC_NOERR;
    NCD4meta* meta = (NCD4meta*)calloc(1,sizeof(NCD4meta));
    if(meta == NULL) return NC_ENOMEM;
    meta->allnodes = nclistnew();
#ifdef D4DEBUG
    meta->debuglevel = 1;
#endif
    meta->controller = info;
    meta->ncid = info->substrate.nc4id; /* Transfer netcdf ncid */
    if(metap) {*metap = meta; meta = NULL;} /* transfer ownership */
    if(meta) NCD4_reclaimMeta(meta); /* not transferred: avoid leak */
    return THROW(ret);
}
/* Free an NCD4meta: reclaim every tracked node, release the tracking
 * lists, then the object itself. NULL is a no-op. */
void
NCD4_reclaimMeta(NCD4meta* dataset)
{
    size_t pos;
    if(dataset == NULL) return;
    for(pos=0;pos<nclistlength(dataset->allnodes);pos++)
        reclaimNode((NCD4node*)nclistget(dataset->allnodes,pos));
    nclistfree(dataset->allnodes);
    nclistfree(dataset->groupbyid);
    nclistfree(dataset->atomictypes);
    free(dataset);
}
/* NOTE(review): disabled (dead) variant of NCD4_resetMeta kept for
   reference; the outer #if 0/#endif excludes it from compilation,
   and the inner #if 0 body references an undeclared `i`, so it would
   need repair before being re-enabled. */
#if 0
void
NCD4_resetMeta(NCD4meta* dataset)
{
if(dataset == NULL) return;
#if 0
for(i=0;i<nclistlength(dataset->blobs);i++) {
void* p = nclistget(dataset->blobs,i);
nullfree(p);
}
nclistfree(dataset->blobs);
#endif
}
#endif

View File

@ -5,6 +5,7 @@
#include <stdarg.h>
#include <assert.h>
#include <stddef.h>
#include "d4includes.h"
@ -120,7 +121,7 @@ Do depth first search
static void
walk(NCD4node* node, NClist* sorted)
{
int i;
size_t i;
if(node->visited) return;
node->visited = 1;
@ -190,7 +191,7 @@ walk(NCD4node* node, NClist* sorted)
*/
int
NCD4_delimit(NCD4meta* compiler, NCD4node* topvar, NCD4offset* offset)
NCD4_delimit(NCD4meta* compiler, NCD4node* topvar, NCD4offset* offset, int inferredchecksumming)
{
int ret = NC_NOERR;
NCD4mark mark = 0;
@ -214,7 +215,7 @@ NCD4_delimit(NCD4meta* compiler, NCD4node* topvar, NCD4offset* offset)
topvar->data.dap4data.memory = mark;
topvar->data.dap4data.size = OFFSETSIZE(offset,mark);
/* extract the dap4 data checksum, if present */
if(compiler->controller->data.inferredchecksumming) {
if(inferredchecksumming) {
union ATOMICS csum;
TRANSFER(csum.u8,offset,CHECKSUMSIZE);
topvar->data.remotechecksum = csum.u32[0];
@ -310,7 +311,7 @@ static int
delimitStruct(NCD4meta* compiler, NCD4node* basetype, NCD4offset* offset)
{
int ret = NC_NOERR;
int i;
size_t i;
/* The fields are associated with the basetype struct */
for(i=0;i<nclistlength(basetype->vars);i++) {

View File

@ -6,7 +6,6 @@
#include "d4includes.h"
#include "d4curlfunctions.h"
static size_t WriteFileCallback(void*, size_t, size_t, void*);
static size_t WriteMemoryCallback(void*, size_t, size_t, void*);
static int curlerrtoncerr(CURLcode cstat);
@ -33,59 +32,6 @@ NCD4_fetchhttpcode(CURL* curl)
return httpcode;
}
/* Fetch `url` with libcurl and stream the body into `stream` via
   WriteFileCallback. On success, *sizep (if non-NULL) receives the
   number of bytes written and *filetime (if non-NULL) the server's
   last-modified time (CURLINFO_FILETIME; -1 when unavailable).
   Returns NC_NOERR, NC_EDAPSVC on transfer failure, or a curl-derived
   error code via curlerrtoncerr(). */
int
NCD4_fetchurl_file(CURL* curl, const char* url, FILE* stream,
d4size_t* sizep, long* filetime)
{
int ret = NC_NOERR;
CURLcode cstat = CURLE_OK;
struct Fetchdata fetchdata;
/* Set the URL */
cstat = curl_easy_setopt(curl, CURLOPT_URL, (void*)url);
if (cstat != CURLE_OK) goto fail;
/* send all data to this function */
cstat = curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteFileCallback);
if (cstat != CURLE_OK) goto fail;
/* we pass our file to the callback function */
cstat = curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&fetchdata);
if(cstat != CURLE_OK) goto fail;
/* One last thing; always try to get the last modified time */
cstat = curl_easy_setopt(curl, CURLOPT_FILETIME, (long)1);
if (cstat != CURLE_OK) goto fail;
/* fetchdata must be initialized before curl_easy_perform triggers
   the write callback (the WRITEDATA pointer was captured above) */
fetchdata.stream = stream;
fetchdata.size = 0;
cstat = curl_easy_perform(curl);
if (cstat != CURLE_OK)
{ret = NC_EDAPSVC; goto fail;}
if (ret == NC_NOERR) {
/* return the file size*/
#ifdef D4DEBUG
nclog(NCLOGNOTE,"filesize: %lu bytes",fetchdata.size);
#endif
if (sizep != NULL)
*sizep = fetchdata.size;
/* Get the last modified time */
if(filetime != NULL)
cstat = curl_easy_getinfo(curl,CURLINFO_FILETIME,filetime);
if(cstat != CURLE_OK)
{ret = NC_ECURL; goto fail;}
}
return THROW(ret);
fail:
/* map the libcurl failure to a netCDF error code and log it */
if(cstat != CURLE_OK) {
nclog(NCLOGERR, "curl error: %s", curl_easy_strerror(cstat));
ret = curlerrtoncerr(cstat);
}
return THROW(ret);
}
int
NCD4_fetchurl(CURL* curl, const char* url, NCbytes* buf, long* filetime, int* httpcodep)
{
@ -155,27 +101,6 @@ done:
return THROW(ret);
}
/* libcurl write callback: append the received chunk (size*nmemb bytes
   at ptr) to fetchdata->stream and track the running byte count in
   fetchdata->size. Returns the fwrite item count; per the libcurl
   contract, returning fewer than nmemb items aborts the transfer. */
static size_t
WriteFileCallback(void* ptr, size_t size, size_t nmemb, void* data)
{
size_t realsize = size * nmemb;
size_t count;
struct Fetchdata* fetchdata;
fetchdata = (struct Fetchdata*) data;
if(realsize == 0)
nclog(NCLOGWARN,"WriteFileCallback: zero sized chunk");
count = fwrite(ptr, size, nmemb, fetchdata->stream);
if (count > 0) {
fetchdata->size += (count * size); /* bytes actually written */
} else {
nclog(NCLOGWARN,"WriteFileCallback: zero sized write");
}
#ifdef PROGRESS
nclog(NCLOGNOTE,"callback: %lu bytes",(d4size_t)realsize);
#endif
return count;
}
static size_t
WriteMemoryCallback(void *ptr, size_t size, size_t nmemb, void *data)
{

View File

@ -5,6 +5,7 @@
#include "d4includes.h"
#include <stdarg.h>
#include <stddef.h>
#include "nc4internal.h"
#include "ncoffsets.h"
@ -58,7 +59,7 @@ int
NCD4_metabuild(NCD4meta* metadata, int ncid)
{
int ret = NC_NOERR;
int i;
size_t i;
metadata->ncid = ncid;
metadata->root->meta.id = ncid;
@ -89,75 +90,6 @@ done:
return THROW(ret);
}
/* Create an empty NCD4meta object for
use in subsequent calls
(is this the right src file to hold this?)
Returns NULL on allocation failure — callers must check. */
NCD4meta*
NCD4_newmeta(NCD4INFO* info)
{
NCD4meta* meta = (NCD4meta*)calloc(1,sizeof(NCD4meta));
if(meta == NULL) return NULL;
meta->allnodes = nclistnew();
#ifdef D4DEBUG
meta->debuglevel = 1;
#endif
meta->controller = info; /* back link to owner; not owned by meta */
meta->ncid = info->substrate.nc4id; /* Transfer netcdf ncid */
return meta;
}
/* Attach raw data to metadata: hand (rawsize,rawdata) to the serial
   state via NCD4_resetSerial. meta must be non-NULL (asserted).
   NOTE(review): ownership of rawdata after this call is determined by
   NCD4_resetSerial, which is not visible here — confirm before freeing. */
void
NCD4_attachraw(NCD4meta* meta, size_t rawsize, void* rawdata)
{
assert(meta != NULL);
NCD4_resetSerial(&meta->serial,rawsize,rawdata);
}
/* Set the debug verbosity level on a metadata object.
   NOTE(review): no NULL check on meta — callers must pass a valid pointer. */
void
NCD4_setdebuglevel(NCD4meta* meta, int debuglevel)
{
meta->debuglevel = debuglevel;
}
/* Free an NCD4meta: reset transient state, reclaim every tracked node,
   release the tracking lists, then the object itself. NULL is a no-op. */
void
NCD4_reclaimMeta(NCD4meta* dataset)
{
    size_t i; /* size_t to match nclistlength()'s unsigned return;
                 avoids a signed/unsigned comparison warning */
    if(dataset == NULL) return;
    NCD4_resetMeta(dataset); /* free error strings + serial state first */
    for(i=0;i<nclistlength(dataset->allnodes);i++) {
        NCD4node* node = (NCD4node*)nclistget(dataset->allnodes,i);
        reclaimNode(node);
    }
    nclistfree(dataset->allnodes);
    nclistfree(dataset->groupbyid);
    nclistfree(dataset->atomictypes);
    free(dataset);
}
/* Reset transient metadata state: free and NULL all error strings and
   drop the serial buffer via NCD4_resetSerial. Does NOT touch the node
   lists; NCD4_reclaimMeta handles those. NULL is a no-op. */
void
NCD4_resetMeta(NCD4meta* dataset)
{
if(dataset == NULL) return;
nullfree(dataset->error.parseerror); dataset->error.parseerror = NULL;
nullfree(dataset->error.message); dataset->error.message = NULL;
nullfree(dataset->error.context); dataset->error.context = NULL;
nullfree(dataset->error.otherinfo); dataset->error.otherinfo = NULL;
NCD4_resetSerial(&dataset->serial,0,NULL);
/* Dead code below (references an undeclared `i`); kept for reference */
#if 0
for(i=0;i<nclistlength(dataset->blobs);i++) {
void* p = nclistget(dataset->blobs,i);
nullfree(p);
}
nclistfree(dataset->blobs);
#endif
}
void
reclaimNode(NCD4node* node)
{
@ -188,7 +120,8 @@ reclaimNode(NCD4node* node)
static int
build(NCD4meta* builder, NCD4node* root)
{
int i,ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
size_t len = nclistlength(builder->allnodes);
/* Tag the root group */
@ -292,7 +225,8 @@ done:
static int
buildGroups(NCD4meta* builder, NCD4node* parent)
{
int i,ret=NC_NOERR;
size_t i;
int ret = NC_NOERR;
#ifdef D4DEBUG
fprintf(stderr,"build group: %s\n",parent->name);
#endif
@ -330,7 +264,8 @@ done:
static int
buildEnumeration(NCD4meta* builder, NCD4node* en)
{
int i,ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
NCD4node* group = NCD4_groupFor(en);
NCCHECK((nc_def_enum(group->meta.id,en->basetype->meta.id,en->name,&en->meta.id)));
for(i=0;i<nclistlength(en->en.econsts);i++) {
@ -411,7 +346,8 @@ done:
static int
buildMaps(NCD4meta* builder, NCD4node* var)
{
int i,ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
size_t count = nclistlength(var->maps);
char** memory = NULL;
char** p;
@ -441,7 +377,8 @@ done:
static int
buildAttributes(NCD4meta* builder, NCD4node* varorgroup)
{
int i,ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
NClist* blobs = NULL;
for(i=0;i<nclistlength(varorgroup->attributes);i++) {
@ -548,7 +485,8 @@ done:
static int
buildCompound(NCD4meta* builder, NCD4node* cmpdtype, NCD4node* group, char* name)
{
int i,ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
/* Step 1: compute field offsets */
computeOffsets(builder,cmpdtype);
@ -558,15 +496,14 @@ buildCompound(NCD4meta* builder, NCD4node* cmpdtype, NCD4node* group, char* name
/* Step 3: add the fields to type */
for(i=0;i<nclistlength(cmpdtype->vars);i++) {
int rank;
int dimsizes[NC_MAX_VAR_DIMS];
NCD4node* field = (NCD4node*)nclistget(cmpdtype->vars,i);
rank = nclistlength(field->dims);
size_t rank = nclistlength(field->dims);
if(rank == 0) { /* scalar */
NCCHECK((nc_insert_compound(group->meta.id, cmpdtype->meta.id,
field->name, field->meta.offset,
field->basetype->meta.id)));
} else if(rank > 0) { /* array */
} else { /* array */
int idimsizes[NC_MAX_VAR_DIMS];
int j;
getDimsizes(field,dimsizes);
@ -575,7 +512,7 @@ buildCompound(NCD4meta* builder, NCD4node* cmpdtype, NCD4node* group, char* name
NCCHECK((nc_insert_array_compound(group->meta.id, cmpdtype->meta.id,
field->name, field->meta.offset,
field->basetype->meta.id,
rank, idimsizes)));
(int)rank, idimsizes)));
}
}
@ -613,14 +550,13 @@ buildStructure(NCD4meta* builder, NCD4node* structvar)
{
int ret = NC_NOERR;
NCD4node* group;
int rank;
int dimids[NC_MAX_VAR_DIMS];
/* Step 1: define the variable */
rank = nclistlength(structvar->dims);
size_t rank = nclistlength(structvar->dims);
getDimrefs(structvar,dimids);
group = NCD4_groupFor(structvar);
NCCHECK((nc_def_var(group->meta.id,structvar->name,structvar->basetype->meta.id,rank,dimids,&structvar->meta.id)));
NCCHECK((nc_def_var(group->meta.id,structvar->name,structvar->basetype->meta.id,(int)rank,dimids,&structvar->meta.id)));
/* Tag the var */
savevarbyid(group,structvar);
@ -637,13 +573,12 @@ buildSequence(NCD4meta* builder, NCD4node* seq)
int ret = NC_NOERR;
NCD4node* group;
int rank;
int dimids[NC_MAX_VAR_DIMS];
rank = nclistlength(seq->dims);
size_t rank = nclistlength(seq->dims);
getDimrefs(seq,dimids);
group = NCD4_groupFor(seq);
NCCHECK((nc_def_var(group->meta.id,seq->name,seq->basetype->meta.id,rank,dimids,&seq->meta.id)));
NCCHECK((nc_def_var(group->meta.id,seq->name,seq->basetype->meta.id,(int)rank,dimids,&seq->meta.id)));
savevarbyid(group,seq);
/* Build attributes and map attributes WRT the variable */
@ -672,8 +607,41 @@ savevarbyid(NCD4node* group, NCD4node* var)
{
if(group->group.varbyid == NULL)
group->group.varbyid = nclistnew();
nclistsetalloc(group->group.varbyid,var->meta.id);
nclistinsert(group->group.varbyid,var->meta.id,var);
nclistsetalloc(group->group.varbyid, (size_t)var->meta.id);
nclistinsert(group->group.varbyid, (size_t)var->meta.id,var);
}
/* Collect FQN path from var node up to and including
   the root group and create a name from it, with group names
   backslash-escaped and joined by '/'; `tail` (if non-NULL) is appended.
   Returns a malloc'd string the caller must free, or NULL on failure
   (escape failure no longer leaks the intermediate list/buffer). */
char*
NCD4_getVarFQN(NCD4node* var, const char* tail)
{
    size_t i;
    NCD4node* x = NULL;
    NClist* path = NULL;
    NCbytes* fqn = NULL;
    char* result;

    /* Collect ancestors root-first; guard against a NULL container
       at the top of the chain */
    path = nclistnew();
    for(x=var->container;x != NULL && ISGROUP(x->sort);x=x->container) {
        nclistinsert(path,0,x);
    }
    fqn = ncbytesnew();
    for(i=0;i<nclistlength(path);i++) {
        NCD4node* grp = (NCD4node*)nclistget(path,i);
        char* escaped = backslashEscape(grp->name);
        if(escaped == NULL) {
            /* fix: release path and fqn instead of leaking them */
            nclistfree(path);
            ncbytesfree(fqn);
            return NULL;
        }
        if(i > 0) ncbytesappend(fqn,'/');
        ncbytescat(fqn,escaped);
        free(escaped);
    }
    nclistfree(path);
    if(tail != NULL)
        ncbytescat(fqn,tail);
    result = ncbytesextract(fqn);
    ncbytesfree(fqn);
    return result;
}
/* Collect FQN path from node up to (but not including)
@ -682,7 +650,7 @@ savevarbyid(NCD4node* group, NCD4node* var)
static char*
getFieldFQN(NCD4node* field, const char* tail)
{
int i;
size_t i;
NCD4node* x = NULL;
NClist* path = NULL;
NCbytes* fqn = NULL;
@ -712,8 +680,8 @@ getFieldFQN(NCD4node* field, const char* tail)
static size_t
getDimrefs(NCD4node* var, int* dimids)
{
int i;
int rank = nclistlength(var->dims);
size_t i;
size_t rank = nclistlength(var->dims);
for(i=0;i<rank;i++) {
NCD4node* dim = (NCD4node*)nclistget(var->dims,i);
dimids[i] = dim->meta.id;
@ -724,8 +692,8 @@ getDimrefs(NCD4node* var, int* dimids)
static size_t
getDimsizes(NCD4node* var, int* dimsizes)
{
int i;
int rank = nclistlength(var->dims);
size_t i;
size_t rank = nclistlength(var->dims);
for(i=0;i<rank;i++) {
NCD4node* dim = (NCD4node*)nclistget(var->dims,i);
dimsizes[i] = (int)dim->dim.size;
@ -756,7 +724,8 @@ to nc_put_att().
static int
compileAttrValues(NCD4meta* builder, NCD4node* attr, void** memoryp, NClist* blobs)
{
int i,ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
unsigned char* memory = NULL;
unsigned char* p;
size_t size;
@ -766,7 +735,7 @@ compileAttrValues(NCD4meta* builder, NCD4node* attr, void** memoryp, NClist* blo
NCD4node* container = attr->container;
NCD4node* basetype = attr->basetype;
NClist* values = attr->attr.values;
int count = nclistlength(values);
size_t count = nclistlength(values);
memset((void*)&converter,0,sizeof(converter));
@ -923,7 +892,8 @@ Note: this will work if the econst string is a number or a econst name
static int
decodeEconst(NCD4meta* builder, NCD4node* enumtype, const char* nameorval, union ATOMICS* converter)
{
int i,ret=NC_NOERR;
size_t i;
int ret = NC_NOERR;
union ATOMICS number;
NCD4node* match = NULL;
@ -985,7 +955,7 @@ backslashEscape(const char* s)
static int
markfixedsize(NCD4meta* meta)
{
int i,j;
size_t i,j;
for(i=0;i<nclistlength(meta->allnodes);i++) {
int fixed = 1;
NCD4node* n = (NCD4node*)nclistget(meta->allnodes,i);
@ -1015,7 +985,7 @@ markfixedsize(NCD4meta* meta)
static void
computeOffsets(NCD4meta* builder, NCD4node* cmpd)
{
int i;
size_t i;
d4size_t offset = 0;
d4size_t largestalign = 1;
d4size_t size = 0;
@ -1139,7 +1109,7 @@ getpadding(d4size_t offset, size_t alignment)
static int
markdapsize(NCD4meta* meta)
{
int i,j;
size_t i,j;
for(i=0;i<nclistlength(meta->allnodes);i++) {
NCD4node* type = (NCD4node*)nclistget(meta->allnodes,i);
size_t totalsize;
@ -1180,7 +1150,7 @@ markdapsize(NCD4meta* meta)
}
int
NCD4_findvar(NC* ncp, int ncid, int varid, NCD4node** varp, NCD4node** grpp)
NCD4_findvar(NC* ncp, int gid, int varid, NCD4node** varp, NCD4node** grpp)
{
int ret = NC_NOERR;
NCD4INFO* info = NULL;
@ -1192,20 +1162,20 @@ NCD4_findvar(NC* ncp, int ncid, int varid, NCD4node** varp, NCD4node** grpp)
info = getdap(ncp);
if(info == NULL)
return THROW(NC_EBADID);
meta = info->substrate.metadata;
meta = info->dmrmetadata;
if(meta == NULL)
return THROW(NC_EBADID);
/* Locate var node via (grpid,varid) */
grp_id = GROUPIDPART(ncid);
group = nclistget(meta->groupbyid,grp_id);
grp_id = GROUPIDPART(gid);
group = nclistget(meta->groupbyid, (size_t)grp_id);
if(group == NULL)
return THROW(NC_EBADID);
var = nclistget(group->group.varbyid,varid);
var = nclistget(group->group.varbyid, (size_t)varid);
if(var == NULL)
return THROW(NC_EBADID);
if(varp) *varp = var;
if(grpp) *grpp = group;
return ret;
return THROW(ret);
}
static int

View File

@ -78,7 +78,7 @@ d4odom_print(D4odometer* odom)
if(odom->rank == 0) {
strlcat(line,"[]",sizeof(line));
} else for(i=0;i<odom->rank;i++) {
sprintf(tmp,"[%lu/%lu:%lu:%lu]",
snprintf(tmp,sizeof(tmp),"[%lu/%lu:%lu:%lu]",
(size_t)odom->index[i],
(size_t)odom->start[i],
(size_t)odom->stride[i],

View File

@ -6,6 +6,7 @@
#include "d4includes.h"
#include <stdarg.h>
#include <assert.h>
#include <stddef.h>
#include "ncxml.h"
/**
@ -154,7 +155,7 @@ static int defineBytestringType(NCD4parser*);
/* API */
int
NCD4_parse(NCD4meta* metadata)
NCD4_parse(NCD4meta* metadata, NCD4response* resp, int dapparse)
{
int ret = NC_NOERR;
NCD4parser* parser = NULL;
@ -168,8 +169,10 @@ NCD4_parse(NCD4meta* metadata)
/* Create and fill in the parser state */
parser = (NCD4parser*)calloc(1,sizeof(NCD4parser));
if(parser == NULL) {ret=NC_ENOMEM; goto done;}
parser->controller = metadata->controller;
parser->metadata = metadata;
doc = ncxml_parse(parser->metadata->serial.dmr,strlen(parser->metadata->serial.dmr));
parser->response = resp;
doc = ncxml_parse(parser->response->serial.dmr,strlen(parser->response->serial.dmr));
if(doc == NULL) {ret=NC_ENOMEM; goto done;}
dom = ncxml_root(doc);
parser->types = nclistnew();
@ -178,6 +181,7 @@ NCD4_parse(NCD4meta* metadata)
#ifdef D4DEBUG
parser->debuglevel = 1;
#endif
parser->dapparse = dapparse;
/*Walk the DOM tree to build the DAP4 node tree*/
ret = traverse(parser,dom);
@ -214,9 +218,9 @@ traverse(NCD4parser* parser, ncxml_t dom)
ret=parseError(parser,dom);
/* Report the error */
fprintf(stderr,"DAP4 Error: http-code=%d message=\"%s\" context=\"%s\"\n",
parser->metadata->error.httpcode,
parser->metadata->error.message,
parser->metadata->error.context);
parser->response->error.httpcode,
parser->response->error.message,
parser->response->error.context);
fflush(stderr);
ret=NC_EDMR;
goto done;
@ -225,7 +229,8 @@ traverse(NCD4parser* parser, ncxml_t dom)
if((ret=makeNode(parser,NULL,NULL,NCD4_GROUP,NC_NULL,&parser->metadata->root))) goto done;
parser->metadata->root->group.isdataset = 1;
parser->metadata->root->meta.id = parser->metadata->ncid;
parser->metadata->groupbyid = nclistnew();
if(parser->metadata->groupbyid == NULL)
parser->metadata->groupbyid = nclistnew();
SETNAME(parser->metadata->root,"/");
xattr = ncxml_attr(dom,"name");
if(xattr != NULL) parser->metadata->root->group.datasetname = xattr;
@ -847,23 +852,23 @@ parseError(NCD4parser* parser, ncxml_t errxml)
char* shttpcode = ncxml_attr(errxml,"httpcode");
ncxml_t x;
if(shttpcode == NULL) shttpcode = strdup("400");
if(sscanf(shttpcode,"%d",&parser->metadata->error.httpcode) != 1)
if(sscanf(shttpcode,"%d",&parser->response->error.httpcode) != 1)
nclog(NCLOGERR,"Malformed <ERROR> response");
nullfree(shttpcode);
x=ncxml_child(errxml, "Message");
if(x != NULL) {
char* txt = ncxml_text(x);
parser->metadata->error.message = (txt == NULL ? NULL : txt);
parser->response->error.message = (txt == NULL ? NULL : txt);
}
x = ncxml_child(errxml, "Context");
if(x != NULL) {
const char* txt = ncxml_text(x);
parser->metadata->error.context = (txt == NULL ? NULL : strdup(txt));
parser->response->error.context = (txt == NULL ? NULL : strdup(txt));
}
x=ncxml_child(errxml, "OtherInformation");
if(x != NULL) {
const char* txt = ncxml_text(x);
parser->metadata->error.otherinfo = (txt == NULL ? NULL : strdup(txt));
parser->response->error.otherinfo = (txt == NULL ? NULL : strdup(txt));
}
return THROW(NC_NOERR);
}
@ -874,7 +879,8 @@ Find or create an opaque type
static NCD4node*
getOpaque(NCD4parser* parser, ncxml_t varxml, NCD4node* group)
{
int i, ret = NC_NOERR;
size_t i;
int ret = NC_NOERR;
long long len;
NCD4node* opaquetype = NULL;
char* xattr;
@ -1020,7 +1026,7 @@ done:
NCD4node*
NCD4_findAttr(NCD4node* container, const char* attrname)
{
int i;
size_t i;
/* Look directly under this xml for <Attribute> */
for(i=0;i<nclistlength(container->attributes);i++) {
NCD4node* attr = (NCD4node*)nclistget(container->attributes,i);
@ -1091,7 +1097,8 @@ static int
lookupFQNList(NCD4parser* parser, NClist* fqn, NCD4sort sort, NCD4node** result)
{
int ret = NC_NOERR;
int i,nsteps;
size_t i;
int nsteps;
NCD4node* current;
char* name = NULL;
NCD4node* node = NULL;
@ -1135,7 +1142,7 @@ lookupFQNList(NCD4parser* parser, NClist* fqn, NCD4sort sort, NCD4node** result)
assert (i < (nsteps - 1));
i++; /* skip variable name */
for(;;i++) {
int j;
size_t j;
name = (char*)nclistget(fqn,i);
assert(ISTYPE(current->sort) && ISCMPD(current->subsort));
for(node=NULL,j=0;j<nclistlength(current->vars);j++) {
@ -1162,7 +1169,7 @@ notfound:
static NCD4node*
lookFor(NClist* elems, const char* name, NCD4sort sort)
{
int n,i;
size_t n,i;
if(elems == NULL || nclistlength(elems) == 0) return NULL;
n = nclistlength(elems);
for(i=0;i<n;i++) {
@ -1176,7 +1183,7 @@ lookFor(NClist* elems, const char* name, NCD4sort sort)
void
NCD4_printElems(NCD4node* group)
{
int n,i;
size_t n,i;
NClist* elems;
elems = group->group.elements;
if(elems == NULL || nclistlength(elems) == 0) return;
@ -1321,7 +1328,7 @@ makeNode(NCD4parser* parser, NCD4node* parent, ncxml_t xml, NCD4sort sort, nc_ty
record(parser,node);
if(nodep) *nodep = node;
done:
return ret;
return THROW(ret);
}
static int
@ -1642,7 +1649,7 @@ static int
parseForwards(NCD4parser* parser, NCD4node* root)
{
int ret = NC_NOERR;
int i,j;
size_t i,j;
/* process all vars */
for(i=0;i<nclistlength(parser->vars);i++) {
@ -1652,12 +1659,19 @@ parseForwards(NCD4parser* parser, NCD4node* root)
const char* mapname = (const char*)nclistget(var->mapnames,j);
/* Find the corresponding variable */
NCD4node* mapref = lookupFQN(parser,mapname,NCD4_VAR);
if(mapref == NULL)
if(mapref != NULL)
PUSH(var->maps,mapref);
else if(!parser->dapparse)
FAIL(NC_ENOTVAR,"<Map> name does not refer to a variable: %s",mapname);
PUSH(var->maps,mapref);
}
}
done:
return THROW(ret);
}
void
NCD4_setdebuglevel(NCD4parser* parser, int debuglevel)
{
parser->debuglevel = debuglevel;
}

View File

@ -4,6 +4,7 @@
*********************************************************************/
#include "d4includes.h"
#include <stddef.h>
/**
This provides a simple dap4 metadata -> xml printer.
@ -87,7 +88,7 @@ static int
printNode(D4printer* out, NCD4node* node, int depth)
{
int ret = NC_NOERR;
int i;
size_t i;
char* fqn = NULL;
switch (node->sort) {
@ -297,13 +298,12 @@ static int
printGroupBody(D4printer* out, NCD4node* node, int depth)
{
int ret = NC_NOERR;
int i,ngroups,nvars,ntypes,ndims,nattrs;
ngroups = nclistlength(node->groups);
nvars = nclistlength(node->vars);
ntypes = nclistlength(node->types);
ndims = nclistlength(node->dims);
nattrs = nclistlength(node->attributes);
size_t i;
size_t ngroups = nclistlength(node->groups);
size_t nvars = nclistlength(node->vars);
size_t ntypes = nclistlength(node->types);
size_t ndims = nclistlength(node->dims);
size_t nattrs = nclistlength(node->attributes);
if(ndims > 0) {
INDENT(depth);
@ -371,7 +371,7 @@ static int
printMetaData(D4printer* out, NCD4node* node, int depth)
{
int ret = NC_NOERR;
int i;
size_t i;
if(nclistlength(node->dims) > 0) {
for(i=0;i<nclistlength(node->dims);i++) {
@ -456,7 +456,7 @@ static int
printAttribute(D4printer* out, NCD4node* attr, int depth)
{
int ret = NC_NOERR;
int i = 0;
size_t i = 0;
char* fqn = NULL;
INDENT(depth); CAT("<Attribute");

View File

@ -19,9 +19,8 @@ See \ref copyright file for more info.
/* Do conversion if this code was compiled via Vis. Studio or Mingw */
/*Forward*/
static int readpacket(NCD4INFO* state, NCURI*, NCbytes*, NCD4mode, NCD4format, long*);
static int readpacket(NCD4INFO* state, NCURI*, NCbytes*, NCD4mode, NCD4format, int*, long*);
static int readfile(NCD4INFO* state, const NCURI* uri, NCD4mode dxx, NCD4format fxx, NCbytes* packet);
static int readfiletofile(NCD4INFO* state, const NCURI* uri, NCD4mode dxx, NCD4format fxx, FILE* stream, d4size_t* sizep);
static int readfileDAPDMR(NCD4INFO* state, const NCURI* uri, NCbytes* packet);
#ifdef HAVE_GETTIMEOFDAY
@ -38,82 +37,27 @@ deltatime(struct timeval time0,struct timeval time1)
#endif
int
NCD4_readDMR(NCD4INFO* state, int flags)
NCD4_readDMR(NCD4INFO* state, int flags, NCURI* url, NCD4response* resp)
{
int stat = NC_NOERR;
long lastmod = -1;
if((flags & NCF_ONDISK) == 0) {
ncbytesclear(state->curl->packet);
stat = readpacket(state,state->uri,state->curl->packet,NCD4_DMR,NCD4_FORMAT_XML,&lastmod);
if(stat == NC_NOERR)
state->data.dmrlastmodified = lastmod;
} else { /*((flags & NCF_ONDISK) != 0) */
NCURI* url = state->uri;
int fileprotocol = (strcmp(url->protocol,"file")==0);
if(fileprotocol) {
stat = readfiletofile(state, url, NCD4_DMR, NCD4_FORMAT_XML, state->data.ondiskfile, &state->data.datasize);
} else {
char* readurl = NULL;
int flags = 0;
if(!fileprotocol) flags |= NCURIQUERY;
flags |= NCURIENCODE;
flags |= NCURIPWD;
#ifdef FIX
ncurisetconstraints(url,state->constraint);
#endif
readurl = ncuribuild(url,NULL,".dmr.xml",NCURISVC);
if(readurl == NULL)
return THROW(NC_ENOMEM);
stat = NCD4_fetchurl_file(state->curl, readurl, state->data.ondiskfile,
&state->data.datasize, &lastmod);
nullfree(readurl);
if(stat == NC_NOERR)
state->data.dmrlastmodified = lastmod;
}
}
ncbytesclear(state->curl->packet);
stat = readpacket(state,url,state->curl->packet,NCD4_DMR,NCD4_FORMAT_XML,&resp->serial.httpcode,NULL);
return THROW(stat);
}
int
NCD4_readDAP(NCD4INFO* state, int flags)
NCD4_readDAP(NCD4INFO* state, int flags, NCURI* url, NCD4response* resp)
{
int stat = NC_NOERR;
long lastmod = -1;
if((flags & NCF_ONDISK) == 0) {
ncbytesclear(state->curl->packet);
stat = readpacket(state,state->uri,state->curl->packet,NCD4_DAP,NCD4_FORMAT_NONE,&lastmod);
if(stat) {
NCD4_seterrormessage(state->substrate.metadata, nclistlength(state->curl->packet), nclistcontents(state->curl->packet));
goto done;
} else
state->data.daplastmodified = lastmod;
} else { /*((flags & NCF_ONDISK) != 0) */
NCURI* url = state->uri;
int fileprotocol = (strcmp(url->protocol,"file")==0);
if(fileprotocol) {
stat = readfiletofile(state, url, NCD4_DAP, NCD4_FORMAT_NONE, state->data.ondiskfile, &state->data.datasize);
} else {
char* readurl = NULL;
int flags = 0;
if(!fileprotocol) flags |= NCURIQUERY;
flags |= NCURIENCODE;
flags |= NCURIPWD;
#ifdef FIX
ncurisetconstraints(url,state->constraint);
#endif
readurl = ncuribuild(url,NULL,".dap",NCURISVC);
if(readurl == NULL)
return THROW(NC_ENOMEM);
stat = NCD4_fetchurl_file(state->curl, readurl, state->data.ondiskfile,
&state->data.datasize, &lastmod);
nullfree(readurl);
if(stat == NC_NOERR)
state->data.daplastmodified = lastmod;
}
ncbytesclear(state->curl->packet);
stat = readpacket(state,url,state->curl->packet,NCD4_DAP,NCD4_FORMAT_NONE,&resp->serial.httpcode,NULL);
if(stat) {
NCD4_seterrormessage(resp, nclistlength(state->curl->packet), nclistcontents(state->curl->packet));
} else {
resp->raw.size = ncbyteslength(state->curl->packet);
resp->raw.memory = ncbytesextract(state->curl->packet);
}
done:
return THROW(stat);
}
@ -150,7 +94,7 @@ dxxformat(int fxx, int dxx)
}
static int
readpacket(NCD4INFO* state, NCURI* url, NCbytes* packet, NCD4mode dxx, NCD4format fxx, long* lastmodified)
readpacket(NCD4INFO* state, NCURI* url, NCbytes* packet, NCD4mode dxx, NCD4format fxx, int* httpcodep, long* lastmodified)
{
int stat = NC_NOERR;
int fileprotocol = 0;
@ -185,7 +129,7 @@ readpacket(NCD4INFO* state, NCURI* url, NCbytes* packet, NCD4mode dxx, NCD4forma
gettimeofday(&time0,NULL);
#endif
}
stat = NCD4_fetchurl(curl,fetchurl,packet,lastmodified,&state->substrate.metadata->error.httpcode);
stat = NCD4_fetchurl(curl,fetchurl,packet,lastmodified,httpcodep);
nullfree(fetchurl);
if(stat) goto fail;
if(FLAGSET(state->controls.flags,NCF_SHOWFETCH)) {
@ -207,37 +151,26 @@ fail:
return THROW(stat);
}
#if 0
static int
readfiletofile(NCD4INFO* state, const NCURI* uri, NCD4mode dxx, NCD4format fxx, FILE* stream, d4size_t* sizep)
readfromfile(NCD4INFO* state, const NCURI* uri, NCD4mode dxx, NCD4format fxx, d4size_t* sizep)
{
int stat = NC_NOERR;
NCbytes* packet = ncbytesnew();
size_t len;
stat = readfile(state, uri, dxx, fxx, packet);
ncbytesclear(state->curl->packet);
stat = readfile(state, uri, dxx, fxx, state->curl->packet);
#ifdef D4DEBUG
fprintf(stderr,"readfiletofile: packet.size=%lu\n",
(unsigned long)ncbyteslength(packet));
(unsigned long)ncbyteslength(state->curl->packet));
#endif
if(stat != NC_NOERR) goto unwind;
len = nclistlength(packet);
if(stat == NC_NOERR) {
size_t written;
fseek(stream,0,SEEK_SET);
written = fwrite(ncbytescontents(packet),1,len,stream);
if(written != len) {
#ifdef D4DEBUG
fprintf(stderr,"readfiletofile: written!=length: %lu :: %lu\n",
(unsigned long)written,(unsigned long)len);
#endif
stat = NC_EIO;
}
}
len = nclistlength(state->curl->packet);
if(sizep != NULL) *sizep = len;
unwind:
ncbytesfree(packet);
return THROW(stat);
}
#endif
static int
readfile(NCD4INFO* state, const NCURI* uri, NCD4mode dxx, NCD4format fxx, NCbytes* packet)
@ -365,12 +298,12 @@ done:
/* Extract packet as error message; assume httpcode set */
int
NCD4_seterrormessage(NCD4meta* metadata, size_t len, char* msg)
NCD4_seterrormessage(NCD4response* resp, size_t len, char* msg)
{
metadata->error.message = (char*)d4alloc(len+1);
if(metadata->error.message == NULL)
resp->error.message = (char*)d4alloc(len+1);
if(resp->error.message == NULL)
return THROW(NC_ENOMEM);
memcpy(metadata->error.message,msg,len);
metadata->error.message[len] = '\0';
memcpy(resp->error.message,msg,len);
resp->error.message[len] = '\0';
return THROW(NC_ENODATA); /* slight lie */
}

View File

@ -7,6 +7,6 @@
#define D4READ_H
extern int NCD4_readDMR(NCD4INFO*, int flags);
extern int NCD4_readDAP(NCD4INFO*, int flags);
extern int NCD4_readDAP(NCD4INFO*, int flags, NCURI* uri);
#endif /*READ_H*/

View File

@ -5,6 +5,7 @@
#include "d4includes.h"
#include <stdarg.h>
#include <stddef.h>
#include "d4includes.h"
/*
@ -14,12 +15,12 @@ the incoming data to get the endianness correct.
/* Forward */
static int walkAtomicVar(NCD4meta*, NCD4node*, NCD4node*, NCD4offset* offset);
static int walkOpaqueVar(NCD4meta*,NCD4node*, NCD4node*, NCD4offset* offset);
static int walkStructArray(NCD4meta*,NCD4node*, NCD4node*, NCD4offset* offset);
static int walkStruct(NCD4meta*, NCD4node*, NCD4node*, NCD4offset* offset);
static int walkSeqArray(NCD4meta*, NCD4node*, NCD4node*, NCD4offset* offset);
static int walkSeq(NCD4meta*,NCD4node*, NCD4node*, NCD4offset* offset);
static int walkAtomicVar(NCD4response*, NCD4node*, NCD4node*, NCD4offset* offset,int doswap);
static int walkOpaqueVar(NCD4response*,NCD4node*, NCD4node*, NCD4offset* offset,int doswap);
static int walkStructArray(NCD4response*,NCD4node*, NCD4node*, NCD4offset* offset,int doswap);
static int walkStruct(NCD4response*, NCD4node*, NCD4node*, NCD4offset* offset,int doswap);
static int walkSeqArray(NCD4response*, NCD4node*, NCD4node*, NCD4offset* offset,int doswap);
static int walkSeq(NCD4response*,NCD4node*, NCD4node*, NCD4offset* offset,int doswap);
/**************************************************/
@ -28,43 +29,39 @@ Assumes that compiler->swap is true; does necessary
byte swapping.
*/
int
NCD4_swapdata(NCD4meta* compiler, NClist* topvars)
NCD4_swapdata(NCD4response* resp, NCD4node* var, int doswap)
{
int ret = NC_NOERR;
int i;
NCD4offset* offset = NULL;
offset = BUILDOFFSET(compiler->serial.dap,compiler->serial.dapsize);
for(i=0;i<nclistlength(topvars);i++) {
NCD4node* var = (NCD4node*)nclistget(topvars,i);
offset = BUILDOFFSET(resp->serial.dap,resp->serial.dapsize);
OFFSET2BLOB(var->data.dap4data,offset);
switch (var->subsort) {
default:
if((ret=walkAtomicVar(compiler,var,var,offset))) goto done;
if((ret=walkAtomicVar(resp,var,var,offset,doswap))) goto done;
break;
case NC_OPAQUE:
/* The only thing we need to do is swap the counts */
if((ret=walkOpaqueVar(compiler,var,var,offset))) goto done;
if((ret=walkOpaqueVar(resp,var,var,offset,doswap))) goto done;
break;
case NC_STRUCT:
if((ret=walkStructArray(compiler,var,var,offset))) goto done;
if((ret=walkStructArray(resp,var,var,offset,doswap))) goto done;
break;
case NC_SEQ:
if((ret=walkSeqArray(compiler,var,var,offset))) goto done;
if((ret=walkSeqArray(resp,var,var,offset,doswap))) goto done;
break;
}
var->data.dap4data.size = DELTA(offset,var->data.dap4data.memory);
/* skip checksum, if there is one */
if(compiler->controller->data.inferredchecksumming)
if(resp->inferredchecksumming)
INCR(offset,CHECKSUMSIZE);
}
done:
if(offset) free(offset);
return THROW(ret);
}
static int
walkAtomicVar(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* offset)
walkAtomicVar(NCD4response* resp, NCD4node* topvar, NCD4node* var, NCD4offset* offset, int doswap)
{
int ret = NC_NOERR;
d4size_t i;
@ -87,7 +84,7 @@ walkAtomicVar(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* o
} else { /*(typesize > 1)*/
for(i=0;i<dimproduct;i++) {
char* sp = offset->offset;
if(compiler->swap) {
if(doswap) {
switch (typesize) {
case 2: swapinline16(sp); break;
case 4: swapinline32(sp); break;
@ -102,7 +99,7 @@ walkAtomicVar(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* o
COUNTERTYPE count;
for(i=0;i<dimproduct;i++) {
/* Get string count */
if(compiler->swap)
if(doswap)
swapinline64(offset);
count = GETCOUNTER(offset);
SKIPCOUNTER(offset);
@ -114,7 +111,7 @@ walkAtomicVar(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* o
}
static int
walkOpaqueVar(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* offset)
walkOpaqueVar(NCD4response* resp, NCD4node* topvar, NCD4node* var, NCD4offset* offset, int doswap)
{
int ret = NC_NOERR;
d4size_t i;
@ -125,7 +122,7 @@ walkOpaqueVar(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* o
for(i=0;i<dimproduct;i++) {
/* Get and swap opaque count */
if(compiler->swap)
if(doswap)
swapinline64(offset);
count = GETCOUNTER(offset);
SKIPCOUNTER(offset);
@ -135,7 +132,7 @@ walkOpaqueVar(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* o
}
static int
walkStructArray(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* offset)
walkStructArray(NCD4response* resp, NCD4node* topvar, NCD4node* var, NCD4offset* offset, int doswap)
{
int ret = NC_NOERR;
d4size_t i;
@ -144,7 +141,7 @@ walkStructArray(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset
for(i=0;i<dimproduct;i++) {
/* Swap, possibly recursively, the single struct pointed to by offset*/
if((ret=walkStruct(compiler,topvar,basetype,offset))) goto done;
if((ret=walkStruct(resp,topvar,basetype,offset,doswap))) goto done;
}
done:
@ -152,27 +149,27 @@ done:
}
static int
walkStruct(NCD4meta* compiler, NCD4node* topvar, NCD4node* structtype, NCD4offset* offset)
walkStruct(NCD4response* resp, NCD4node* topvar, NCD4node* structtype, NCD4offset* offset, int doswap)
{
int ret = NC_NOERR;
int i;
size_t i;
for(i=0;i<nclistlength(structtype->vars);i++) {
NCD4node* field = (NCD4node*)nclistget(structtype->vars,i);
NCD4node* fieldbase = field->basetype;
switch (fieldbase->subsort) {
default:
if((ret=walkAtomicVar(compiler,topvar,field,offset))) goto done;
if((ret=walkAtomicVar(resp,topvar,field,offset,doswap))) goto done;
break;
case NC_OPAQUE:
/* The only thing we need to do is swap the counts */
if((ret=walkOpaqueVar(compiler,topvar,field,offset))) goto done;
if((ret=walkOpaqueVar(resp,topvar,field,offset,doswap))) goto done;
break;
case NC_STRUCT:
if((ret=walkStructArray(compiler,topvar,field,offset))) goto done;
if((ret=walkStructArray(resp,topvar,field,offset,doswap))) goto done;
break;
case NC_SEQ:
if((ret=walkSeqArray(compiler,topvar,field,offset))) goto done;
if((ret=walkSeqArray(resp,topvar,field,offset,doswap))) goto done;
break;
}
}
@ -182,7 +179,7 @@ done:
}
static int
walkSeqArray(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* offset)
walkSeqArray(NCD4response* resp, NCD4node* topvar, NCD4node* var, NCD4offset* offset, int doswap)
{
int ret = NC_NOERR;
d4size_t i;
@ -195,7 +192,7 @@ walkSeqArray(NCD4meta* compiler, NCD4node* topvar, NCD4node* var, NCD4offset* of
for(i=0;i<dimproduct;i++) {
/* Swap, possibly recursively, the single seq pointed to by offset*/
if((ret=walkSeq(compiler,topvar,seqtype,offset))) goto done;
if((ret=walkSeq(resp,topvar,seqtype,offset,doswap))) goto done;
}
done:
@ -206,7 +203,7 @@ done:
Remember that the base type of var is a vlen.
*/
static int
walkSeq(NCD4meta* compiler, NCD4node* topvar, NCD4node* vlentype, NCD4offset* offset)
walkSeq(NCD4response* resp, NCD4node* topvar, NCD4node* vlentype, NCD4offset* offset, int doswap)
{
int ret = NC_NOERR;
int i;
@ -214,7 +211,7 @@ walkSeq(NCD4meta* compiler, NCD4node* topvar, NCD4node* vlentype, NCD4offset* of
NCD4node* basetype;
/* process the record count */
if(compiler->swap)
if(doswap)
swapinline64(offset);
recordcount = GETCOUNTER(offset);
SKIPCOUNTER(offset);
@ -225,17 +222,17 @@ walkSeq(NCD4meta* compiler, NCD4node* topvar, NCD4node* vlentype, NCD4offset* of
for(i=0;i<recordcount;i++) {
switch(basetype->subsort) {
default: /* atomic basetype */
if((ret=walkAtomicVar(compiler,topvar,basetype,offset))) goto done;
if((ret=walkAtomicVar(resp,topvar,basetype,offset,doswap))) goto done;
break;
case NC_OPAQUE:
if((ret=walkOpaqueVar(compiler,topvar,basetype,offset))) goto done;
if((ret=walkOpaqueVar(resp,topvar,basetype,offset,doswap))) goto done;
break;
case NC_STRUCT:
/* We can treat each record like a structure instance */
if((ret=walkStruct(compiler,topvar,basetype,offset))) goto done;
if((ret=walkStruct(resp,topvar,basetype,offset,doswap))) goto done;
break;
case NC_SEQ:
if((ret=walkSeq(compiler,topvar,basetype,offset))) goto done;
if((ret=walkSeq(resp,topvar,basetype,offset,doswap))) goto done;
break;
}
}

View File

@ -4,6 +4,7 @@
*********************************************************************/
#include "d4includes.h"
#include <stddef.h>
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
@ -92,7 +93,7 @@ NCD4_typesize(nc_type tid)
d4size_t
NCD4_dimproduct(NCD4node* node)
{
int i;
size_t i;
d4size_t product = 1;
for(i=0;i<nclistlength(node->dims);i++) {
NCD4node* dim = (NCD4node*)nclistget(node->dims,i);
@ -107,44 +108,45 @@ NCD4_makeFQN(NCD4node* node)
{
char* fqn = NULL;
char* escaped;
int i;
NCD4node* g = node;
NClist* path = nclistnew();
size_t estimate;
NCbytes* buf = ncbytesnew();
NClist* grps = nclistnew();
NClist* parts = nclistnew();
NCD4node* n;
size_t i;
for(estimate=0;g != NULL;g=g->container) {
estimate += strlen(g->name);
nclistinsert(path,0,g);
/* collect all the non-groups */
for(n=node;n;n=n->container) {
if(ISGROUP(n->sort))
nclistinsert(grps,0,n); /* keep the correct order of groups */
else
nclistinsert(parts,0,n);
}
estimate = (estimate*2) + 2*nclistlength(path);
estimate++; /*strlcat nul*/
fqn = (char*)malloc(estimate+1);
if(fqn == NULL) goto done;
fqn[0] = '\0';
/* Create the group-based fqn prefix */
/* start at 1 to avoid dataset */
for(i=1;i<nclistlength(path);i++) {
NCD4node* elem = (NCD4node*)nclistget(path,i);
if(elem->sort != NCD4_GROUP) break;
/* Build grp prefix of the fqn */
for(i=1;i<nclistlength(grps);i++) {
n = (NCD4node*)nclistget(grps,i);
/* Add in the group name */
escaped = backslashEscape(elem->name);
if(escaped == NULL) {free(fqn); fqn = NULL; goto done;}
strlcat(fqn,"/",estimate);
strlcat(fqn,escaped,estimate);
escaped = backslashEscape(n->name);
if(escaped == NULL) goto done;
ncbytescat(buf,"/");
ncbytescat(buf,escaped);
free(escaped);
}
/* Add in the final name part (if not group) */
if(i < nclistlength(path)) {
int last = nclistlength(path)-1;
NCD4node* n = (NCD4node*)nclistget(path,last);
char* name = NCD4_makeName(n,".");
strlcat(fqn,"/",estimate);
strlcat(fqn,name,estimate);
nullfree(name);
for(i=0;i<nclistlength(parts);i++) {
n = (NCD4node*)nclistget(parts,i);
escaped = backslashEscape(n->name);
if(escaped == NULL) goto done;
ncbytescat(buf,(i==0?"/":"."));
ncbytescat(buf,escaped);
free(escaped);
}
fqn = ncbytesextract(buf);
done:
nclistfree(path);
ncbytesfree(buf);
nclistfree(grps);
nclistfree(parts);
return fqn;
}
@ -155,7 +157,7 @@ create the last part of the fqn
char*
NCD4_makeName(NCD4node* elem, const char* sep)
{
int i;
size_t i;
size_t estimate = 0;
NCD4node* n;
NClist* path = nclistnew();
@ -446,11 +448,9 @@ NCD4_getheader(void* p, NCD4HDR* hdr, int hostlittleendian)
}
void
NCD4_reporterror(NCD4INFO* state)
NCD4_reporterror(NCD4response* resp, NCURI* uri)
{
NCD4meta* meta = state->substrate.metadata;
char* u = NULL;
if(meta == NULL) return;
u = ncuribuild(state->uri,NULL,NULL,NCURIALL);
fprintf(stderr,"***FAIL: url=%s httpcode=%d errmsg->\n%s\n",u,meta->error.httpcode,meta->error.message);
u = ncuribuild(uri,NULL,NULL,NCURIALL);
fprintf(stderr,"***FAIL: url=%s httpcode=%d errmsg->\n%s\n",u,resp->serial.httpcode,resp->error.message);
}

View File

@ -17,9 +17,12 @@
*/
typedef unsigned long long d4size_t;
/* Define a counted memory marker */
/* Define a (size, memory) pair */
typedef struct D4blob {d4size_t size; void* memory;} D4blob;
/* Empty blob constant */
#define NULLBLOB(blob) {blob.size = 0; blob.memory = NULL;}
#define OFFSET2BLOB(blob,offset) do{(blob).size = ((offset)->limit - (offset)->base); (blob).memory = (offset)->base; }while(0)
#define BLOB2OFFSET(offset,blob) do{\
(offset)->base = (blob).memory; \

View File

@ -9,9 +9,11 @@
#include "nc4internal.h"
#include "d4includes.h"
#include "d4odom.h"
#include <stddef.h>
/* Forward */
static int getvarx(int ncid, int varid, NCD4INFO**, NCD4node** varp, nc_type* xtypep, size_t*, nc_type* nc4typep, size_t*);
static int getvarx(int gid, int varid, NCD4INFO**, NCD4node** varp, nc_type* xtypep, size_t*, nc_type* nc4typep, size_t*);
static int mapvars(NCD4meta* dapmeta, NCD4meta* dmrmeta, int inferredchecksumming);
int
NCD4_get_vara(int ncid, int varid,
@ -22,15 +24,16 @@ NCD4_get_vara(int ncid, int varid,
int ret;
/* TODO: optimize since we know stride is 1 */
ret = NCD4_get_vars(ncid,varid,start,edges,NC_stride_one,value,memtype);
return ret;
return THROW(ret);
}
int
NCD4_get_vars(int ncid, int varid,
NCD4_get_vars(int gid, int varid,
const size_t *start, const size_t *edges, const ptrdiff_t* stride,
void *memoryin, nc_type xtype)
{
int i,ret;
size_t i;
int ret;
NCD4INFO* info;
NCD4meta* meta;
NCD4node* ncvar;
@ -40,17 +43,17 @@ NCD4_get_vars(int ncid, int varid,
size_t nc4size, xsize, dapsize;
void* instance = NULL; /* Staging area in case we have to convert */
NClist* blobs = NULL;
int rank;
size_t rank;
size_t dimsizes[NC_MAX_VAR_DIMS];
d4size_t dimproduct;
size_t dstcount;
size_t dstpos;
NCD4offset* offset = NULL;
/* Get netcdf type info */
if((ret=getvarx(ncid, varid, &info, &ncvar, &xtype, &xsize, &nc4type, &nc4size)))
/* Get netcdf var metadata and data */
if((ret=getvarx(gid, varid, &info, &ncvar, &xtype, &xsize, &nc4type, &nc4size)))
{goto done;}
meta = info->substrate.metadata;
meta = info->dmrmetadata;
nctype = ncvar->basetype;
rank = nclistlength(ncvar->dims);
blobs = nclistnew();
@ -74,18 +77,18 @@ NCD4_get_vars(int ncid, int varid,
odom = d4odom_new(rank,start,edges,stride,dimsizes);
else
odom = d4scalarodom_new();
dstcount = 0; /* We always write into dst starting at position 0*/
for(;d4odom_more(odom);dstcount++) {
dstpos = 0; /* We always write into dst starting at position 0*/
for(;d4odom_more(odom);dstpos++) {
void* xpos;
void* dst;
d4size_t count;
d4size_t pos;
count = d4odom_next(odom);
if(count >= dimproduct) {
pos = d4odom_next(odom);
if(pos >= dimproduct) {
ret = THROW(NC_EINVALCOORDS);
goto done;
}
xpos = ((char*)memoryin)+(xsize * dstcount); /* ultimate destination */
xpos = ((char*)memoryin)+(xsize * dstpos); /* ultimate destination */
/* We need to compute the offset in the dap4 data of this instance;
for fixed size types, this is easy, otherwise we have to walk
the variable size type
@ -95,16 +98,16 @@ NCD4_get_vars(int ncid, int varid,
offset = NULL;
offset = BUILDOFFSET(NULL,0);
BLOB2OFFSET(offset,ncvar->data.dap4data);
/* Move offset to the count'th element of the array */
/* Move offset to the pos'th element of the array */
if(nctype->meta.isfixedsize) {
INCR(offset,(dapsize*count));
INCR(offset,(dapsize*pos));
} else {
/* We have to walk to the count'th location in the data */
if((ret=NCD4_moveto(meta,ncvar,count,offset)))
/* We have to walk to the pos'th location in the data */
if((ret=NCD4_moveto(meta,ncvar,pos,offset)))
{goto done;}
}
dst = instance;
if((ret=NCD4_fillinstance(meta,nctype,offset,&dst,blobs)))
if((ret=NCD4_movetoinstance(meta,nctype,offset,&dst,blobs)))
{goto done;}
if(xtype == nc4type) {
/* We can just copy out the data */
@ -132,45 +135,29 @@ done:
}
static int
getvarx(int ncid, int varid, NCD4INFO** infop, NCD4node** varp,
getvarx(int gid, int varid, NCD4INFO** infop, NCD4node** varp,
nc_type* xtypep, size_t* xsizep, nc_type* nc4typep, size_t* nc4sizep)
{
int ret = NC_NOERR;
NC* ncp;
NCD4INFO* info;
NCD4meta* meta;
NCD4node* group;
NCD4node* var;
NCD4node* type;
NC* ncp = NULL;
NCD4INFO* info = NULL;
NCD4meta* dmrmeta = NULL;
NCD4node* group = NULL;
NCD4node* var = NULL;
NCD4node* type = NULL;
nc_type xtype, actualtype;
size_t instancesize, xsize;
NCURI* ceuri = NULL; /* Constrained uri */
NCD4meta* dapmeta = NULL;
NCD4response* dapresp = NULL;
if((ret = NC_check_id(ncid, (NC**)&ncp)) != NC_NOERR)
if((ret = NC_check_id(gid, (NC**)&ncp)) != NC_NOERR)
goto done;
info = getdap(ncp);
meta = info->substrate.metadata;
/* If the data has not already been read and processed, then do so. */
if(meta->serial.dap == NULL) {
size_t len = 0;
void* content = NULL;
/* (Re)Build the meta data; sets serial.rawdata */
NCD4_resetMeta(info->substrate.metadata);
meta->controller = info;
meta->ncid = info->substrate.nc4id; /* Transfer netcdf ncid */
if((ret=NCD4_readDAP(info, info->controls.flags.flags))) goto done;
len = ncbyteslength(info->curl->packet);
content = ncbytesextract(info->curl->packet);
NCD4_resetSerial(&meta->serial, len, content);
/* Process the data part */
if((ret=NCD4_dechunk(meta))) goto done;
if((ret = NCD4_processdata(info->substrate.metadata))) goto done;
}
if((ret = NCD4_findvar(ncp,ncid,varid,&var,&group))) goto done;
dmrmeta = info->dmrmetadata;
if((ret = NCD4_findvar(ncp,gid,varid,&var,&group))) goto done;
type = var->basetype;
actualtype = type->meta.id;
instancesize = type->meta.memsize;
@ -189,6 +176,46 @@ getvarx(int ncid, int varid, NCD4INFO** infop, NCD4node** varp,
else
xsize = instancesize;
/* If we already have valid data, then just return */
if(var->data.valid) goto validated;
/* Ok, we need to read from the server */
/* Add the variable to the URI, unless the URI is already constrained or is unconstrainable */
ceuri = ncuriclone(info->dmruri);
/* append the request for a specific variable */
if(ncuriquerylookup(ceuri,DAP4CE) == NULL && !FLAGSET(info->controls.flags,NCF_UNCONSTRAINABLE)) {
ncurisetquerykey(ceuri,strdup("dap4.ce"),NCD4_makeFQN(var));
}
/* Read and process the data */
/* Setup the meta-data for the DAP */
if((ret=NCD4_newMeta(info,&dapmeta))) goto done;
if((ret=NCD4_newResponse(info,&dapresp))) goto done;
dapresp->mode = NCD4_DAP;
nclistpush(info->responses,dapresp);
if((ret=NCD4_readDAP(info, info->controls.flags.flags, ceuri, dapresp))) goto done;
/* Extract DMR and dechunk the data part */
if((ret=NCD4_dechunk(dapresp))) goto done;
/* Process the dmr part */
if((ret=NCD4_parse(dapmeta,dapresp,1))) goto done;
/* See if we are checksumming */
if((ret=NCD4_inferChecksums(dapmeta,dapresp))) goto done;
/* connect variables and corresponding dap data */
if((ret = NCD4_parcelvars(dapmeta,dapresp))) goto done;
/* Process checksums and byte-order swapping */
if((ret = NCD4_processdata(dapmeta,dapresp))) goto done;
/* Transfer and process the data */
if((ret = mapvars(dapmeta,dmrmeta,dapresp->inferredchecksumming))) goto done;
validated:
/* Return relevant info */
if(infop) *infop = info;
if(xtypep) *xtypep = xtype;
@ -197,8 +224,98 @@ getvarx(int ncid, int varid, NCD4INFO** infop, NCD4node** varp,
if(nc4sizep) *nc4sizep = instancesize;
if(varp) *varp = var;
done:
if(meta->error.message != NULL)
NCD4_reporterror(info); /* Make sure the user sees this */
if(dapmeta) NCD4_reclaimMeta(dapmeta);
ncurifree(ceuri);
if(dapresp != NULL && dapresp->error.message != NULL)
NCD4_reporterror(dapresp,ceuri); /* Make sure the user sees this */
return THROW(ret);
}
#if 0
static NCD4node*
findbyname(const char* name, NClist* nodes)
{
int i;
for(i=0;i<nclistlength(nodes);i++) {
NCD4node* node = (NCD4node*)nclistget(nodes,i);
if(strcmp(name,node->name)==0)
return node;
}
return NULL;
}
#endif
static int
matchvar(NCD4meta* dmrmeta, NCD4node* dapvar, NCD4node** dmrvarp)
{
size_t i;
int ret = NC_NOERR;
NCD4node* x = NULL;
NClist* dappath = nclistnew();
NClist* dmrpath = nclistnew(); /* compute path for this dmr var */
int found = 0;
NCD4node* match = NULL;
/* Capture the dap path starting at root and ending at the dapvar (assumed to be topvar) */
for(x=dapvar;x != NULL;x=x->container) nclistinsert(dappath,0,x);
/* Iterate over all variable nodes to find matching one */
for(i=0;i<nclistlength(dmrmeta->allnodes);i++) {
NCD4node* node = (NCD4node*)nclistget(dmrmeta->allnodes,i);
if(ISVAR(node->sort) && strcmp(node->name,dapvar->name)==0) { /* possible candidate */
size_t j;
found = 0;
nclistclear(dmrpath);
for(x=node;x != NULL;x=x->container) nclistinsert(dmrpath,0,x);
if(nclistlength(dmrpath) == nclistlength(dappath)) { /* same length paths */
/* compare paths: name and sort */
for(found=1,j=0;j<nclistlength(dmrpath);j++) {
NCD4node* pdmr = (NCD4node*)nclistget(dmrpath,j);
NCD4node* pdap = (NCD4node*)nclistget(dappath,j);
if(pdmr->sort != pdap->sort || strcmp(pdmr->name,pdap->name) != 0)
{found = 0; break;}
}
if(found) {match = node; break;}
}
}
}
if(!found) {ret = NC_EINVAL; goto done;}
if(dmrvarp) *dmrvarp = match;
done:
nclistfree(dappath);
nclistfree(dmrpath);
return THROW(ret);
}
/*
Map each toplevel dap var to the corresponding
toplevel dmr var and transfer necessary info;
*/
static int
mapvars(NCD4meta* dapmeta, NCD4meta* dmrmeta, int inferredchecksumming)
{
size_t i;
int ret = NC_NOERR;
NCD4node* daproot = dapmeta->root;
NClist* daptop = NULL; /* top variables in dap tree */
/* Get top level variables from the dap node tree */
daptop = nclistnew();
NCD4_getToplevelVars(dapmeta,daproot,daptop);
/* Match up the dap top variables with the dmr top variables */
for(i=0;i<nclistlength(daptop);i++) {
NCD4node* dapvar = (NCD4node*)nclistget(daptop,i);
NCD4node* dmrvar = NULL;
if((ret=matchvar(dmrmeta,dapvar,&dmrvar))) goto done;
/* Transfer info from dap var to dmr var */
dmrvar->data = dapvar->data;
memset(&dapvar->data,0,sizeof(NCD4vardata));
dmrvar->data.valid = 1;
}
done:
nclistfree(daptop);
return THROW(ret);
}

View File

@ -77,7 +77,6 @@ EXTERNL int dsp_open(const char* path, ND4dsp** dspp);
/* From d4http.c */
EXTERNL long NCD4_fetchhttpcode(CURL* curl);
EXTERNL int NCD4_fetchurl_file(CURL* curl, const char* url, FILE* stream, d4size_t* sizep, long* filetime);
EXTERNL int NCD4_fetchurl(CURL* curl, const char* url, NCbytes* buf, long* filetime, int* httpcode);
EXTERNL int NCD4_curlopen(CURL** curlp);
EXTERNL void NCD4_curlclose(CURL* curl);
@ -85,15 +84,16 @@ EXTERNL int NCD4_fetchlastmodified(CURL* curl, char* url, long* filetime);
EXTERNL int NCD4_ping(const char* url);
/* From d4read.c */
EXTERNL int NCD4_readDMR(NCD4INFO* state, int flags);
EXTERNL int NCD4_readDAP(NCD4INFO* state, int flags);
EXTERNL int NCD4_seterrormessage(NCD4meta* metadata, size_t len, char* msg);
EXTERNL int NCD4_readDMR(NCD4INFO* state, int flags, NCURI* url, NCD4response*);
EXTERNL int NCD4_readDAP(NCD4INFO* state, int flags, NCURI* ceuri, NCD4response*);
EXTERNL int NCD4_seterrormessage(NCD4response*, size_t len, char* msg);
/* From d4parser.c */
EXTERNL int NCD4_parse(NCD4meta*);
EXTERNL int NCD4_parse(NCD4meta*, NCD4response*, int dapparse);
EXTERNL NCD4node* NCD4_findAttr(NCD4node* container, const char* attrname);
EXTERNL NCD4node* NCD4_groupFor(NCD4node* node);
EXTERNL int NCD4_defineattr(NCD4meta* meta, NCD4node* parent, const char* aname, const char* typename, NCD4node** attrp);
EXTERNL void NCD4_setdebuglevel(NCD4parser*,int);
/* From d4printer.c */
EXTERNL int NCD4_print(NCD4meta*, NCbytes* output);
@ -104,29 +104,33 @@ EXTERNL void NCD4_attachraw(NCD4meta*, size_t size, void* rawdata);
EXTERNL void NCD4_reclaimMeta(NCD4meta*);
EXTERNL void NCD4_resetMeta(NCD4meta*);
EXTERNL void reclaimNode(NCD4node* node);
EXTERNL void NCD4_setdebuglevel(NCD4meta*,int);
EXTERNL int NCD4_metabuild(NCD4meta*, int ncid);
EXTERNL size_t NCD4_computeTypeSize(NCD4meta*, NCD4node* type);
EXTERNL int NCD4_findvar(NC* ncp, int ncid, int varid, NCD4node** varp, NCD4node** grpp);
EXTERNL char* NCD4_getVarFQN(NCD4node* var, const char* tail);
/* From d4chunk.c */
EXTERNL int NCD4_dechunk(NCD4meta*);
EXTERNL int NCD4_infermode(NCD4meta* meta);
EXTERNL int NCD4_dechunk(NCD4response*);
EXTERNL int NCD4_infermode(NCD4response*);
struct NCD4serial;
EXTERNL void NCD4_resetSerial(struct NCD4serial* serial, size_t rawsize, void* rawdata);
EXTERNL void NCD4_moveSerial(struct NCD4serial* serial, struct NCD4serial* dst);
/* From d4swap.c */
EXTERNL int NCD4_swapdata(NCD4meta*, NClist* topvars);
EXTERNL int NCD4_swapdata(NCD4response*, NCD4node* topvar, int doswap);
/* From d4fix.c */
EXTERNL int NCD4_delimit(NCD4meta*, NCD4node* var, NCD4offset* offset);
EXTERNL int NCD4_delimit(NCD4meta*, NCD4node* var, NCD4offset* offset, int inferredchecksumming);
EXTERNL int NCD4_moveto(NCD4meta*, NCD4node* var, d4size_t count, NCD4offset* offset);
EXTERNL int NCD4_toposort(NCD4meta*);
/* From d4data.c */
EXTERNL int NCD4_processdata(NCD4meta*);
EXTERNL int NCD4_fillinstance(NCD4meta*, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs);
EXTERNL int NCD4_parcelvars(NCD4meta* meta, NCD4response* resp);
EXTERNL int NCD4_processdata(NCD4meta*,NCD4response*);
EXTERNL int NCD4_movetoinstance(NCD4meta*, NCD4node* type, NCD4offset* offset, void** dstp, NClist* blobs);
EXTERNL int NCD4_getToplevelVars(NCD4meta* meta, NCD4node* group, NClist* toplevel);
EXTERNL int NCD4_inferChecksums(NCD4meta* meta, NCD4response* resp);
/* From d4util.c */
EXTERNL d4size_t NCD4_dimproduct(NCD4node* node);
@ -141,7 +145,7 @@ EXTERNL char* NCD4_deescape(const char* esc);
EXTERNL char* NCD4_entityescape(const char* s);
EXTERNL size_t NCD4_elidenuls(char* s, size_t slen);
EXTERNL void* NCD4_getheader(void* p, NCD4HDR* hdr, int hostlittleendian);
EXTERNL void NCD4_reporterror(NCD4INFO* state);
EXTERNL void NCD4_reporterror(NCD4response*, NCURI* uri);
/* From d4dump.c */
EXTERNL void NCD4_dumpbytes(size_t size, const void* data0, int swap);
@ -163,7 +167,15 @@ EXTERNL int NCD4_convert(nc_type srctype, nc_type dsttype, char* memory0, char*
/* d4file.c */
EXTERNL int NCD4_readDMRorDAP(NCD4INFO* d4info, NCD4mode mode);
EXTERNL void NCD4_applyclientfragmentcontrols(NCD4INFO* d4info);
EXTERNL void NCD4_applyclientquerycontrols(NCD4INFO* d4info);
EXTERNL void NCD4_applychecksumcontrols(NCD4INFO* d4info, NCD4response*);
EXTERNL int NCD4_newInfo(NCD4INFO** d4infop);
EXTERNL void NCD4_reclaimInfo(NCD4INFO* d4info);
EXTERNL void NCD4_resetInfoforRead(NCD4INFO* d4info);
EXTERNL int NCD4_newResponse(NCD4INFO*,NCD4response** respp);
EXTERNL void NCD4_reclaimResponse(NCD4response* d4resp);
EXTERNL void NCD4_resetInfoForRead(NCD4INFO* d4info);
EXTERNL int NCD4_newMeta(NCD4INFO*,NCD4meta**);
EXTERNL void NCD4_reclaimMeta(NCD4meta*);
/* ncd4dispatch.c */
struct NC_reservedatt; /*forward*/

View File

@ -3,6 +3,7 @@
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
*********************************************************************/
#include <stddef.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
@ -388,11 +389,11 @@ NCD4_inq_attname(int ncid, int varid, int attnum, char* name)
const NC_reservedatt* rsvp = NULL;
if((ret = NC_check_id(ncid, (NC**)&ncp)) != NC_NOERR) return (ret);
substrateid = makenc4id(ncp,ncid);
ret = nc_inq_attname(substrateid, varid, attnum, name);
/* Is this a reserved attribute name? */
if(name && (rsvp = NCD4_lookupreserved(name)))
return NC_EATTMETA;
substrateid = makenc4id(ncp,ncid);
ret = nc_inq_attname(substrateid, varid, attnum, name);
return (ret);
}
@ -841,13 +842,13 @@ NCD4_inq_dim(int ncid, int dimid, char* name, size_t* lenp)
NC* ncp;
NCD4INFO* info;
NCD4meta* meta;
int i;
size_t i;
NCD4node* dim = NULL;
if((ret = NC_check_id(ncid, (NC**)&ncp)) != NC_NOERR)
goto done;
info = (NCD4INFO*)ncp->dispatchdata;
meta = info->substrate.metadata;
meta = info->dmrmetadata;
/* Locate the dimension specified by dimid */
for(i=0;i<nclistlength(meta->allnodes);i++) {
@ -871,16 +872,15 @@ static int
ncd4_get_att_reserved(NC* ncp, int ncid, int varid, const char* name, void* value, nc_type t, const NC_reservedatt* rsvp)
{
int ret = NC_NOERR;
NCD4INFO* info = (NCD4INFO*)(ncp->dispatchdata);
NCD4meta* meta = info->substrate.metadata;
NCD4node* var = NULL;
if((ret=NCD4_findvar(ncp,ncid,varid,&var,NULL))) goto done;
if(strcmp(rsvp->name,D4CHECKSUMATTR)==0) {
unsigned int* ip = (unsigned int*)value;
if(varid == NC_GLOBAL)
{ret = NC_EBADID; goto done;}
if(t != NC_UINT) {ret = NC_EBADTYPE; goto done;}
if((ret=NCD4_findvar(ncp,ncid,varid,&var,NULL))) goto done;
if(var->data.checksumattr == 0)
{ret = NC_ENOTATT; goto done;}
*ip = (var->data.remotechecksum);
@ -889,7 +889,7 @@ ncd4_get_att_reserved(NC* ncp, int ncid, int varid, const char* name, void* valu
if(varid != NC_GLOBAL)
{ret = NC_EBADID; goto done;}
if(t != NC_INT) {ret = NC_EBADTYPE; goto done;}
*ip = (meta->serial.remotelittleendian?1:0);
*ip = (var->data.response->remotelittleendian?1:0);
}
done:
return THROW(ret);
@ -925,7 +925,7 @@ static int
globalinit(void)
{
int stat = NC_NOERR;
return stat;
return THROW(stat);
}
/**************************************************/

View File

@ -50,6 +50,8 @@ typedef struct NCD4node NCD4node;
typedef struct NCD4params NCD4params;
typedef struct NCD4HDR NCD4HDR;
typedef struct NCD4offset NCD4offset;
typedef struct NCD4vardata NCD4vardata;
typedef struct NCD4response NCD4response;
/* Define the NCD4HDR flags */
/* Header flags */
@ -228,19 +230,20 @@ struct NCD4node {
int isfixedsize; /* sort == NCD4_TYPE; Is this a fixed size (recursively) type? */
d4size_t dapsize; /* size of the type as stored in the dap data; will, as a rule,
be same as memsize only for types <= NC_UINT64 */
nc_type cmpdid; /*netcdf id for the compound type created for seq type */
nc_type cmpdid; /* netcdf id for the compound type created for seq type */
size_t memsize; /* size of a memory instance without taking dimproduct into account,
but taking compound alignment into account */
d4size_t offset; /* computed structure field offset in memory */
size_t alignment; /* computed structure field alignment in memory */
} meta;
struct { /* Data compilation info */
int flags; /* See d4data for actual flags */
struct NCD4vardata { /* Data compilation info */
int valid; /* 1 => this contains valid data */
D4blob dap4data; /* offset and start pos for this var's data in serialization */
unsigned int remotechecksum; /* checksum from data as sent by server */
unsigned int localchecksum; /* toplevel variable checksum as computed by client */
int checksumattr; /* 1=> _DAP4_Checksum_CRC32 is defined */
int attrchecksum; /* _DAP4_Checksum_CRC32 value */
unsigned remotechecksum; /* toplevel per-variable checksum contained in the data */
unsigned localchecksum; /* toplevel variable checksum as computed by client */
int checksumattr; /* 1 => _DAP4_Checksum_CRC32 is defined */
unsigned attrchecksum; /* _DAP4_Checksum_CRC32 value; this is the checksum computed by server */
NCD4response* response; /* Response from which this data is taken */
} data;
struct { /* Track netcdf-4 conversion info */
int isvlen; /* _edu.ucar.isvlen */
@ -253,36 +256,13 @@ struct NCD4node {
} nc4;
};
/* Tracking info about the serialized input before and after de-chunking */
typedef struct NCD4serial {
size_t rawsize; /* |rawdata| */
void* rawdata;
size_t dapsize; /* |dap|; this is transient */
void* dap; /* pointer into rawdata where dap data starts */
char* dmr;/* copy of dmr */
char* errdata; /* null || error chunk (null terminated) */
int httpcode; /* returned from last request */
int hostlittleendian; /* 1 if the host is little endian */
int remotelittleendian; /* 1 if the packet says data is little endian */
} NCD4serial;
/* This will be passed out of the parse */
/* DMR information from a response; this will be passed out of the parse */
struct NCD4meta {
NCD4INFO* controller;
int ncid; /* root ncid of the substrate netcdf-4 file;
warning: copy of NCD4Info.substrate.nc4id */
NCD4node* root;
NCD4mode mode; /* Are we reading DMR (only) or DAP (includes DMR) */
NClist* allnodes; /*list<NCD4node>*/
struct Error { /* Content of any error response */
char* parseerror;
int httpcode;
char* message;
char* context;
char* otherinfo;
} error;
int debuglevel;
NCD4serial serial;
int swap; /* 1 => swap data */
/* Define some "global" (to a DMR) data */
NClist* groupbyid; /* NClist<NCD4node*> indexed by groupid >> 16; this is global */
@ -292,9 +272,12 @@ struct NCD4meta {
};
typedef struct NCD4parser {
NCD4INFO* controller;
char* input;
int debuglevel;
int dapparse; /* 1 => we are parsing the DAP DMR */
NCD4meta* metadata;
NCD4response* response;
/* Capture useful subsets of dataset->allnodes */
NClist* types; /*list<NCD4node>; user-defined types only*/
NClist* dims; /*list<NCD4node>*/
@ -303,6 +286,32 @@ typedef struct NCD4parser {
NCD4node* dapopaque; /* Single non-fixed-size opaque type */
} NCD4parser;
/* Capture all the relevant info about the response to a server request */
struct NCD4response { /* possibly processed response from a query */
NCD4INFO* controller; /* controlling connection */
D4blob raw; /* complete response in memory */
int querychecksumming; /* 1 => user specified dap4.ce value */
int attrchecksumming; /* 1=> _DAP4_Checksum_CRC32 is defined for at least one variable */
int inferredchecksumming; /* 1 => either query checksum || att checksum */
int checksumignore; /* 1 => assume checksum, but do not validate */
int remotelittleendian; /* 1 if the packet says data is little endian */
NCD4mode mode; /* Are we reading DMR (only) or DAP (includes DMR) */
struct NCD4serial {
size_t dapsize; /* |dap|; this is transient */
void* dap; /* pointer into raw where dap data starts */
char* dmr;/* copy of dmr */
char* errdata; /* null || error chunk (null terminated) */
int httpcode; /* returned from last request */
} serial; /* Dechunked and processed DAP part of the response */
struct Error { /* Content of any error response */
char* parseerror;
int httpcode;
char* message;
char* context;
char* otherinfo;
} error;
};
/**************************************************/
/* Curl info */
@ -328,28 +337,20 @@ struct NCD4curl {
struct NCD4INFO {
NC* controller; /* Parent instance of NCD4INFO */
char* rawurltext; /* as given to ncd4_open */
char* urltext; /* as modified by ncd4_open */
NCURI* uri; /* parse of rawuritext */
char* rawdmrurltext; /* as given to ncd4_open */
char* dmrurltext; /* as modified by ncd4_open */
NCURI* dmruri; /* parse of rawuritext */
NCD4curl* curl;
int inmemory; /* store fetched data in memory? */
struct {
char* memory; /* allocated memory if ONDISK is not set */
char* ondiskfilename; /* If ONDISK is set */
FILE* ondiskfile; /* ditto */
d4size_t datasize; /* size on disk or in memory */
long dmrlastmodified;
long daplastmodified;
int querychecksumming; /* 1 => user specified dap4.ce value */
int attrchecksumming; /* 1=> _DAP4_Checksum_CRC32 is defined for at least one variable */
int inferredchecksumming; /* 1 => either query checksum || att checksum */
int checksumignore; /* 1 => assume checksum, but do not validate */
} data;
NCD4meta* dmrmetadata; /* Independent of responses */
NClist* responses; /* NClist<NCD4response> all responses from this curl handle */
struct { /* Properties that are per-platform */
int hostlittleendian; /* 1 if the host is little endian */
} platform;
struct {
int realfile; /* 1 => we created actual temp file */
char* filename; /* of the substrate file */
int nc4id; /* substrate nc4 file ncid used to hold metadata; not same as external id */
NCD4meta* metadata;
} substrate;
struct {
NCCONTROLS flags;
@ -362,6 +363,7 @@ struct NCD4INFO {
struct {
char* filename;
} fileproto;
int debuglevel;
NClist* blobs;
};

Some files were not shown because too many files have changed in this diff Show More