mirror of https://github.com/Unidata/netcdf-c.git
synced 2025-02-17 16:50:18 +08:00

Commit 87497d79cf: update

179  .github/workflows/run_tests_cdash.yml  (vendored, new file)
@@ -0,0 +1,179 @@
###
# Build hdf5 dependencies and cache them in a combined directory.
###

name: Run CDash Ubuntu/Linux netCDF Tests

on: workflow_dispatch

concurrency:
  group: ${{ github.workflow}}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:

  build-deps-cdash:

    runs-on: ubuntu-latest

    strategy:
      matrix:
        hdf5: [ 1.10.8, 1.12.2, 1.14.0 ]

    steps:
      - uses: actions/checkout@v3

      - name: Install System dependencies
        shell: bash -l {0}
        run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev doxygen openssl

      ###
      # Installing libhdf5
      ###
      - name: Cache libhdf5-${{ matrix.hdf5 }}
        id: cache-hdf5
        uses: actions/cache@v3
        with:
          path: ~/environments/${{ matrix.hdf5 }}
          key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

      - name: Build libhdf5-${{ matrix.hdf5 }}
        if: steps.cache-hdf5.outputs.cache-hit != 'true'
        run: |
          set -x

          wget https://support.hdfgroup.org/ftp/HDF/releases/HDF4.2.15/src/hdf-4.2.15.tar.bz2
          tar -jxf hdf-4.2.15.tar.bz2
          pushd hdf-4.2.15
          ./configure --prefix=${HOME}/environments/${{ matrix.hdf5 }} --disable-static --enable-shared --disable-fortran --disable-netcdf --with-szlib --enable-hdf4-xdr
          make -j
          make install -j
          popd

          wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-$(echo ${{ matrix.hdf5 }} | cut -d. -f 1,2)/hdf5-${{ matrix.hdf5 }}/src/hdf5-${{ matrix.hdf5 }}.tar.bz2
          tar -jxf hdf5-${{ matrix.hdf5 }}.tar.bz2
          pushd hdf5-${{ matrix.hdf5 }}
          ./configure --disable-static --enable-shared --prefix=${HOME}/environments/${{ matrix.hdf5 }} --enable-hl --with-szlib
          make -j
          make install -j
          popd

  build-deps-parallel:

    runs-on: ubuntu-latest

    strategy:
      matrix:
        hdf5: [ 1.14.0 ]

    steps:

      - uses: actions/checkout@v3

      - name: Install System dependencies
        shell: bash -l {0}
        run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev mpich libmpich-dev

      ###
      # Installing libhdf5
      ###
      - name: Cache libhdf5-parallel-${{ matrix.hdf5 }}
        id: cache-hdf5
        uses: actions/cache@v3
        with:
          path: ~/environments/${{ matrix.hdf5 }}
          key: hdf5-parallel-${{ runner.os }}-${{ matrix.hdf5 }}

      - name: Build libhdf5-${{ matrix.hdf5 }}-pnetcdf-1.12.3
        if: steps.cache-hdf5.outputs.cache-hit != 'true'
        run: |
          set -x

          wget https://support.hdfgroup.org/ftp/HDF/releases/HDF4.2.15/src/hdf-4.2.15.tar.bz2
          tar -jxf hdf-4.2.15.tar.bz2
          pushd hdf-4.2.15
          CC=mpicc ./configure --prefix=${HOME}/environments/${{ matrix.hdf5 }} --disable-static --enable-shared --disable-fortran --disable-netcdf --with-szlib --enable-parallel --enable-hdf4-xdr
          make -j
          make install -j
          popd

          wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-$(echo ${{ matrix.hdf5 }} | cut -d. -f 1,2)/hdf5-${{ matrix.hdf5 }}/src/hdf5-${{ matrix.hdf5 }}.tar.bz2
          tar -jxf hdf5-${{ matrix.hdf5 }}.tar.bz2
          pushd hdf5-${{ matrix.hdf5 }}
          CC=mpicc ./configure --disable-static --enable-shared --prefix=${HOME}/environments/${{ matrix.hdf5 }} --enable-hl --with-szlib --enable-parallel
          make -j
          make install -j
          popd
          wget https://parallel-netcdf.github.io/Release/pnetcdf-1.12.3.tar.gz
          tar -zxf pnetcdf-1.12.3.tar.gz
          pushd pnetcdf-1.12.3
          CC=mpicc ./configure --disable-static --enable-shared --prefix=${HOME}/environments/${{ matrix.hdf5 }}
          make -j
          make install -j
          popd

  ###
  # Run CTest Serial Script
  ###
  nc-ctest-serial:
    needs: build-deps-cdash
    runs-on: ubuntu-latest
    environment: CDashCI

    strategy:
      matrix:
        hdf5: [ 1.10.8, 1.12.2, 1.14.0 ]

    steps:
      - uses: actions/checkout@v3
        with:
          CDASH_TOKEN: ${{ secrets.CDASH_TOKEN }}
        env:
          CDASH_TOKEN: ${{ secrets.CDASH_TOKEN }}

      - name: Install System dependencies
        shell: bash -l {0}
        run: sudo apt update && sudo apt install -y libaec-dev zlib1g-dev automake autoconf libcurl4-openssl-dev libjpeg-dev wget curl bzip2 m4 flex bison cmake libzip-dev

      ###
      # Set Environmental Variables
      ###

      - run: echo "CMAKE_PREFIX_PATH=${HOME}/environments/${{ matrix.hdf5 }}/" >> $GITHUB_ENV
      - run: echo "LD_LIBRARY_PATH=${HOME}/environments/${{ matrix.hdf5 }}/lib" >> $GITHUB_ENV
      - run: echo "CTEST_OUTPUT_ON_FAILURE=1" >> $GITHUB_ENV

      ###
      # Fetch Cache
      ###

      - name: Fetch HDF Cache
        id: cache-hdf5
        uses: actions/cache@v3
        with:
          path: ~/environments/${{ matrix.hdf5 }}
          key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

      - name: Check Cache
        shell: bash -l {0}
        run: ls ${HOME}/environments/${{ matrix.hdf5 }} && ls ${HOME}/environments/${{ matrix.hdf5}}/lib

      - name: Run ctest serial script
        shell: bash -l {0}
        env:
          CDASH_TOKEN: ${{ secrets.CDASH_TOKEN }}
        run: |
          mkdir build
          cd build
          LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ctest -j 12 -V -S ../ctest_scripts/ctest_serial.ctest

      - name: Verbose Output if CTest Failure
        shell: bash -l {0}
        run: |
          cd build
          LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ctest -j 12 --rerun-failed --output-on-failure -VV
        if: ${{ failure() }}
5  .github/workflows/run_tests_osx.yml  (vendored)
@@ -6,9 +6,12 @@

name: Run macOS-based netCDF Tests

on: [pull_request,workflow_dispatch]

concurrency:
  group: ${{ github.workflow}}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:

  build-deps-osx:
4  .github/workflows/run_tests_s3.yml  (vendored)
@@ -11,6 +11,10 @@ name: Run S3 netCDF Tests (under Ubuntu Linux)

on: [workflow_dispatch]

concurrency:
  group: ${{ github.workflow}}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:

  build-deps-serial:
96  .github/workflows/run_tests_ubuntu.yml  (vendored)
@@ -6,6 +6,10 @@ name: Run Ubuntu/Linux netCDF Tests

on: [pull_request, workflow_dispatch]

concurrency:
  group: ${{ github.workflow}}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:

  build-deps-serial:

@@ -196,6 +200,19 @@ jobs:
          CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
        if: ${{ success() }}

      - name: Create source distribution
        shell: bash -l {0}
        if: ${{ success() }}
        run: make dist -j

      - uses: actions/upload-artifact@v3
        with:
          name: netcdf-c-autotools-source-distribution
          path: |
            *.tar*
            *.zip
            *.tgz

      ##
      # Parallel
      ##

@@ -449,7 +466,28 @@
        use_nczarr: [ nczarr_off, nczarr_on ]
    steps:

      - uses: actions/checkout@v3
      - uses: actions/download-artifact@v3
        with:
          name: netcdf-c-autotools-source-distribution

      - name: Unpack source distribution
        shell: bash -l {0}
        run: |
          if [ -f *.zip ];
          then
            unzip *.zip
          else
            tar xvzf $(ls *.tar* *.tgz *.zip | head -1)
          fi
          ls -d netcdf-c*
          for name in netcdf-c*;
          do
            if [ -d ${name} ];
            then
              cd ${name}
              break
            fi
          done

      - name: Install System dependencies
        shell: bash -l {0}

@@ -498,11 +536,28 @@

      - name: Run autoconf
        shell: bash -l {0}
        run: autoreconf -if
        run: |
          for name in netcdf-c*;
          do
            if [ -d ${name} ];
            then
              cd ${name}
              break
            fi
          done
          autoreconf -if

      - name: Configure
        shell: bash -l {0}
        run: |
          for name in netcdf-c*;
          do
            if [ -d ${name} ];
            then
              cd ${name}
              break
            fi
          done
          current_directory="$(pwd)"
          mkdir ../build
          cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} "${current_directory}/configure" ${ENABLE_HDF5} ${ENABLE_DAP} ${ENABLE_NCZARR}

@@ -510,29 +565,56 @@

      - name: Look at config.log if error
        shell: bash -l {0}
        run: cd ../build && cat config.log
        run: |
          if [ -d ../build ];
          then
            cd ../build
          else
            cd build
          fi && cat config.log
        if: ${{ failure() }}

      - name: Print Summary
        shell: bash -l {0}
        run: cd ../build && cat libnetcdf.settings
        run: |
          if [ -d ../build ];
          then
            cd ../build
          else
            cd build
          fi && cat libnetcdf.settings

      - name: Build Library and Utilities
        shell: bash -l {0}
        run: |
          cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make -j
          if [ -d ../build ];
          then
            cd ../build
          else
            cd build
          fi && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make -j
        if: ${{ success() }}

      - name: Build Tests
        shell: bash -l {0}
        run: |
          cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check TESTS="" -j
          if [ -d ../build ];
          then
            cd ../build
          else
            cd build
          fi && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check TESTS="" -j
        if: ${{ success() }}

      - name: Run Tests
        shell: bash -l {0}
        run: |
          cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
          if [ -d ../build ];
          then
            cd ../build
          else
            cd build
          fi && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
        if: ${{ success() }}

  nc-cmake:
4  .github/workflows/run_tests_win_cygwin.yml  (vendored)
@@ -2,6 +2,10 @@ name: Run Cygwin-based tests

on: [pull_request,workflow_dispatch]

concurrency:
  group: ${{ github.workflow}}-${{ github.head_ref }}
  cancel-in-progress: true

env:
  SHELLOPTS: igncr
  CHERE_INVOKING: 1
6  .github/workflows/run_tests_win_mingw.yml  (vendored)
@@ -4,13 +4,17 @@
# for information related to github runners.
###

name: Run MSYS2, MinGW64-based Tests
name: Run MSYS2, MinGW64-based Tests (Not Visual Studio)

env:
  CPPFLAGS: "-D_BSD_SOURCE"

on: [pull_request,workflow_dispatch]

concurrency:
  group: ${{ github.workflow}}-${{ github.head_ref }}
  cancel-in-progress: true

jobs:

  build-and-test-autotools:
@@ -74,7 +74,7 @@ FIND_PROGRAM(NC_DPKG NAMES dpkg)
IF(NC_DPKG)
  # Define a macro for getting the dpkg architecture.
  MACRO(getdpkg_arch arch)
    exec_program("${NC_DPKG}" ARGS "--print-architecture" OUTPUT_VARIABLE "${arch}")
    execute_process(COMMAND "${NC_DPKG}" "--print-architecture" OUTPUT_VARIABLE "${arch}" OUTPUT_STRIP_TRAILING_WHITESPACE)
  ENDMACRO(getdpkg_arch)
  getdpkg_arch(dpkg_arch)
@@ -6,9 +6,7 @@
# Set Project Properties
##################################

#Minimum required CMake Version
cmake_minimum_required(VERSION 3.12.0)
# CMake 3.12: Use libraries specified in CMAKE_REQUIRED_LIBRARIES for check include macros

#Project Name
project(netCDF
@@ -18,6 +16,14 @@ DESCRIPTION "NetCDF is a set of software libraries and machine-independent data
)
set(PACKAGE "netCDF" CACHE STRING "")

# Backport of built-in `PROJECT_IS_TOP_LEVEL` from CMake 3.21
if (NOT DEFINED NETCDF_IS_TOP_LEVEL)
  set(NETCDF_IS_TOP_LEVEL OFF)
  if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
    set(NETCDF_IS_TOP_LEVEL ON)
  endif ()
endif ()

#####
# Version Info:
#
@@ -51,7 +57,7 @@ SET(NC_DISPATCH_VERSION 5)
find_program(UNAME NAMES uname)
IF(UNAME)
  macro(getuname name flag)
    exec_program("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}")
    execute_process(COMMAND "${UNAME}" "${flag}" OUTPUT_VARIABLE "${name}" OUTPUT_STRIP_TRAILING_WHITESPACE)
  endmacro(getuname)
  getuname(osname -s)
  getuname(osrel -r)
@@ -267,7 +273,8 @@ IF(CMAKE_COMPILER_IS_GNUCC OR APPLE)

  # Coverage tests need to have optimization turned off.
  IF(ENABLE_COVERAGE_TESTS)
    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -coverage -fprofile-arcs -ftest-coverage")
    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -coverage -fprofile-arcs -ftest-coverage")
    SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage")
    MESSAGE(STATUS "Coverage Tests: On.")
  ENDIF()
@@ -1404,7 +1411,7 @@ IF(ENABLE_TESTS)
    SET(HOSTNAME_ARG "-s")
  ENDIF()
  IF(HOSTNAME_CMD)
    EXEC_PROGRAM(${HOSTNAME_CMD} ARGS "${HOSTNAME_ARG}" OUTPUT_VARIABLE HOSTNAME)
    EXECUTE_PROCESS(COMMAND ${HOSTNAME_CMD} "${HOSTNAME_ARG}" OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE)
    SET(NC_CTEST_SITE "${HOSTNAME}" CACHE STRING "Hostname of test machine.")
  ENDIF()

@@ -2250,7 +2257,7 @@ ENDIF(ENABLE_BASH_SCRIPT_TESTING)

MACRO(add_sh_test prefix F)
  IF(HAVE_BASH)
    ADD_TEST(${prefix}_${F} bash "-c" "export srcdir=${CMAKE_CURRENT_SOURCE_DIR};export TOPSRCDIR=${CMAKE_SOURCE_DIR};bash ${CMAKE_CURRENT_BINARY_DIR}/${F}.sh ${ARGN}")
    ADD_TEST(${prefix}_${F} bash "-c" "export srcdir=${CMAKE_CURRENT_SOURCE_DIR};export TOPSRCDIR=${CMAKE_SOURCE_DIR};${CMAKE_CURRENT_BINARY_DIR}/${F}.sh ${ARGN}")
  ENDIF()
ENDMACRO()

@@ -2789,5 +2796,8 @@ install(
####

# CPack inclusion must come last.
# INCLUDE(CPack)
INCLUDE(CMakeInstallation.cmake)
option(NETCDF_PACKAGE "Create netCDF-C package " ${NETCDF_IS_TOP_LEVEL})

if (NETCDF_PACKAGE)
  include(CMakeInstallation.cmake)
endif()
@@ -211,7 +211,11 @@ install-data-hook:

all-local: liblib/libnetcdf.la
	echo ${PACKAGE_VERSION} > VERSION
if ENABLE_S3_TESTALL
	rm -f ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
	echo "@TESTUID@" >> ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
	cat ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids | sort | uniq > ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
	rm -f ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
	mv ${abs_top_builddir}/tmp_@PLATFORMUID@.uids ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
endif

if ENABLE_S3_TESTALL
@@ -8,6 +8,8 @@ This file contains a high-level description of this package's evolution. Release

## 4.9.3 - TBD

* Improve the speed and data quantity for DAP4 queries. See [Github #2765](https://github.com/Unidata/netcdf-c/pull/2765).
* Fix DAP2 proxy problems. See [Github #2764](https://github.com/Unidata/netcdf-c/pull/2764).
* Cleanup a number of misc issues. See [Github #2763](https://github.com/Unidata/netcdf-c/pull/2763).
* Mitigate the problem of test interference. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755).
* Extend NCZarr to support unlimited dimensions. See [Github #2755](https://github.com/Unidata/netcdf-c/pull/2755).
* Fix significant bug in the NCZarr cache management. See [Github #2737](https://github.com/Unidata/netcdf-c/pull/2737).
@@ -10,7 +10,11 @@ SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")

set(CDASH_TOKEN $ENV{CDASH_TOKEN})
MESSAGE("Using cdash token: ${CDASH_TOKEN}")
IF (CDASH_TOKEN)
  MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
  MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)

SITE_NAME(local_site_name)
@@ -10,9 +10,13 @@ SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")

set(CDASH_TOKEN $ENV{CDASH_TOKEN})
MESSAGE("Using cdash token: ${CDASH_TOKEN}")

IF (CDASH_TOKEN)
  MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
  MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)

SITE_NAME(local_site_name)
set(CTEST_SITE ${local_site_name})

@@ -29,7 +33,7 @@ find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)

set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=TRUE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")
set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=FALSE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")

set(CTEST_DROP_METHOD https)

@@ -42,7 +46,6 @@ ctest_start("Experimental")
ctest_configure()
ctest_build()
ctest_test()
ctest_coverage()
if (NOT "${CDASH_TOKEN}" STREQUAL "")
  ctest_submit(HTTPHEADER "Authorization: Bearer ${CDASH_TOKEN}")
endif()
52  ctest_scripts/ctest_serial_coverage.ctest  (new file)
@@ -0,0 +1,52 @@
###
# Standard CTest Script for testing netCDF.
# Requires a CDash Token.
#
# Set the CDASH_TOKEN environmental variable.
#
###

SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")

set(CDASH_TOKEN $ENV{CDASH_TOKEN})
IF (CDASH_TOKEN)
  MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
  MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)

SITE_NAME(local_site_name)
set(CTEST_SITE ${local_site_name})

set(CTEST_BUILD_CONFIGURATION "Profiling")
set(CTEST_CMAKE_GENERATOR "Unix Makefiles")
set(CTEST_BUILD_NAME "default")
set(CTEST_BUILD_CONFIGURATION "Profiling")
set(CTEST_DROP_METHOD "https")
set(CTEST_DROP_SITE_CDASH TRUE)
set(CTEST_PROJECT_NAME netcdf-c)

find_program(CMAKE_COMMAND cmake)
find_program(CTEST_GIT_COMMAND NAMES git)
find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)

set(CTEST_BUILD_OPTIONS "-DENABLE_COVERAGE_TESTS=TRUE -DENABLE_ERANGE_FILL=TRUE -DENABLE_LOGGING=TRUE -DENABLE_BYTERANGE=TRUE -DENABLE_LARGE_FILE_TESTS=FALSE")

set(CTEST_DROP_METHOD https)
set(CTEST_DROP_SITE "cdash.unidata.ucar.edu:443")
set(CTEST_DROP_LOCATION "/submit.php?project=netcdf-c")

set(CTEST_CONFIGURE_COMMAND "${CMAKE_COMMAND} -DCMAKE_BUILD_TYPE:STRING=${CTEST_BUILD_CONFIGURATION} ${CTEST_BUILD_OPTIONS} ${CTEST_SOURCE_DIRECTORY}")

ctest_start("Experimental")
ctest_configure()
ctest_build()
ctest_test()
ctest_coverage()
if (NOT "${CDASH_TOKEN}" STREQUAL "")
  ctest_submit(HTTPHEADER "Authorization: Bearer ${CDASH_TOKEN}")
endif()
@@ -10,7 +10,11 @@ SET(CTEST_SOURCE_DIRECTORY "..")
SET(CTEST_BINARY_DIRECTORY ".")

set(CDASH_TOKEN $ENV{CDASH_TOKEN})
MESSAGE("Using cdash token: ${CDASH_TOKEN}")
IF (CDASH_TOKEN)
  MESSAGE("CDASH TOKEN FOUND")
ELSE (CDASH_TOKEN)
  MESSAGE("NO CDASH TOKEN FOUND")
ENDIF (CDASH_TOKEN)

SITE_NAME(local_site_name)
@@ -78,15 +78,4 @@ ENDIF(ENABLE_TESTS)
#FILE(COPY ./cdltestfiles DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})
#FILE(COPY ./rawtestfiles DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})

## Specify files to be distributed by 'make dist'
FILE(GLOB CUR_EXTRA_DIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*.c ${CMAKE_CURRENT_SOURCE_DIR}/*.h ${CMAKE_CURRENT_SOURCE_DIR}/*.sh
  ${CMAKE_CURRENT_SOURCE_DIR}/cdltestfiles
  ${CMAKE_CURRENT_SOURCE_DIR}/rawtestfiles
  ${CMAKE_CURRENT_SOURCE_DIR}/baseline
  ${CMAKE_CURRENT_SOURCE_DIR}/baselineraw
  ${CMAKE_CURRENT_SOURCE_DIR}/baselineremote
  #${CMAKE_CURRENT_SOURCE_DIR}/baselinerthredds
  ${CMAKE_CURRENT_SOURCE_DIR}/baselinehyrax
  )
SET(CUR_EXTRA_DIST ${CUR_EXTRA_DIST} CMakeLists.txt Makefile.am)
ADD_EXTRA_DIST("${CUR_EXTRA_DIST}")
@@ -53,6 +53,9 @@ typedef struct NCRCinfo {
	NClist* s3profiles; /* NClist<struct AWSprofile*> */
} NCRCinfo;

/* Opaque structures */
struct NCS3INFO;

#if defined(__cplusplus)
extern "C" {
#endif
@@ -94,7 +97,7 @@ EXTERNL int NC_getactives3profile(NCURI* uri, const char** profilep);
EXTERNL int NC_s3profilelookup(const char* profile, const char* key, const char** valuep);
EXTERNL int NC_authgets3profile(const char* profile, struct AWSprofile** profilep);
EXTERNL int NC_iss3(NCURI* uri);
EXTERNL int NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** newurlp);
EXTERNL int NC_s3urlrebuild(NCURI* url, struct NCS3INFO* s3, NCURI** newurlp);

#if defined(__cplusplus)
}
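For callers migrating to the new signature, the change looks roughly like the following sketch (illustrative only, not part of the commit; error handling is elided and the surrounding declarations are assumed):

    /* Before: bucket and region were separate in/out string pointers. */
    if((stat = NC_s3urlrebuild(uri, &bucket, &region, &newuri))) goto done;

    /* After: results are collected in a caller-owned NCS3INFO, which the
       caller clears with NC_s3clear() when finished (see drc.c below). */
    NCS3INFO s3;
    memset(&s3, 0, sizeof(s3));
    if((stat = NC_s3urlrebuild(uri, &s3, &newuri))) goto done;
    /* s3.bucket, s3.region, and s3.svc are now populated from the url */
    NC_s3clear(&s3);

Bundling bucket, region, and now the service kind into one struct lets the Google S3 support below thread the server type through without growing every call site's parameter list.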
@@ -6,6 +6,11 @@
#ifndef NCS3SDK_H
#define NCS3SDK_H 1

/* Track the server type, if known */
typedef enum NCS3SVC {NCS3UNK=0, /* unknown */
                      NCS3=1,    /* s3.amazon.aws */
                      NCS3GS=0   /* storage.googleapis.com */
} NCS3SVC;

typedef struct NCS3INFO {
    char* host; /* non-null if other*/
@@ -13,6 +18,7 @@ typedef struct NCS3INFO {
    char* bucket; /* bucket name */
    char* rootkey;
    char* profile;
    NCS3SVC svc;
} NCS3INFO;

#ifdef __cplusplus
@@ -3,6 +3,11 @@
 * See netcdf/COPYRIGHT file for copying and redistribution conditions.
 *********************************************************************/

/* WARNING: oc2/occurlfunctions.c and libdap4/d4curlfunctions.c
   should be merged since they are essentially the same file.
   In the meantime, changes to one should be propagated to the other.
*/

#include "d4includes.h"
#include "d4curlfunctions.h"

@@ -123,33 +128,43 @@ set_curlflag(NCD4INFO* state, int flag)
        }
    }
    break;
case CURLOPT_USE_SSL:
case CURLOPT_SSLCERT: case CURLOPT_SSLKEY:
case CURLOPT_SSL_VERIFYPEER: case CURLOPT_SSL_VERIFYHOST:
{
    struct ssl* ssl = &state->auth->ssl;
case CURLOPT_SSL_VERIFYPEER:
    /* VERIFYPEER == 0 => VERIFYHOST == 0 */
    /* We need to have 2 states: default and a set value */
    /* So -1 => default, >= 0 => use value; */
    if(ssl->verifypeer >= 0)
        SETCURLOPT(state, CURLOPT_SSL_VERIFYPEER, (OPTARG)(ssl->verifypeer));
    /* So -1 => default >= 0 => use value */
    if(state->auth->ssl.verifypeer >= 0) {
        SETCURLOPT(state, CURLOPT_SSL_VERIFYPEER, (OPTARG)(state->auth->ssl.verifypeer));
        if(state->auth->ssl.verifypeer == 0) state->auth->ssl.verifyhost = 0;
    }
    break;
case CURLOPT_SSL_VERIFYHOST:
#ifdef HAVE_LIBCURL_766
    if(ssl->verifyhost >= 0)
        SETCURLOPT(state, CURLOPT_SSL_VERIFYHOST, (OPTARG)(ssl->verifyhost));
    if(state->auth->ssl.verifyhost >= 0) {
        SETCURLOPT(state, CURLOPT_SSL_VERIFYHOST, (OPTARG)(state->auth->ssl.verifyhost));
    }
#endif
    if(ssl->certificate)
        SETCURLOPT(state, CURLOPT_SSLCERT, ssl->certificate);
    if(ssl->key)
        SETCURLOPT(state, CURLOPT_SSLKEY, ssl->key);
    if(ssl->keypasswd)
    break;
case CURLOPT_SSLCERT:
    if(state->auth->ssl.certificate)
        SETCURLOPT(state, CURLOPT_SSLCERT, state->auth->ssl.certificate);
    break;
case CURLOPT_SSLKEY:
    if(state->auth->ssl.key)
        SETCURLOPT(state, CURLOPT_SSLKEY, state->auth->ssl.key);
    if(state->auth->ssl.keypasswd)
        /* libcurl prior to 7.16.4 used 'CURLOPT_SSLKEYPASSWD' */
        SETCURLOPT(state, CURLOPT_KEYPASSWD, ssl->keypasswd);
    if(ssl->cainfo)
        SETCURLOPT(state, CURLOPT_CAINFO, ssl->cainfo);
    if(ssl->capath)
        SETCURLOPT(state, CURLOPT_CAPATH, ssl->capath);
}
break;
        SETCURLOPT(state, CURLOPT_SSLKEYPASSWD, state->auth->ssl.keypasswd);
    break;
case CURLOPT_CAINFO:
    if(state->auth->ssl.cainfo)
        SETCURLOPT(state, CURLOPT_CAINFO, state->auth->ssl.cainfo);
    break;
case CURLOPT_CAPATH:
    if(state->auth->ssl.capath)
        SETCURLOPT(state, CURLOPT_CAPATH, state->auth->ssl.capath);
    break;
case CURLOPT_USE_SSL:
    break;

#ifdef HAVE_CURLOPT_BUFFERSIZE
case CURLOPT_BUFFERSIZE:
@@ -200,6 +215,12 @@ NCD4_set_flags_perlink(NCD4INFO* state)
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_COOKIEJAR);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_USERPWD);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_PROXY);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSL_VERIFYPEER);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSL_VERIFYHOST);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSLCERT);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_SSLKEY);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_CAINFO);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_CAPATH);
    if(ret == NC_NOERR) ret = set_curlflag(state,CURLOPT_USE_SSL);
    if(ret == NC_NOERR) ret = set_curlflag(state, CURLOPT_FOLLOWLOCATION);
    if(ret == NC_NOERR) ret = set_curlflag(state, CURLOPT_MAXREDIRS);
@@ -136,6 +136,7 @@ static const struct MACRODEF {
    {"xarray","mode",{"zarr", NULL}},
    {"noxarray","mode",{"nczarr", "noxarray", NULL}},
    {"zarr","mode",{"nczarr","zarr", NULL}},
    {"gs3","mode",{"gs3","nczarr",NULL}}, /* Google S3 API */
    {NULL,NULL,{NULL}}
};

@@ -196,6 +197,7 @@ static struct NCPROTOCOLLIST {
    {"dods","http","mode=dap2"},
    {"dap4","http","mode=dap4"},
    {"s3","s3","mode=s3"},
    {"gs3","gs3","mode=gs3"},
    {NULL,NULL,NULL} /* Terminate search */
};

@@ -899,7 +901,7 @@ NC_infermodel(const char* path, int* omodep, int iscreate, int useparallel, void
    /* If s3, then rebuild the url */
    if(NC_iss3(uri)) {
        NCURI* newuri = NULL;
        if((stat = NC_s3urlrebuild(uri,NULL,NULL,&newuri))) goto done;
        if((stat = NC_s3urlrebuild(uri,NULL,&newuri))) goto done;
        ncurifree(uri);
        uri = newuri;
    } else if(strcmp(uri->protocol,"file")==0) {
@@ -168,6 +168,7 @@ done:
    }
    nullfree(tmp1);
    clearPath(&inparsed);
    //fprintf(stderr,">>> ncpathcvt: inpath=%s result=%s\n",inpath,result);
    return result;
}
@@ -437,7 +437,9 @@ rccompile(const char* filepath)
    NCURI* uri = NULL;
    char* nextline = NULL;
    NCglobalstate* globalstate = NC_getglobalstate();
    char* bucket = NULL;
    NCS3INFO s3;

    memset(&s3,0,sizeof(s3));

    if((ret=NC_readfile(filepath,tmp))) {
        nclog(NCLOGWARN, "Could not open configuration file: %s",filepath);
@@ -484,9 +486,8 @@ rccompile(const char* filepath)
        if(NC_iss3(uri)) {
            NCURI* newuri = NULL;
            /* Rebuild the url to S3 "path" format */
            nullfree(bucket);
            bucket = NULL;
            if((ret = NC_s3urlrebuild(uri,&bucket,NULL,&newuri))) goto done;
            NC_s3clear(&s3);
            if((ret = NC_s3urlrebuild(uri,&s3,&newuri))) goto done;
            ncurifree(uri);
            uri = newuri;
            newuri = NULL;
@@ -546,6 +547,7 @@ rccompile(const char* filepath)
    rcorder(rc);

done:
    NC_s3clear(&s3);
    if(contents) free(contents);
    ncurifree(uri);
    ncbytesfree(tmp);
@@ -30,6 +30,7 @@
#undef AWSDEBUG

#define AWSHOST ".amazonaws.com"
#define GOOGLEHOST "storage.googleapis.com"

enum URLFORMAT {UF_NONE=0, UF_VIRTUAL=1, UF_PATH=2, UF_S3=3, UF_OTHER=4};

@@ -44,15 +45,12 @@ Rebuild an S3 url into a canonical path-style url.
If region is not in the host, then use specified region
if provided, otherwise us-east-1.
@param url (in) the current url
@param region (in) region to use if needed; NULL => us-east-1
       (out) region from url or the input region
@param bucketp (in) bucket to use if needed
       (out) bucket from url
@param s3 (in/out) NCS3INFO structure
@param pathurlp (out) the resulting pathified url string
*/

int
NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** newurlp)
NC_s3urlrebuild(NCURI* url, NCS3INFO* s3, NCURI** newurlp)
{
    int i,stat = NC_NOERR;
    NClist* hostsegments = NULL;
@@ -63,6 +61,7 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne
    char* host = NULL;
    char* path = NULL;
    char* region = NULL;
    NCS3SVC svc = NCS3UNK;

    if(url == NULL)
        {stat = NC_EURL; goto done;}
@@ -83,14 +82,27 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne
    Path: https://s3.<region>.amazonaws.com/<bucket-name>/<path> (3)
    or: https://s3.amazonaws.com/<bucket-name>/<path> -- region defaults to us-east-1 (4)
    S3: s3://<bucket-name>/<path> (5)
    Other: https://<host>/<bucket-name>/<path> (6)
    Google: https://storage.googleapis.com/<bucket-name>/<path> (6)
    or: gs3://<bucket-name>/<path> (7)
    Other: https://<host>/<bucket-name>/<path> (8)
    */
    if(url->host == NULL || strlen(url->host) == 0)
        {stat = NC_EURL; goto done;}

    /* Reduce the host to standard form such as s3.amazonaws.com by pulling out the
       region and bucket from the host */
    if(strcmp(url->protocol,"s3")==0 && nclistlength(hostsegments)==1) { /* Format (5) */
        bucket = nclistremove(hostsegments,0);
        /* region unknown at this point */
        /* Host will be set to canonical form later */
        svc = NCS3;
    } else if(strcmp(url->protocol,"gs3")==0 && nclistlength(hostsegments)==1) { /* Format (7) */
        bucket = nclistremove(hostsegments,0);
        /* region unknown at this point */
        /* Host will be set to canonical form later */
        svc = NCS3GS;
    } else if(endswith(url->host,AWSHOST)) { /* Virtual or path */
        svc = NCS3;
        /* If we find a bucket as part of the host, then remove it */
        switch (nclistlength(hostsegments)) {
        default: stat = NC_EURL; goto done;
@@ -99,11 +111,11 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne
            /* bucket unknown at this point */
            break;
        case 4: /* Format (2) or (3) */
            if(strcasecmp(nclistget(hostsegments,1),"s3")==0) { /* Format (2) */
            if(strcasecmp(nclistget(hostsegments,0),"s3")!=0) { /* Presume format (2) */
                /* region unknown at this point */
                bucket = nclistremove(hostsegments,0); /* Note removeal */
                bucket = nclistremove(hostsegments,0); /* Make canonical */
            } else if(strcasecmp(nclistget(hostsegments,0),"s3")==0) { /* Format (3) */
                region = strdup(nclistget(hostsegments,1));
                region = nclistremove(hostsegments,1); /* Make canonical */
                /* bucket unknown at this point */
            } else /* ! Format (2) and ! Format (3) => error */
                {stat = NC_EURL; goto done;}
@@ -111,20 +123,27 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne
        case 5: /* Format (1) */
            if(strcasecmp(nclistget(hostsegments,1),"s3")!=0)
                {stat = NC_EURL; goto done;}
            region = strdup(nclistget(hostsegments,2));
            bucket = strdup(nclistremove(hostsegments,0));
            /* Make canonical */
            region = nclistremove(hostsegments,2);
            bucket = nclistremove(hostsegments,0);
            break;
        }
    } else { /* Presume Format (6) */
    } else if(strcasecmp(url->host,GOOGLEHOST)==0) { /* Google (6) */
        if((host = strdup(url->host))==NULL)
            {stat = NC_ENOMEM; goto done;}
        /* region is unknown */
        /* bucket is unknown at this point */
        svc = NCS3GS;
    } else { /* Presume Format (8) */
        if((host = strdup(url->host))==NULL)
            {stat = NC_ENOMEM; goto done;}
        /* region is unknown */
        /* bucket is unknown */
    }

    /* region = (1) from url, (2) inoutregion, (3) default */
    if(region == NULL)
        region = (inoutregionp?nulldup(*inoutregionp):NULL);
    /* region = (1) from url, (2) s3->region, (3) default */
    if(region == NULL && s3 != NULL)
        region = nulldup(s3->region);
    if(region == NULL) {
        const char* region0 = NULL;
        /* Get default region */
@@ -133,23 +152,30 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne
    }
    if(region == NULL) {stat = NC_ES3; goto done;}

    /* bucket = (1) from url, (2) inoutbucket */
    /* bucket = (1) from url, (2) s3->bucket */
    if(bucket == NULL && nclistlength(pathsegments) > 0) {
        bucket = nclistremove(pathsegments,0); /* Get from the URL path; will reinsert below */
    }
    if(bucket == NULL)
        bucket = (inoutbucketp?nulldup(*inoutbucketp):NULL);
    if(bucket == NULL && s3 != NULL)
        bucket = nulldup(s3->bucket);
    if(bucket == NULL) {stat = NC_ES3; goto done;}

    if(host == NULL) { /* Construct the revised host */
    if(svc == NCS3) {
        /* Construct the revised host */
        ncbytesclear(buf);
        ncbytescat(buf,"s3.");
        ncbytescat(buf,region);
        ncbytescat(buf,AWSHOST);
        nullfree(host);
        host = ncbytesextract(buf);
    } else if(svc == NCS3GS) {
        nullfree(host);
        host = strdup(GOOGLEHOST);
    }

    /* Construct the revised path */
    ncbytesclear(buf);

    /* Construct the revised path */
    if(bucket != NULL) {
        ncbytescat(buf,"/");
        ncbytescat(buf,bucket);
@@ -159,10 +185,13 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne
        ncbytescat(buf,nclistget(pathsegments,i));
    }
    path = ncbytesextract(buf);

    /* complete the new url */
    if((newurl=ncuriclone(url))==NULL) {stat = NC_ENOMEM; goto done;}
    ncurisetprotocol(newurl,"https");
    assert(host != NULL);
    ncurisethost(newurl,host);
    assert(path != NULL);
    ncurisetpath(newurl,path);
    /* Rebuild the url->url */
    ncurirebuild(newurl);
@@ -171,9 +200,11 @@ NC_s3urlrebuild(NCURI* url, char** inoutbucketp, char** inoutregionp, NCURI** ne
    fprintf(stderr,">>> NC_s3urlrebuild: final=%s bucket=%s region=%s\n",uri->uri,bucket,region);
#endif
    if(newurlp) {*newurlp = newurl; newurl = NULL;}
    if(inoutbucketp) {*inoutbucketp = bucket; bucket = NULL;}
    if(inoutregionp) {*inoutregionp = region; region = NULL;}

    if(s3 != NULL) {
        s3->bucket = bucket; bucket = NULL;
        s3->region = region; region = NULL;
        s3->svc = svc;
    }
done:
    nullfree(region);
    nullfree(bucket)
@@ -218,7 +249,7 @@ NC_s3urlprocess(NCURI* url, NCS3INFO* s3, NCURI** newurlp)
        s3->profile = strdup(profile0);

    /* Rebuild the URL to path format and get a usable region and optional bucket*/
    if((stat = NC_s3urlrebuild(url,&s3->bucket,&s3->region,&url2))) goto done;
    if((stat = NC_s3urlrebuild(url,s3,&url2))) goto done;
    s3->host = strdup(url2->host);
    /* construct the rootkey minus the leading bucket */
    pathsegments = nclistnew();
@@ -268,7 +299,7 @@ NC_s3clear(NCS3INFO* s3)
}

/*
Check if a url has indicators that signal an S3 url.
Check if a url has indicators that signal an S3 or Google S3 url.
*/

int
@@ -277,13 +308,17 @@ NC_iss3(NCURI* uri)
    int iss3 = 0;

    if(uri == NULL) goto done; /* not a uri */
    /* is the protocol "s3"? */
    /* is the protocol "s3" or "gs3" ? */
    if(strcasecmp(uri->protocol,"s3")==0) {iss3 = 1; goto done;}
    /* Is "s3" in the mode list? */
    if(NC_testmode(uri,"s3")) {iss3 = 1; goto done;}
    if(strcasecmp(uri->protocol,"gs3")==0) {iss3 = 1; goto done;}
    /* Is "s3" or "gs3" in the mode list? */
    if(NC_testmode(uri,"s3")) {iss3 = 1; goto done;}
    if(NC_testmode(uri,"gs3")) {iss3 = 1; goto done;}
    /* Last chance; see if host looks s3'y */
    if(endswith(uri->host,AWSHOST)) {iss3 = 1; goto done;}

    if(uri->host != NULL) {
        if(endswith(uri->host,AWSHOST)) {iss3 = 1; goto done;}
        if(strcasecmp(uri->host,GOOGLEHOST)==0) {iss3 = 1; goto done;}
    }
done:
    return iss3;
}
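With these changes, NC_iss3() recognizes Google Cloud Storage endpoints alongside AWS. For example (illustrative URLs, with a made-up bucket and path), both of the following spellings are treated as S3-style stores and rebuilt into canonical path form against storage.googleapis.com:

    gs3://example-bucket/path/to/data.zarr#mode=gs3
    https://storage.googleapis.com/example-bucket/path/to/data.zarr#mode=zarr,s3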
@@ -96,6 +96,7 @@

/*****************/

#include "ncs3sdk.h"
#include "nch5s3comms.h" /* S3 Communications */

/****************/
@@ -1063,7 +1064,7 @@ done:
 *----------------------------------------------------------------------------
 */
s3r_t *
NCH5_s3comms_s3r_open(const char* root, const char *region, const char *access_id, const char* access_key)
NCH5_s3comms_s3r_open(const char* root, NCS3SVC svc, const char *region, const char *access_id, const char* access_key)
{
    int ret_value = SUCCEED;
    size_t tmplen = 0;
@@ -1092,10 +1093,15 @@ NCH5_s3comms_s3r_open(const char* root, const char *region, const char *access_i
     * RECORD THE ROOT PATH
     *************************************/

    /* Verify that the region is a substring of root */
    if(region != NULL && region[0] != '\0') {
        if(strstr(root,region) == NULL)
            HGOTO_ERROR(H5E_ARGS, NC_EINVAL, NULL, "region not present in root path.");
    switch (svc) {
    case NCS3:
        /* Verify that the region is a substring of root */
        if(region != NULL && region[0] != '\0') {
            if(strstr(root,region) == NULL)
                HGOTO_ERROR(H5E_ARGS, NC_EINVAL, NULL, "region not present in root path.");
        }
        break;
    default: break;
    }
    handle->rootpath = nulldup(root);
@@ -502,7 +502,7 @@ EXTERNL hrb_t *NCH5_s3comms_hrb_init_request(const char *resource, const char *h
 * DECLARATION OF S3REQUEST ROUTINES *
 *************************************/

EXTERNL s3r_t *NCH5_s3comms_s3r_open(const char* root, const char* region, const char* id, const char* access_key);
EXTERNL s3r_t *NCH5_s3comms_s3r_open(const char* root, NCS3SVC svc, const char* region, const char* id, const char* access_key);

EXTERNL int NCH5_s3comms_s3r_close(s3r_t *handle);
@@ -82,9 +82,11 @@ ncloginit(void)
}

/*!
Enable/Disable logging.
Enable logging messages to a given level. Set to NCLOGOFF to disable
all messages, NCLOGERR for errors only, NCLOGWARN for warnings and
errors, and so on

\param[in] tf If 1, then turn on logging, if 0, then turn off logging.
\param[in] level Messages above this level are ignored

\return The previous value of the logging flag.
*/
@@ -136,8 +138,11 @@ ncvlog(int level, const char* fmt, va_list ap)
    const char* prefix;

    if(!nclogginginitialized) ncloginit();
    if(nclog_global.loglevel > level || nclog_global.nclogstream == NULL)
        return;

    if(nclog_global.loglevel < level || nclog_global.nclogstream == NULL) {
        return;
    }

    prefix = nctagname(level);
    fprintf(nclog_global.nclogstream,"%s: ",prefix);
    if(fmt != NULL) {
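The comparison in ncvlog() was inverted: with levels ordered from NCLOGOFF through NCLOGERR, NCLOGWARN, and upward, a message should be dropped when it is more verbose than the configured threshold. A minimal illustration of the corrected rule (a sketch, not the commit's code):

    /* Emit only messages at or below the configured verbosity. */
    if(nclog_global.loglevel < level) return; /* message too verbose: drop */
    /* The old test, loglevel > level, silenced exactly the wrong set. */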
@@ -15,8 +15,8 @@
#include "ncrc.h"
#include "ncxml.h"

#include "nch5s3comms.h"
#include "ncs3sdk.h"
#include "nch5s3comms.h"

#define NCTRACING
#ifdef NCTRACING
@@ -181,7 +181,7 @@ NC_s3sdkcreateclient(NCS3INFO* info)
        if((stat = NC_s3profilelookup(info->profile, "aws_secret_access_key", &accesskey))) goto done;
    }
    if((s3client->rooturl = makes3rooturl(info))==NULL) {stat = NC_ENOMEM; goto done;}
    s3client->h5s3client = NCH5_s3comms_s3r_open(s3client->rooturl,info->region,accessid,accesskey);
    s3client->h5s3client = NCH5_s3comms_s3r_open(s3client->rooturl,info->svc,info->region,accessid,accesskey);
    if(s3client->h5s3client == NULL) {stat = NC_ES3; goto done;}

done:
@@ -613,6 +613,10 @@ HTTP/1.1 200
    <DisplayName>string</DisplayName>
    <ID>string</ID>
</Owner>
#ifdef GOOGLES3
<Generation>string</Generation>
<MetaGeneration>string</MetaGeneration>
#endif
...
</Contents>
...
@@ -679,6 +683,8 @@ parse_listbucketresult(char* xml, unsigned long long xmllen, struct LISTOBJECTSV
        result->nextcontinuationtoken = trim(ncxml_text(x),RECLAIM);
    } else if(strcmp(elem,"StartAfter")==0) {
        result->startafter = trim(ncxml_text(x),RECLAIM);
    } else if(strcmp(elem,"StartAfter")==0) {
        result->startafter = trim(ncxml_text(x),RECLAIM);
    } else {
        nclog(NCLOGERR,"Unexpected Element: <%s>",elem);
        stat = NC_ES3;
@@ -711,7 +717,7 @@ parse_object(ncxml_t root, NClist* objects)

    for(x=ncxml_child_first(root);x != NULL;x=ncxml_child_next(x)) {
        const char* elem = ncxml_name(x);
        if(strcmp(elem,"ChecksumAlorithm")==0) {
        if(strcmp(elem,"ChecksumAlgorithm")==0) {
            if((stat = parse_checksumalgorithm(x,object->checksumalgorithms))) goto done;
        } else if(strcmp(elem,"ETag")==0) {
            object->etag = trim(ncxml_text(x),RECLAIM);
@@ -725,6 +731,10 @@ parse_object(ncxml_t root, NClist* objects)
            object->size = trim(ncxml_text(x),RECLAIM);
        } else if(strcmp(elem,"StorageClass")==0) {
            object->storageclass = trim(ncxml_text(x),RECLAIM);
        } else if(strcmp(elem,"Generation")==0) {
            /* Ignore */
        } else if(strcmp(elem,"MetaGeneration")==0) {
            /* Ignore */
        } else {
            nclog(NCLOGERR,"Unexpected Element: <%s>",elem);
            stat = NC_ES3;
@@ -1281,7 +1281,7 @@ removedups(NClist* list)
    /* look for dups for this entry */
    for(j=nclistlength(list)-2;j>i;j-=2) {
        if(strcasecmp(nclistget(list,i),nclistget(list,j))==0
           && strcasecmp(nclistget(list,i+1),nclistget(list,j+1))) {
           && strcasecmp(nclistget(list,i+1),nclistget(list,j+1))==0) {
            nclistremove(list,j+1); nclistremove(list,j);
        }
    }
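The defect fixed here is subtle: strcasecmp() returns 0 on a match, so the second comparison, which was missing its ==0, was true precisely when the two values differed. Illustratively (not from the commit):

    /* strcasecmp(a,b) == 0  <=>  a and b match (case-insensitively) */
    if(strcasecmp(k1,k2)==0 && strcasecmp(v1,v2)==0) { /* true duplicate */ }
    /* whereas `strcasecmp(v1,v2)` alone is true when the values DIFFER,
       so the old code removed entries whose values disagreed. */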
@@ -25,6 +25,7 @@

#ifdef ENABLE_HDF5_ROS3
#include <H5FDros3.h>
#include "ncs3sdk.h"
#endif

/*Nemonic */
@@ -883,12 +884,11 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
#ifdef ENABLE_BYTERANGE
    else if(h5->byterange) { /* Arrange to use the byte-range drivers */
        char* newpath = NULL;
        char* awsregion0 = NULL;
#ifdef ENABLE_HDF5_ROS3
        H5FD_ros3_fapl_t fa;
        const char* profile0 = NULL;
        const char* awsaccessid0 = NULL;
        const char* awssecretkey0 = NULL;
        const char* profile0 = NULL;
        int iss3 = NC_iss3(h5->uri);

        fa.version = H5FD_CURR_ROS3_FAPL_T_VERSION;
@@ -898,9 +898,11 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
        fa.secret_key[0] = '\0';

        if(iss3) {
            /* Rebuild the URL */
            NCS3INFO s3;
            NCURI* newuri = NULL;
            if((retval = NC_s3urlrebuild(h5->uri,NULL,&awsregion0,&newuri))) goto exit;
            /* Rebuild the URL */
            memset(&s3,0,sizeof(s3));
            if((retval = NC_s3urlrebuild(h5->uri,&s3,&newuri))) goto exit;
            if((newpath = ncuribuild(newuri,NULL,NULL,NCURISVC))==NULL)
                {retval = NC_EURL; goto exit;}
            ncurifree(h5->uri);
@@ -909,22 +911,23 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
            BAIL(retval);
        if((retval = NC_s3profilelookup(profile0,AWS_ACCESS_KEY_ID,&awsaccessid0)))
            BAIL(retval);
        if((retval = NC_s3profilelookup(profile0,AWS_SECRET_ACCESS_KEY,&awssecretkey0)))
        if((retval = NC_s3profilelookup(profile0,AWS_SECRET_ACCESS_KEY,&awssecretkey0)))
            BAIL(retval);
        if(awsregion0 == NULL)
            awsregion0 = strdup(S3_REGION_DEFAULT);
        if(s3.region == NULL)
            s3.region = strdup(S3_REGION_DEFAULT);
        if(awsaccessid0 == NULL || awssecretkey0 == NULL ) {
            /* default, non-authenticating, "anonymous" fapl configuration */
            fa.authenticate = (hbool_t)0;
        } else {
            fa.authenticate = (hbool_t)1;
            assert(awsregion0 != NULL && strlen(awsregion0) > 0);
            assert(s3.region != NULL && strlen(s3.region) > 0);
            assert(awsaccessid0 != NULL && strlen(awsaccessid0) > 0);
            assert(awssecretkey0 != NULL && strlen(awssecretkey0) > 0);
            strlcat(fa.aws_region,awsregion0,H5FD_ROS3_MAX_REGION_LEN);
            strlcat(fa.aws_region,s3.region,H5FD_ROS3_MAX_REGION_LEN);
            strlcat(fa.secret_id, awsaccessid0, H5FD_ROS3_MAX_SECRET_ID_LEN);
            strlcat(fa.secret_key, awssecretkey0, H5FD_ROS3_MAX_SECRET_KEY_LEN);
        }
        NC_s3clear(&s3);
        /* create and set fapl entry */
        if(H5Pset_fapl_ros3(fapl_id, &fa) < 0)
            BAIL(NC_EHDFERR);
@@ -938,7 +941,6 @@ nc4_open_file(const char *path, int mode, void* parameters, int ncid)
        if ((h5->hdfid = nc4_H5Fopen((newpath?newpath:path), flags, fapl_id)) < 0)
            BAIL(NC_EHDFERR);
        nullfree(newpath);
        nullfree(awsregion0);
    }
#endif
    else {
@@ -179,7 +179,7 @@ NCZ_zclose_var1(NC_VAR_INFO_T* var)
}

/**
 * @internal Close resources for vars in a group.
 * @internal Close nczarr resources for vars in a group.
 *
 * @param grp Pointer to group info struct.
 *
@@ -34,7 +34,7 @@ int
zreport(int err, const char* msg, const char* file, const char* fcn, int line)
{
    if(err == 0) return err;
    ZLOG(NCLOGWARN,"!!! zreport: err=%d msg=%s",err,msg);
    ZLOG(NCLOGWARN,"!!! zreport: err=%d msg=%s @ %s#%s:%d",err,msg,file,fcn,line);
    ncbacktrace();
    return zbreakpoint(err);
}
@@ -1448,29 +1448,11 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
{
    int stat = NC_NOERR;
    int i,j;
    char* varpath = NULL;
    char* key = NULL;
    NCZ_FILE_INFO_T* zinfo = NULL;
    NC_VAR_INFO_T* var = NULL;
    NCZ_VAR_INFO_T* zvar = NULL;
    NCZMAP* map = NULL;
    NCjson* jvar = NULL;
    NCjson* jncvar = NULL;
    NCjson* jdimrefs = NULL;
    NCjson* jvalue = NULL;
    int purezarr = 0;
    int xarray = 0;
    int formatv1 = 0;
    nc_type vtype;
    int vtypelen;
    size64_t* shapes = NULL;
    int rank = 0;
    int zarr_rank = 1; /* Need to watch out for scalars */
    NClist* dimnames = nclistnew();
#ifdef ENABLE_NCZARR_FILTERS
    NCjson* jfilter = NULL;
    int chainindex;
#endif

    ZTRACE(3,"file=%s grp=%s |varnames|=%u",file->controller->path,grp->hdr.name,nclistlength(varnames));

@@ -1483,7 +1465,32 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)

    /* Load each var in turn */
    for(i = 0; i < nclistlength(varnames); i++) {
        const char* varname = nclistget(varnames,i);
        /* per-variable info */
        NC_VAR_INFO_T* var = NULL;
        NCZ_VAR_INFO_T* zvar = NULL;
        NCjson* jvar = NULL;
        NCjson* jncvar = NULL;
        NCjson* jdimrefs = NULL;
        NCjson* jvalue = NULL;
        char* varpath = NULL;
        char* key = NULL;
        const char* varname = NULL;
        size64_t* shapes = NULL;
        NClist* dimnames = NULL;
        int varsized = 0;
        int suppress = 0; /* Abort processing of this variable */
        nc_type vtype = NC_NAT;
        int vtypelen = 0;
        int rank = 0;
        int zarr_rank = 0; /* Need to watch out for scalars */
#ifdef ENABLE_NCZARR_FILTERS
        NCjson* jfilter = NULL;
        int chainindex = 0;
#endif

        dimnames = nclistnew();
        varname = nclistget(varnames,i);

        if((stat = nc4_var_list_add2(grp, varname, &var)))
            goto done;

@@ -1522,6 +1529,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
            if(version != zinfo->zarr.zarr_version)
                {stat = (THROW(NC_ENCZARR)); goto done;}
        }

        /* Set the type and endianness of the variable */
        {
            int endianness;
@@ -1609,23 +1617,6 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
            jdimrefs = NULL;
        }

        /* shape */
        {
            if((stat = NCJdictget(jvar,"shape",&jvalue))) goto done;
            if(NCJsort(jvalue) != NCJ_ARRAY) {stat = (THROW(NC_ENCZARR)); goto done;}
            if(zvar->scalar) {
                rank = 0;
                zarr_rank = 1; /* Zarr does not support scalars */
            } else
                rank = (zarr_rank = NCJlength(jvalue));
            /* Save the rank of the variable */
            if((stat = nc4_var_set_ndims(var, rank))) goto done;
            /* extract the shapes */
            if((shapes = (size64_t*)malloc(sizeof(size64_t)*zarr_rank)) == NULL)
                {stat = (THROW(NC_ENOMEM)); goto done;}
            if((stat = decodeints(jvalue, shapes))) goto done;
        }

        /* Capture dimension_separator (must precede chunk cache creation) */
        {
            NCglobalstate* ngs = NC_getglobalstate();
@@ -1661,6 +1652,36 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
            }
        }

        /* shape */
        {
            if((stat = NCJdictget(jvar,"shape",&jvalue))) goto done;
            if(NCJsort(jvalue) != NCJ_ARRAY) {stat = (THROW(NC_ENCZARR)); goto done;}

            /* Process the rank */
            zarr_rank = NCJlength(jvalue);
            if(zarr_rank == 0) {
                /* suppress variable */
                ZLOG(NCLOGWARN,"Empty shape for variable %s suppressed",var->hdr.name);
                suppress = 1;
                goto suppressvar;
            }

            if(zvar->scalar) {
                rank = 0;
                zarr_rank = 1; /* Zarr does not support scalars */
            } else
                rank = (zarr_rank = NCJlength(jvalue));

            if(zarr_rank > 0) {
                /* Save the rank of the variable */
                if((stat = nc4_var_set_ndims(var, rank))) goto done;
                /* extract the shapes */
                if((shapes = (size64_t*)malloc(sizeof(size64_t)*zarr_rank)) == NULL)
                    {stat = (THROW(NC_ENOMEM)); goto done;}
                if((stat = decodeints(jvalue, shapes))) goto done;
            }
        }

        /* chunks */
        {
            size64_t chunks[NC_MAX_VAR_DIMS];
@@ -1668,8 +1689,7 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
            if(jvalue != NULL && NCJsort(jvalue) != NCJ_ARRAY)
                {stat = (THROW(NC_ENCZARR)); goto done;}
            /* Verify the rank */
            assert (zarr_rank == NCJlength(jvalue));
            if(zvar->scalar) {
            if(zvar->scalar || zarr_rank == 0) {
                if(var->ndims != 0)
                    {stat = (THROW(NC_ENCZARR)); goto done;}
                zvar->chunkproduct = 1;
@@ -1746,37 +1766,47 @@ define_vars(NC_FILE_INFO_T* file, NC_GRP_INFO_T* grp, NClist* varnames)
                if((stat = NCZ_filter_build(file,var,jfilter,chainindex++))) goto done;
            }
        }
        /* Suppress variable if there are filters and var is not fixed-size */
        if(varsized && nclistlength((NClist*)var->filters) > 0)
            suppress = 1;
#endif
        if((stat = computedimrefs(file, var, purezarr, xarray, rank, dimnames, shapes, var->dim)))
            goto done;

        if(!zvar->scalar) {
            /* Extract the dimids */
            for(j=0;j<rank;j++)
                var->dimids[j] = var->dim[j]->hdr.id;
        if(zarr_rank > 0) {
            if((stat = computedimrefs(file, var, purezarr, xarray, rank, dimnames, shapes, var->dim)))
                goto done;
            if(!zvar->scalar) {
                /* Extract the dimids */
                for(j=0;j<rank;j++)
                    var->dimids[j] = var->dim[j]->hdr.id;
            }
        }

#ifdef ENABLE_NCZARR_FILTERS
        /* At this point, we can finalize the filters */
        if((stat = NCZ_filter_setup(var))) goto done;
        if(!suppress) {
            /* At this point, we can finalize the filters */
            if((stat = NCZ_filter_setup(var))) goto done;
        }
#endif

suppressvar:
        if(suppress) {
            /* Reclaim NCZarr variable specific info */
            (void)NCZ_zclose_var1(var);
            /* Remove from list of variables and reclaim the top level var object */
            (void)nc4_var_list_del(grp, var);
            var = NULL;
        }

        /* Clean up from last cycle */
        nclistfreeall(dimnames); dimnames = nclistnew();
        nclistfreeall(dimnames); dimnames = NULL;
        nullfree(varpath); varpath = NULL;
        nullfree(shapes); shapes = NULL;
        nullfree(key); key = NULL;
        if(formatv1) {NCJreclaim(jncvar); jncvar = NULL;}
        NCJreclaim(jvar); jvar = NULL;
        var = NULL;
    }

done:
    nullfree(shapes);
    nullfree(varpath);
    nullfree(key);
    nclistfreeall(dimnames);
    NCJreclaim(jvar);
    if(formatv1) NCJreclaim(jncvar);
    return ZUNTRACE(THROW(stat));
}
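The new suppressvar path is driven by metadata such as a .zarray whose "shape" entry is an empty list. Sketched with the same NCJ helpers the function already uses (illustrative only):

    /* An empty "shape" array, e.g. "shape": [], yields zarr_rank == 0;
       the variable is then suppressed rather than corrupting later steps. */
    if((stat = NCJdictget(jvar, "shape", &jvalue))) goto done;
    if(NCJsort(jvalue) != NCJ_ARRAY) {stat = (THROW(NC_ENCZARR)); goto done;}
    if(NCJlength(jvalue) == 0) suppress = 1;

This is the condition exercised by the new run_corrupt.sh test below, which dumps a store whose shape metadata is missing or empty.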
@@ -60,7 +60,6 @@ ${NCCOPY} -4 -V three_dmn_rec_var -F *,32001,0,0,0,0,1,1,0 ./tmp_bloscx3.nc ./tm
# This should fail because shuffle is off
if ${NCCOPY} -4 -V three_dmn_rec_var -F *,32001,0,0,0,0,1,0,0 ./tmp_bloscx3.nc ./tmp_bloscx4_fail.nc ; then
  echo "*** not xfail: nccopy "
  exit 1;
else
  echo "*** xfail: nccopy "
fi
@@ -8,6 +8,5 @@ set -e
echo ""
echo "*** Testing #encode=" mechanism

#${NCDUMP} -h 'http://opendap2.oceanbrowser.net/thredds/dodsC/data/emodnet1-domains/tmp%20test.nc?lon[0:8]#encode=none'
# raw: http://iridl.ldeo.columbia.edu/SOURCES/.Indices/.soi/.c8110/.anomaly/T/(Jan 1979)/VALUE/dods
${NCDUMP} -h 'http://iridl.ldeo.columbia.edu/SOURCES/.Indices/.soi/.c8110/.anomaly/T/%28Jan%201979%29/VALUE/dods?anomaly[0]'
@@ -1,6 +1,10 @@
#!/bin/sh

if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh

if test "x$SETX" != x ; then set -x ; fi

set -e

quiet=0
@@ -143,8 +143,8 @@ IF(ENABLE_TESTS)
  add_sh_test(nczarr_test run_ut_misc)
  add_sh_test(nczarr_test run_ncgen4)
  if(LARGE_FILE_TESTS)
    BUILD_BIN_TEST(test_readcaching})
    BUILD_BIN_TEST(test_writecaching})
    BUILD_BIN_TEST(test_readcaching)
    BUILD_BIN_TEST(test_writecaching)
    BUILD_BIN_TEST(test_chunkcases ${TSTCOMMONSRC})
    add_sh_test(nczarr_test run_cachetest)
    add_sh_test(nczarr_test run_chunkcases)
@@ -193,6 +193,8 @@ IF(ENABLE_TESTS)

  if(ENABLE_NCZARR_ZIP)
    add_sh_test(nczarr_test run_newformat)
    # Test various corrupted files
    ADD_SH_TEST(nczarr_test run_corrupt.sh)
  endif()

  IF(FALSE) # Obsolete tests
@ -161,6 +161,9 @@ endif # ISMINGW
endif #ENABLE_FILTER_TESTING
endif #ENABLE_NCZARR_FILTERS

# Test various corrupted files
TESTS += run_corrupt.sh

endif #BUILD_UTILITIES

# These programs are used by the test cases
@ -200,7 +203,8 @@ run_purezarr.sh run_interop.sh run_misc.sh \
run_filter.sh \
run_newformat.sh run_nczarr_fill.sh run_quantize.sh \
run_jsonconvention.sh run_nczfilter.sh run_unknown.sh \
run_scalar.sh run_strings.sh run_nulls.sh run_notzarr.sh run_external.sh run_unlim_io.sh
run_scalar.sh run_strings.sh run_nulls.sh run_notzarr.sh run_external.sh \
run_unlim_io.sh run_corrupt.sh

EXTRA_DIST += \
ref_ut_map_create.cdl ref_ut_map_writedata.cdl ref_ut_map_writemeta2.cdl ref_ut_map_writemeta.cdl \
@ -228,6 +232,9 @@ ref_nulls_nczarr.baseline ref_nulls_zarr.baseline ref_nulls.cdl ref_notzarr.tar.
EXTRA_DIST += ref_power_901_constants_orig.zip ref_power_901_constants.cdl ref_quotes_orig.zip ref_quotes.cdl \
ref_zarr_test_data.cdl.gz ref_zarr_test_data_2d.cdl.gz

# Additional Files
EXTRA_DIST += ref_noshape.file.zip

CLEANFILES = ut_*.txt ut*.cdl tmp*.nc tmp*.cdl tmp*.txt tmp*.dmp tmp*.zip tmp*.nc tmp*.dump tmp*.tmp tmp*.zmap tmp_ngc.c ref_zarr_test_data.cdl tst_*.nc.zip ref_quotes.zip ref_power_901_constants.zip

BUILT_SOURCES = test_quantize.c test_filter_vlen.c test_unlim_vars.c test_endians.c \
BIN
nczarr_test/ref_noshape.file.zip
Normal file
Binary file not shown.
35
nczarr_test/run_corrupt.sh
Executable file
@ -0,0 +1,35 @@
#!/bin/sh

# Test various kinds of corrupted files


if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh

. "$srcdir/test_nczarr.sh"

set -e

s3isolate "testdir_corrupt"
THISDIR=`pwd`
cd $ISOPATH

export NCLOGGING=WARN

testnoshape1() {
zext=file
unzip ${srcdir}/ref_noshape.file.zip
fileargs ${ISOPATH}/ref_noshape "mode=zarr,$zext"
rm -f tmp_noshape1_${zext}.cdl
${NCDUMP} $flags $fileurl > tmp_noshape1_${zext}.cdl
}

testnoshape2() {
# Test against the original issue URL
rm -f tmp_noshape2_gs.cdl
fileurl="https://storage.googleapis.com/cmip6/CMIP6/CMIP/NASA-GISS/GISS-E2-1-G/historical/r1i1p1f1/day/tasmin/gn/v20181015/#mode=zarr,s3&aws.profile=no"
${NCDUMP} -h $flags $fileurl > tmp_noshape2_gs.cdl
}

testnoshape1
if test "x$FEATURE_S3TESTS" = xyes ; then testnoshape2; fi
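run_corrupt.sh drives these stores through ncdump, but the same check can be made programmatically; the #mode=zarr,file fragment on the URL selects the Zarr driver in netCDF-C. A hedged sketch (the path is illustrative, not part of the test):

/* Sketch: open a possibly-corrupted Zarr store read-only; the path is hypothetical. */
#include <stdio.h>
#include <netcdf.h>

int main(void)
{
    int ncid, stat;
    const char* url = "file:///tmp/ref_noshape#mode=zarr,file";
    if((stat = nc_open(url, NC_NOWRITE, &ncid)) != NC_NOERR) {
        fprintf(stderr, "nc_open failed: %s\n", nc_strerror(stat));
        return 1;
    }
    nc_close(ncid);
    return 0;
}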
@ -1,5 +1,7 @@
#!/bin/sh

set -e

if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh

@ -43,16 +45,19 @@ testcasezip() {
}

testcases3() {
set -x
echo -e "\to Running S3 Testcase:\t$1\t$2"
zext=s3
base=$1
mode=$2
rm -f tmp_${base}_${zext}.cdl
url="https://${UH}/${UB}/${base}.zarr#mode=${mode},s3"
echo "flags: $flags"
# Dumping everything causes timeout so dump a single var
${NCDUMP} -v "/group_with_dims/var2D" $flags $url > tmp_${base}_${zext}.cdl
${NCDUMP} -v "group_with_dims/var2D" $flags $url > tmp_${base}_${zext}.cdl
# Find the proper ref file
diff -b ${ISOPATH}/ref_${base}_2d.cdl tmp_${base}_${zext}.cdl
set +x
}

testallcases() {
@ -1,5 +1,6 @@
#!/bin/sh

#set -x
#set -e
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh

@ -29,7 +30,7 @@ cp ${srcdir}/ref_notzarr.tar.gz .
gunzip ref_notzarr.tar.gz
tar -xf ref_notzarr.tar
if test "x$FEATURE_S3TESTS" = xyes ; then
${execdir}/s3util -f notzarr.file/notzarr.txt -u "https://${URL}" -k "/${S3ISOPATH}/notzarr.s3/notzarr.txt" upload
${execdir}/s3util -f notzarr.file/notzarr.txt -u "https://${URL}" -k "//${S3ISOPATH}/notzarr.s3/notzarr.txt" upload
fi

echo "Test empty file"
@ -7,7 +7,7 @@ if test "x$srcdir" = x ; then srcdir=`pwd`; fi

set -e

s3isolate "testdir_nczarr"
s3isolate "testdir_scalar"
THISDIR=`pwd`
cd $ISOPATH

@ -43,7 +43,7 @@ echo "*** create nczarr file"
${NCGEN} -4 -b -o "$nczarrurl" $top_srcdir/nczarr_test/ref_scalar.cdl

echo "*** read purezarr"
${NCDUMP} -n ref_scalar $zarrurl > tmp_scalar_zarr0_${zext}.cdl
${NCDUMP} -n ref_scalar $zarrurl > tmp_scalar_zarr_${zext}.cdl
${ZMD} -h $zarrurl > tmp_scalar_zarr_${zext}.txt
echo "*** read nczarr"
${NCDUMP} -n ref_scalar $nczarrurl > tmp_scalar_nczarr_${zext}.cdl
@ -53,8 +53,8 @@ echo "*** verify"
diff -bw $top_srcdir/nczarr_test/ref_scalar.cdl tmp_scalar_nczarr_${zext}.cdl

# Fixup
zarrscalar tmp_scalar_zarr0_${zext}.cdl tmp_scalar_zarr_${zext}.cdl
diff -bw $top_srcdir/nczarr_test/ref_scalar.cdl tmp_scalar_zarr_${zext}.cdl
zarrscalar tmp_scalar_zarr_${zext}.cdl tmp_rescale_zarr_${zext}.cdl
diff -bw $top_srcdir/nczarr_test/ref_scalar.cdl tmp_rescale_zarr_${zext}.cdl
}

testcase file
@ -40,6 +40,10 @@ set(netCDF_HAS_DAP4 @HAS_DAP4@)
set(netCDF_HAS_DISKLESS @HAS_DISKLESS@)
set(netCDF_HAS_MMAP @HAS_MMAP@)
set(netCDF_HAS_JNA @HAS_JNA@)
if (netCDF_HAS_HDF4 OR netCDF_HAS_HDF5)
include(CMakeFindDependencyMacro)
find_dependency(HDF5)
endif ()

if (@HAS_PARALLEL@)
include(CMakeFindDependencyMacro)
@ -1,6 +1,11 @@
/* Copyright 2018, UCAR/Unidata and OPeNDAP, Inc.
See the COPYRIGHT file for more information. */

/* WARNING: oc2/occurlfunctions.c and libdap4/d4curlfunctions.c
should be merged since they are essentially the same file.
In the meantime, changes to one should be propagated to the other.
*/

#include "config.h"
#include <stdlib.h>
#ifdef HAVE_STDINT_H
@ -127,36 +132,43 @@ ocset_curlflag(OCstate* state, int flag)
}
break;

case CURLOPT_USE_SSL:
case CURLOPT_SSLCERT: case CURLOPT_SSLKEY:
case CURLOPT_SSL_VERIFYPEER: case CURLOPT_SSL_VERIFYHOST:
case CURLOPT_CAINFO: case CURLOPT_CAPATH:
{
struct ssl* ssl = &state->auth->ssl;
case CURLOPT_SSL_VERIFYPEER:
/* VERIFYPEER == 0 => VERIFYHOST == 0 */
/* We need to have 2 states: default and a set value */
/* So -1 => default >= 0 => use value */
if(ssl->verifypeer >= 0) {
SETCURLOPT(state, CURLOPT_SSL_VERIFYPEER, (OPTARG)(ssl->verifypeer));
}
if(state->auth->ssl.verifypeer >= 0) {
SETCURLOPT(state, CURLOPT_SSL_VERIFYPEER, (OPTARG)(state->auth->ssl.verifypeer));
if(state->auth->ssl.verifypeer == 0) state->auth->ssl.verifyhost = 0;
}
break;
case CURLOPT_SSL_VERIFYHOST:
#ifdef HAVE_LIBCURL_766
if(ssl->verifyhost >= 0) {
SETCURLOPT(state, CURLOPT_SSL_VERIFYHOST, (OPTARG)(ssl->verifyhost));
if(state->auth->ssl.verifyhost >= 0) {
SETCURLOPT(state, CURLOPT_SSL_VERIFYHOST, (OPTARG)(state->auth->ssl.verifyhost));
}
#endif
if(ssl->certificate)
SETCURLOPT(state, CURLOPT_SSLCERT, ssl->certificate);
if(ssl->key)
SETCURLOPT(state, CURLOPT_SSLKEY, ssl->key);
if(ssl->keypasswd)
break;
case CURLOPT_SSLCERT:
if(state->auth->ssl.certificate)
SETCURLOPT(state, CURLOPT_SSLCERT, state->auth->ssl.certificate);
break;
case CURLOPT_SSLKEY:
if(state->auth->ssl.key)
SETCURLOPT(state, CURLOPT_SSLKEY, state->auth->ssl.key);
if(state->auth->ssl.keypasswd)
/* libcurl prior to 7.16.4 used 'CURLOPT_SSLKEYPASSWD' */
SETCURLOPT(state, CURLOPT_KEYPASSWD, ssl->keypasswd);
if(ssl->cainfo)
SETCURLOPT(state, CURLOPT_CAINFO, ssl->cainfo);
if(ssl->capath)
SETCURLOPT(state, CURLOPT_CAPATH, ssl->capath);
}
break;
SETCURLOPT(state, CURLOPT_SSLKEYPASSWD, state->auth->ssl.keypasswd);
break;
case CURLOPT_CAINFO:
if(state->auth->ssl.cainfo)
SETCURLOPT(state, CURLOPT_CAINFO, state->auth->ssl.cainfo);
break;
case CURLOPT_CAPATH:
if(state->auth->ssl.capath)
SETCURLOPT(state, CURLOPT_CAPATH, state->auth->ssl.capath);
break;
case CURLOPT_USE_SSL:
break;

#ifdef HAVE_CURLOPT_BUFFERSIZE
case CURLOPT_BUFFERSIZE:
@ -210,6 +222,12 @@ ocset_flags_perlink(OCstate* state)
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_COOKIEJAR);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_USERPWD);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_PROXY);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_SSL_VERIFYPEER);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_SSL_VERIFYHOST);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_SSLCERT);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_SSLKEY);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_CAINFO);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_CAPATH);
if(stat == OC_NOERR) stat = ocset_curlflag(state,CURLOPT_USE_SSL);
if(stat == OC_NOERR) stat = ocset_curlflag(state, CURLOPT_FOLLOWLOCATION);
if(stat == OC_NOERR) stat = ocset_curlflag(state, CURLOPT_MAXREDIRS);
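The rewritten switch gives every SSL-related option its own case and preserves the tri-state convention spelled out in the comments: -1 means leave libcurl's default alone, while any value >= 0 is applied explicitly. A minimal sketch of that convention against the plain libcurl API (the sslcfg struct is illustrative, not the library's):

/* Sketch of the tri-state pattern used by the verifypeer/verifyhost cases. */
#include <curl/curl.h>

struct sslcfg { long verifypeer; long verifyhost; }; /* -1 => keep default */

static void
apply_ssl(CURL* curl, const struct sslcfg* cfg)
{
    if(cfg->verifypeer >= 0) {
        curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, cfg->verifypeer);
        /* as above: VERIFYPEER == 0 forces VERIFYHOST to 0 as well */
    }
    if(cfg->verifyhost >= 0)
        curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, cfg->verifyhost);
}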
@ -113,7 +113,7 @@ H5Z_filter_test(unsigned int flags, size_t cd_nelmts,
break;
case TC_ODDSIZE:
/* Print out the chunk size */
fprintf(stderr,"nbytes = %lld chunk size = %lld\n",(long long)nbytes,(long long)*buf_size);
fprintf(stderr,">>> nbytes = %lld chunk size = %lld\n",(long long)nbytes,(long long)*buf_size);
fflush(stderr);
break;
default: break;
@ -122,13 +122,15 @@ H5Z_filter_test(unsigned int flags, size_t cd_nelmts,
if (flags & H5Z_FLAG_REVERSE) { /* Decompress */

if(testcase == TC_EXPANDED) {
#ifdef DEBUG
int i;
float* b = (float*)*buf;
fprintf(stderr,"TC_EXPANDED: decompress: nbytes=%u buf_size=%u xdata[0..8]=|",(unsigned)nbytes,(unsigned)*buf_size);
fprintf(stderr,">>> TC_EXPANDED: decompress: nbytes=%u buf_size=%u xdata[0..8]=|",(unsigned)nbytes,(unsigned)*buf_size);
for(i=0;i<8;i++) {
fprintf(stderr," %u",(int)(b[1024+i]));
}
fprintf(stderr,"|\n");
#endif
/* Replace buffer */
newbuf = H5allocate_memory(*buf_size,0);
if(newbuf == NULL) abort();
@ -149,8 +151,8 @@ fprintf(stderr,"TC_EXPANDED: decompress: nbytes=%u buf_size=%u xdata[0..8]=|",(u
if(testcase == TC_EXPANDED) {
int i;
float* b;
#if 0
fprintf(stderr,"TC_EXPANDED: compress: nbytes=%u buf_size=%u size=%u\n",(unsigned)nbytes,(unsigned)*buf_size,(unsigned)size);
#ifdef DEBUG
fprintf(stderr,">>> TC_EXPANDED: compress: nbytes=%u buf_size=%u size=%u\n",(unsigned)nbytes,(unsigned)*buf_size,(unsigned)size);
#endif
/* Replace buffer with one that is bigger than the input size */
newbuf = H5allocate_memory(size,0);
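Both TC_EXPANDED branches lean on the HDF5 filter contract: a filter callback may swap *buf for memory obtained from H5allocate_memory(), free the old buffer, adjust *buf_size, and return the number of valid bytes, with 0 signaling failure. A pared-down pass-through showing just that contract (a sketch; assumes nbytes <= *buf_size):

/* Sketch of the buffer-replacement pattern used by H5Z_filter_test. */
#include <string.h>
#include <hdf5.h>

static size_t
passthrough(size_t nbytes, size_t* buf_size, void** buf)
{
    void* newbuf = H5allocate_memory(*buf_size, 0); /* 0 => no zero-fill */
    if(newbuf == NULL) return 0;                    /* 0 means filter failure */
    memcpy(newbuf, *buf, nbytes);                   /* copy the valid bytes */
    H5free_memory(*buf);                            /* release the old buffer */
    *buf = newbuf;
    return nbytes;                                  /* valid bytes now in *buf */
}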
@ -218,7 +220,7 @@ extract1(void* field, size_t size, const unsigned int* params)
llp = (unsigned long long*)field;
*llp = u.ll;
break;
default: fprintf(stderr,"insert: unexpected size: %u\n",(unsigned)size); abort();
default: fprintf(stderr,">>> insert: unexpected size: %u\n",(unsigned)size); abort();
}
}

@ -247,7 +249,7 @@ paramcheck(size_t nparams, const unsigned int* params, struct All* extracted)
memset(&all,0,sizeof(all));

if(nparams != NPARAMS) {
fprintf(stderr,"Incorrect number of parameters: expected=%ld sent=%ld\n",(unsigned long)NPARAMS,(unsigned long)nparams);
fprintf(stderr,">>> Incorrect number of parameters: expected=%ld sent=%ld\n",(unsigned long)NPARAMS,(unsigned long)nparams);
goto fail;
}

@ -270,7 +272,7 @@ paramcheck(size_t nparams, const unsigned int* params, struct All* extracted)
#ifdef DEBUG
{
size_t i;
fprintf(stderr,"bigendian=%d nparams=%d params=\n",bigendian,nparams);
fprintf(stderr,">>> nparams=%lu params=\n",nparams);
for(i=0;i<nparams;i++) {
fprintf(stderr,"[%d] %ud %d %f\n", (unsigned int)i, params[i],(signed int)params[i],*(float*)&params[i]);
}
@ -285,7 +287,7 @@ fail:
static void
mismatch(const char* which)
{
fprintf(stderr,"mismatch: %s\n",which);
fprintf(stderr,">>> mismatch: %s\n",which);
fflush(stderr);
}

@ -129,6 +129,9 @@ top_builddir="$TOPBUILDDIR"
# Currently not used, but left as a Visual Studio placeholder.
# VS=Debug

# Set when using gdb
#DL=".libs/"

# srcdir may or may not be defined, but if not, then create it
if test "x$srcdir" = x ; then
# we need to figure out our directory
@ -169,11 +172,11 @@ fi

# We need to locate certain executables (and other things),
# capture absolute paths, and make visible
export NCDUMP="${top_builddir}/ncdump${VS}/ncdump${ext}"
export NCCOPY="${top_builddir}/ncdump${VS}/nccopy${ext}"
export NCGEN="${top_builddir}/ncgen${VS}/ncgen${ext}"
export NCGEN3="${top_builddir}/ncgen3${VS}/ncgen3${ext}"
export NCPATHCVT="${top_builddir}/ncdump${VS}/ncpathcvt${ext}"
export NCDUMP="${top_builddir}/ncdump${VS}/${DL}ncdump${ext}"
export NCCOPY="${top_builddir}/ncdump${VS}/${DL}nccopy${ext}"
export NCGEN="${top_builddir}/ncgen${VS}/${DL}ncgen${ext}"
export NCGEN3="${top_builddir}/ncgen3${VS}/${DL}ncgen3${ext}"
export NCPATHCVT="${top_builddir}/ncdump${VS}/${DL}ncpathcvt${ext}"

# Temporary hacks (until we have a test_utils directory)
# to locate certain specific test files
@ -14,7 +14,7 @@ isolate "testdir_uts3sdk"
# Create an isolation path for S3; build on the isolation directory
S3ISODIR="$ISODIR"
S3ISOPATH="/${S3TESTSUBTREE}"
S3ISOPATH="${S3ISOPATH}/$S3ISODIR"
S3ISOPATH="/${S3ISOPATH}/$S3ISODIR"

test_cleanup() {
${CMD} ${execdir}/../nczarr_test/s3util -u "${URL}" -k "${S3ISOPATH}" clear