Replace ENABLE_S3 with NETCDF_ENABLE_S3

Kyle Shores 2024-03-18 15:40:19 -05:00
parent 78fb3dcf37
commit 8fd6dcb979
31 changed files with 108 additions and 108 deletions
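For context, here is a minimal sketch of how this rename shows up at configure time. The build layout and the particular option values are illustrative only, and this diff alone does not show whether the old unprefixed spellings remain as aliases, so rely on the prefixed forms:

````
# Before this commit (unprefixed option names, illustrative values):
cmake -S netcdf-c -B build -DENABLE_S3=ON -DENABLE_S3_INTERNAL=OFF

# After this commit (NETCDF_-prefixed option names):
cmake -S netcdf-c -B build -DNETCDF_ENABLE_S3=ON -DNETCDF_ENABLE_S3_INTERNAL=OFF
````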

View File

@@ -682,13 +682,13 @@ if(NOT WIN32)
endif()
# Options for S3 Support
option(ENABLE_S3 "Enable S3 support." OFF)
option(ENABLE_S3_INTERNAL "Enable S3 Internal support." OFF)
option(NETCDF_ENABLE_NCZARR_S3 "Enable NCZarr S3 support; Deprecated in favor of ENABLE_S3" OFF)
option(NETCDF_ENABLE_S3 "Enable S3 support." OFF)
option(NETCDF_ENABLE_S3_INTERNAL "Enable S3 Internal support." OFF)
option(NETCDF_ENABLE_NCZARR_S3 "Enable NCZarr S3 support; Deprecated in favor of NETCDF_ENABLE_S3" OFF)
if(NOT NETCDF_ENABLE_REMOTE_FUNCTIONALITY)
set(ENABLE_S3 OFF CACHE BOOL "" FORCE)
set(ENABLE_S3_INTERNAL OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_S3 OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_S3_INTERNAL OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_NCZARR_S3 OFF CACHE BOOL "" FORCE)
endif()
@@ -703,34 +703,34 @@ if(WITH_S3_TESTING)
message(WARNING "**** DO NOT USE WITH_S3_TESTING=ON UNLESS YOU HAVE ACCESS TO THE UNIDATA S3 BUCKET! ***")
endif()
# NETCDF_ENABLE_NCZARR_S3 is now an alias for ENABLE_S3 (but...)
if (NOT ENABLE_S3 AND NETCDF_ENABLE_NCZARR_S3)
set(ENABLE_S3 ON CACHE BOOL "NCARR S3" FORCE) # For back compatibility
# NETCDF_ENABLE_NCZARR_S3 is now an alias for NETCDF_ENABLE_S3 (but...)
if (NOT NETCDF_ENABLE_S3 AND NETCDF_ENABLE_NCZARR_S3)
set(NETCDF_ENABLE_S3 ON CACHE BOOL "NCARR S3" FORCE) # For back compatibility
endif()
unset(NETCDF_ENABLE_NCZARR_S3)
if(NOT NETCDF_ENABLE_REMOTE_FUNCTIONALITY)
message(WARNING "NETCDF_ENABLE_REMOTE_FUNCTIONALITY=NO => disable all s3 functionality")
set(ENABLE_S3 OFF CACHE BOOL "" FORCE)
set(ENABLE_S3_INTERNAL OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_S3 OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_S3_INTERNAL OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_NCZARR_S3 OFF CACHE BOOL "" FORCE)
set(NETCDF_ENABLE_HDF5_ROS3 OFF CACHE BOOL "Use ROS3" FORCE)
set(WITH_S3_TESTING OFF CACHE STRING "" FORCE)
endif()
if(ENABLE_S3)
if(NOT ENABLE_S3_AWS AND NOT ENABLE_S3_INTERNAL)
message(FATAL_ERROR "S3 support library not found; please specify option -DENABLE_S3=NO")
set(ENABLE_S3 OFF CACHE BOOL "S3 support" FORCE)
if(NETCDF_ENABLE_S3)
if(NOT NETCDF_ENABLE_S3_AWS AND NOT NETCDF_ENABLE_S3_INTERNAL)
message(FATAL_ERROR "S3 support library not found; please specify option -DNETCDF_ENABLE_S3=NO")
set(NETCDF_ENABLE_S3 OFF CACHE BOOL "S3 support" FORCE)
endif()
if(ENABLE_S3_AWS AND ENABLE_S3_INTERNAL)
if(NETCDF_ENABLE_S3_AWS AND NETCDF_ENABLE_S3_INTERNAL)
message(WARNING "Both aws-sdk-cpp and s3-internal enabled => use s3-internal")
set(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
set(NETCDF_ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
endif()
endif()
if(NOT ENABLE_S3)
if(NOT NETCDF_ENABLE_S3)
if(WITH_S3_TESTING STREQUAL "PUBLIC" OR WITH_S3_TESTING)
message(WARNING "S3 support is disabled => WITH_S3_TESTING=OFF")
set(WITH_S3_TESTING OFF CACHE STRING "" FORCE)
@@ -1399,7 +1399,7 @@ if(NETCDF_ENABLE_DAP4)
add_subdirectory(libdap4)
add_subdirectory(libncxml)
else()
if(ENABLE_S3_INTERNAL)
if(NETCDF_ENABLE_S3_INTERNAL)
add_subdirectory(libncxml)
endif()
endif()
@@ -1639,9 +1639,9 @@ is_enabled(ENABLE_ZERO_LENGTH_COORD_BOUND RELAX_COORD_BOUND)
is_enabled(USE_CDF5 HAS_CDF5)
is_enabled(NETCDF_ENABLE_ERANGE_FILL HAS_ERANGE_FILL)
is_enabled(HDF5_HAS_PAR_FILTERS HAS_PAR_FILTERS)
is_enabled(ENABLE_S3 HAS_S3)
is_enabled(ENABLE_S3_AWS HAS_S3_AWS)
is_enabled(ENABLE_S3_INTERNAL HAS_S3_INTERNAL)
is_enabled(NETCDF_ENABLE_S3 HAS_S3)
is_enabled(NETCDF_ENABLE_S3_AWS HAS_S3_AWS)
is_enabled(NETCDF_ENABLE_S3_INTERNAL HAS_S3_INTERNAL)
is_enabled(HAS_HDF5_ROS3 HAS_HDF5_ROS3)
is_enabled(NETCDF_ENABLE_NCZARR HAS_NCZARR)
is_enabled(NETCDF_ENABLE_NCZARR_ZIP HAS_NCZARR_ZIP)
@@ -1656,10 +1656,10 @@ is_enabled(HAVE_BLOSC HAS_BLOSC)
is_enabled(HAVE_BZ2 HAS_BZ2)
is_enabled(NETCDF_ENABLE_REMOTE_FUNCTIONALITY DO_REMOTE_FUNCTIONALITY)
if(ENABLE_S3_INTERNAL)
if(NETCDF_ENABLE_S3_INTERNAL)
set(WHICH_S3_SDK "internal")
set(NC_WHICH_S3_SDK "internal")
elseif(ENABLE_S3_AWS)
elseif(NETCDF_ENABLE_S3_AWS)
set(WHICH_S3_SDK "aws-sdk-cpp")
set(NC_WHICH_S3_SDK "aws-sdk-cpp")
else()
@@ -1668,14 +1668,14 @@ else()
endif()
if(WITH_S3_TESTING STREQUAL PUBLIC)
set(ENABLE_S3_TESTING "public")
set(NETCDF_ENABLE_S3_TESTING "public")
elseif(WITH_S3_TESTING)
set(ENABLE_S3_TESTING "yes")
set(ENABLE_S3_TESTALL "yes")
set(NETCDF_ENABLE_S3_TESTING "yes")
set(NETCDF_ENABLE_S3_TESTALL "yes")
elseif(NOT WITH_S3_TESTING)
set(ENABLE_S3_TESTING "no")
set(NETCDF_ENABLE_S3_TESTING "no")
else()
set(ENABLE_S3_TESTING "no")
set(NETCDF_ENABLE_S3_TESTING "no")
endif()
# The Unidata testing S3 bucket
@@ -1687,7 +1687,7 @@ set(S3TESTBUCKET "unidata-zarr-test-data" CACHE STRING "S3 test bucket")
set(S3TESTSUBTREE "netcdf-c" CACHE STRING "Working S3 path.")
# Build a unique id based on the date
string(TIMESTAMP TESTUID "%s")
if(ENABLE_S3_TESTING)
if(NETCDF_ENABLE_S3_TESTING)
file(APPEND "${CMAKE_CURRENT_BINARY_DIR}/s3cleanup_${PLATFORMUID}.uids" "${TESTUID}\n")
endif()

View File

@@ -88,9 +88,9 @@ NCDAP4TESTDIR = dap4_test
XML = libncxml
endif #DAP4
if ENABLE_S3_INTERNAL
if NETCDF_ENABLE_S3_INTERNAL
XML = libncxml # Internal S3 requires XML
endif #ENABLE_S3_INTERNAL
endif #NETCDF_ENABLE_S3_INTERNAL
# Build PnetCDF
if USE_PNETCDF
@@ -210,7 +210,7 @@ install-data-hook:
# Also track the S3 cleanup id
all-local: liblib/libnetcdf.la
echo ${PACKAGE_VERSION} > VERSION
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_TESTALL
rm -f ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
echo "@TESTUID@" >> ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
cat ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids | sort | uniq > ${abs_top_builddir}/tmp_@PLATFORMUID@.uids
@@ -218,7 +218,7 @@ if ENABLE_S3_TESTALL
mv ${abs_top_builddir}/tmp_@PLATFORMUID@.uids ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
endif
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_TESTALL
distclean-local:
rm -f ${abs_top_builddir}/s3cleanup_@PLATFORMUID@.uids
endif

View File

@@ -395,20 +395,20 @@ endif ()
# Note we check for the library after checking for enable_s3
# because for some reason this screws up if we unconditionally test for sdk
# and it is not available. Fix someday
if(ENABLE_S3)
if(NOT ENABLE_S3_INTERNAL)
if(NETCDF_ENABLE_S3)
if(NOT NETCDF_ENABLE_S3_INTERNAL)
# See if aws-s3-sdk is available
find_package(AWSSDK REQUIRED COMPONENTS s3;transfer)
if(AWSSDK_FOUND)
set(ENABLE_S3_AWS ON CACHE BOOL "S3 AWS" FORCE)
set(NETCDF_ENABLE_S3_AWS ON CACHE BOOL "S3 AWS" FORCE)
target_include_directories(netcdf
PRIVATE
${AWSSDK_INCLUDE_DIR}
)
else(AWSSDK_FOUND)
set(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
set(NETCDF_ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
endif(AWSSDK_FOUND)
else(NOT ENABLE_S3_INTERNAL)
else(NOT NETCDF_ENABLE_S3_INTERNAL)
# Find crypto libraries required with testing with the internal s3 api.
#find_library(SSL_LIB NAMES ssl openssl)
find_package(OpenSSL REQUIRED)
@@ -421,9 +421,9 @@ if(ENABLE_S3)
# message(FATAL_ERROR "Can't find a crypto library, required by S3_INTERNAL")
#endif(NOT CRYPTO_LIB)
endif(NOT ENABLE_S3_INTERNAL)
endif(NOT NETCDF_ENABLE_S3_INTERNAL)
else()
set(ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
set(NETCDF_ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE)
endif()
################################

View File

@@ -158,13 +158,13 @@ are set when opening a binary file on Windows. */
#cmakedefine NETCDF_ENABLE_PLUGINS 1
/* if true, enable S3 support */
#cmakedefine ENABLE_S3 1
#cmakedefine NETCDF_ENABLE_S3 1
/* if true, AWS S3 SDK is available */
#cmakedefine ENABLE_S3_AWS 1
#cmakedefine NETCDF_ENABLE_S3_AWS 1
/* if true, Force use of S3 internal library */
#cmakedefine ENABLE_S3_INTERNAL 1
#cmakedefine NETCDF_ENABLE_S3_INTERNAL 1
/* if true, enable S3 testing*/
#cmakedefine WITH_S3_TESTING "PUBLIC"

View File

@@ -969,23 +969,23 @@ else
fi
if test "x$enable_s3" = xyes ; then
AC_DEFINE([ENABLE_S3], [1], [if true, build netcdf-c with S3 support enabled])
AC_DEFINE([NETCDF_ENABLE_S3], [1], [if true, build netcdf-c with S3 support enabled])
fi
if test "x$enable_s3_aws" = xyes ; then
LIBS="$LIBS$S3LIBS"
AC_DEFINE([ENABLE_S3_AWS], [1], [If true, then use aws S3 library])
AC_DEFINE([NETCDF_ENABLE_S3_AWS], [1], [If true, then use aws S3 library])
fi
if test "x$enable_s3_internal" = xyes ; then
AC_DEFINE([ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library])
AC_DEFINE([NETCDF_ENABLE_S3_INTERNAL], [1], [If true, then use internal S3 library])
fi
AC_DEFINE_UNQUOTED([WITH_S3_TESTING], [$with_s3_testing], [control S3 testing.])
if test "x$with_s3_testing" = xyes ; then
AC_MSG_WARN([*** DO NOT SPECIFY WITH_S3_TESTING=YES UNLESS YOU HAVE ACCESS TO THE UNIDATA S3 BUCKET! ***])
AC_DEFINE([ENABLE_S3_TESTALL], [yes], [control S3 testing.])
AC_DEFINE([NETCDF_ENABLE_S3_TESTALL], [yes], [control S3 testing.])
fi
fi
@@ -1930,12 +1930,12 @@ AM_CONDITIONAL(NETCDF_ENABLE_BYTERANGE, [test "x$enable_byterange" = xyes])
AM_CONDITIONAL(RELAX_COORD_BOUND, [test "xyes" = xyes])
AM_CONDITIONAL(HAS_PAR_FILTERS, [test x$hdf5_supports_par_filters = xyes ])
# We need to simplify the set of S3 and Zarr flag combinations
AM_CONDITIONAL(ENABLE_S3, [test "x$enable_s3" = xyes])
AM_CONDITIONAL(ENABLE_S3_AWS, [test "x$enable_s3_aws" = xyes])
AM_CONDITIONAL(ENABLE_S3_INTERNAL, [test "x$enable_s3_internal" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3, [test "x$enable_s3" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_AWS, [test "x$enable_s3_aws" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_INTERNAL, [test "x$enable_s3_internal" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR, [test "x$enable_nczarr" = xyes])
AM_CONDITIONAL(ENABLE_S3_TESTPUB, [test "x$with_s3_testing" != xno]) # all => public
AM_CONDITIONAL(ENABLE_S3_TESTALL, [test "x$with_s3_testing" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_S3_TESTPUB, [test "x$with_s3_testing" != xno]) # all => public
AM_CONDITIONAL(NETCDF_ENABLE_S3_TESTALL, [test "x$with_s3_testing" = xyes])
AM_CONDITIONAL(NETCDF_ENABLE_NCZARR_ZIP, [test "x$enable_nczarr_zip" = xyes])
AM_CONDITIONAL(HAVE_DEFLATE, [test "x$have_deflate" = xyes])
AM_CONDITIONAL(HAVE_SZ, [test "x$have_sz" = xyes])
@@ -2045,7 +2045,7 @@ AC_SUBST(HAS_S3_AWS,[$enable_s3_aws])
AC_SUBST(HAS_S3_INTERNAL,[$enable_s3_internal])
AC_SUBST(HAS_HDF5_ROS3,[$has_hdf5_ros3])
AC_SUBST(HAS_NCZARR,[$enable_nczarr])
AC_SUBST(ENABLE_S3_TESTING,[$with_s3_testing])
AC_SUBST(NETCDF_ENABLE_S3_TESTING,[$with_s3_testing])
AC_SUBST(HAS_NCZARR_ZIP,[$enable_nczarr_zip])
AC_SUBST(DO_NCZARR_ZIP_TESTS,[$enable_nczarr_zip])
AC_SUBST(HAS_QUANTIZE,[$enable_quantize])
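A point worth noting from the configure.ac hunks above: the user-facing Autoconf switches and their shell variables (enable_s3, enable_s3_aws, enable_s3_internal, with_s3_testing) keep their old names; only the generated AC_DEFINE macros, the AM_CONDITIONAL names, and the S3-testing substitution gain the NETCDF_ prefix. A hedged sketch of the resulting workflow, with illustrative flags:

````
# The configure command line is unchanged by this commit:
./configure --enable-s3 --enable-s3-internal

# ...but the generated config.h now carries the prefixed macros
# (NETCDF_ENABLE_S3, NETCDF_ENABLE_S3_INTERNAL) that the C sources test:
grep -E 'define NETCDF_ENABLE_S3' config.h
````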

View File

@@ -119,8 +119,8 @@ Note also that if S3 support is enabled, then you need to have a C++ compiler in
The necessary CMake flags are as follows (with defaults)
1. *-DENABLE_S3* -- Controll S3 support
2. *-DENABLE_S3_INTERNAL* -- Force use of the *nch5s3comms* SDK instead of the *aws-cpp-sdk*.
1. *-DNETCDF_ENABLE_S3* -- Controll S3 support
2. *-DNETCDF_ENABLE_S3_INTERNAL* -- Force use of the *nch5s3comms* SDK instead of the *aws-cpp-sdk*.
3. *-DWITH-S3-TESTING_=ON|OFF|PUBLIC -- "ON" means do all S3 tests, "OFF" means do no S3 testing, "PUBLIC" means do only those tests that involve publically accessible S3 data.
Note that unlike Automake, CMake can properly locate C++ libraries, so it should not be necessary to specify _-laws-cpp-sdk-s3_ assuming that the aws s3 libraries are installed in the default location.
@@ -267,7 +267,7 @@ This is an experimental SDK provided internally in the netcdf-c library.
### Build Options
In order to enable this SDK, the Automake option *--enable-s3-internal* or the CMake option *-DENABLE_S3_INTERNAL=ON* must be specified.
In order to enable this SDK, the Automake option *--enable-s3-internal* or the CMake option *-DNETCDF_ENABLE_S3_INTERNAL=ON* must be specified.
### Testing S3 Support {#nccloud_testing_S3_support}

View File

@@ -711,7 +711,7 @@ and specific script files.
The actual cleanup requires different approaches for cmake and for automake.
In cmake, the CTestCustom.cmake mechanism is used and contains the following command:
````
IF(ENABLE_S3_TESTING)
IF(NETCDF_ENABLE_S3_TESTING)
# Assume run in top-level CMAKE_BINARY_DIR
set(CTEST_CUSTOM_POST_TEST "bash -x ${CMAKE_BINARY_DIR}/s3cleanup.sh")
ENDIF()
@@ -722,7 +722,7 @@ because it is invoked after all tests are run in the nczarr_test
directory. So nczarr_test/Makefile.am contains the following
equivalent code:
````
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_TESTALL
check-local:
bash -x ${top_srcdir}/s3cleanup.sh
endif

View File

@@ -527,7 +527,7 @@ also test S3 support with this option.
Enabling S3 support is controlled by this cmake option:
````
-DENABLE_S3=ON
-DNETCDF_ENABLE_S3=ON
````
However, to find the aws sdk libraries,
the following environment variables must be set:

View File

@@ -65,7 +65,7 @@ typedef struct NC_HDF5_FILE_INFO {
#if defined(NETCDF_ENABLE_BYTERANGE)
int byterange;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
struct NCauth* auth;
#endif
} NC_HDF5_FILE_INFO_T;

View File

@@ -26,7 +26,7 @@ typedef struct NC_HTTP_STATE {
struct NCURI* url; /* parsed url */
long httpcode;
char* errmsg; /* do not free if format is HTTPCURL */
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
struct NC_HTTP_S3 {
void* s3client;
struct NCS3INFO* info;

View File

@@ -17,7 +17,7 @@ if NETCDF_ENABLE_NCZARR
AM_CPPFLAGS += -I${top_srcdir}/libnczarr
endif
if ENABLE_S3_AWS
if NETCDF_ENABLE_S3_AWS
AM_LDFLAGS += -lstdc++
endif

View File

@@ -50,8 +50,8 @@ if(NETCDF_ENABLE_BYTERANGE)
)
ENDIF(NETCDF_ENABLE_BYTERANGE)
IF(ENABLE_S3)
if(ENABLE_S3_INTERNAL)
IF(NETCDF_ENABLE_S3)
if(NETCDF_ENABLE_S3_INTERNAL)
target_sources(dispatch
PRIVATE
ncs3sdk_h5.c nch5s3comms.c nch5s3comms.h nccurl_sha256.c nccurl_sha256.h nccurl_hmac.c nccurl_hmac.h nccurl_setup.h
@@ -88,8 +88,8 @@ IF(NETCDF_ENABLE_NCZARR)
target_include_directories(dispatch PUBLIC ../libnczarr)
endif(NETCDF_ENABLE_NCZARR)
if(ENABLE_S3)
if(ENABLE_S3_AWS)
if(NETCDF_ENABLE_S3)
if(NETCDF_ENABLE_S3_AWS)
target_include_directories(dispatch PUBLIC ${AWSSDK_INCLUDE_DIRS})
if(NOT MSVC)
target_compile_features(dispatch PUBLIC cxx_std_11)

View File

@@ -47,8 +47,8 @@ if NETCDF_ENABLE_BYTERANGE
libdispatch_la_SOURCES += dhttp.c
endif # NETCDF_ENABLE_BYTERANGE
if ENABLE_S3
if ENABLE_S3_INTERNAL
if NETCDF_ENABLE_S3
if NETCDF_ENABLE_S3_INTERNAL
# Renamed to avoid conflicts with the HDF5 files
libdispatch_la_SOURCES += ncs3sdk_h5.c nch5s3comms.c nch5s3comms.h ncutil.h nccurl_setup.h \
nccurl_sha256.c nccurl_sha256.h nccurl_hmac.c nccurl_hmac.h

View File

@@ -28,7 +28,7 @@ See LICENSE.txt for license information.
#include <curl/curl.h>
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
#include "ncs3sdk.h"
#endif

View File

@@ -26,7 +26,7 @@
#include "ncuri.h"
#include "ncauth.h"
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
#include "ncs3sdk.h"
#endif
#include "nchttp.h"
@@ -100,7 +100,7 @@ nc_http_open_verbose(const char* path, int verbose, NC_HTTP_STATE** statep)
{stat = NCTHROW(NC_ENOMEM); goto done;}
state->path = strdup(path);
state->url = uri; uri = NULL;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
state->format = (NC_iss3(state->url,NULL)?HTTPS3:HTTPCURL);
#else
state->format = HTTPCURL;
@@ -122,7 +122,7 @@ nc_http_open_verbose(const char* path, int verbose, NC_HTTP_STATE** statep)
if(cstat != CURLE_OK) {stat = NCTHROW(NC_ECURL); goto done;}
}
} break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
if((state->s3.info = (NCS3INFO*)calloc(1,sizeof(NCS3INFO)))==NULL)
{stat = NCTHROW(NC_ENOMEM); goto done;}
@@ -158,7 +158,7 @@ nc_http_close(NC_HTTP_STATE* state)
ncbytesfree(state->curl.response.buf);
nclistfreeall(state->curl.request.headers); state->curl.request.headers = NULL;
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
if(state->s3.s3client)
NC_s3sdkclose(state->s3.s3client, state->s3.info, 0, NULL);
@@ -203,7 +203,7 @@ nc_http_reset(NC_HTTP_STATE* state)
(void)CURLERR(curl_easy_setopt(state->curl.curl, CURLOPT_READDATA, NULL));
headersoff(state);
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3:
break; /* Done automatically */
#endif
@@ -250,7 +250,7 @@ nc_http_read(NC_HTTP_STATE* state, size64_t start, size64_t count, NCbytes* buf)
if((stat = execute(state)))
goto done;
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
/* Make sure buf has enough space allocated */
ncbytessetalloc(buf,count);
@@ -301,7 +301,7 @@ nc_http_write(NC_HTTP_STATE* state, NCbytes* payload)
if((stat = execute(state)))
goto done;
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3:
if((stat = NC_s3sdkwriteobject(state->s3.s3client,
state->s3.info->bucket,
@@ -357,7 +357,7 @@ nc_http_size(NC_HTTP_STATE* state, long long* sizep)
if((stat = lookupheader(state,"content-length",&hdr))==NC_NOERR)
sscanf(hdr,"%llu",sizep);
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case HTTPS3: {
size64_t len = 0;
if((stat = NC_s3sdkinfo(state->s3.s3client,state->s3.info->bucket,state->s3.info->rootkey,&len,&state->errmsg))) goto done;

View File

@@ -27,7 +27,7 @@
#include "nclog.h"
#include "ncrc.h"
#include "nchttp.h"
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
#include "ncs3sdk.h"
#endif
@@ -61,7 +61,7 @@ struct MagicFile {
#endif
char* curlurl; /* url to use with CURLOPT_SET_URL */
NC_HTTP_STATE* state;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
NCS3INFO s3;
void* s3client;
char* errmsg;
@@ -902,7 +902,7 @@ NC_infermodel(const char* path, int* omodep, int iscreate, int useparallel, void
ncurisetfragments(uri,sfrag);
nullfree(sfrag); sfrag = NULL;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
/* If s3, then rebuild the url */
if(NC_iss3(uri,NULL)) {
NCURI* newuri = NULL;

View File

@@ -221,7 +221,7 @@ nc4_close_netcdf4_file(NC_FILE_INFO_T *h5, int abort, NC_memio *memio)
NC4_clear_provenance(&h5->provenance);
ncurifree(hdf5_info->uri);
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
/* Free the http info */
NC_authfree(hdf5_info->auth);
#endif

View File

@@ -50,7 +50,7 @@ if(NETCDF_ENABLE_NCZARR_ZIP)
set(libnczarr_SOURCES ${libnczarr_SOURCES} zmap_zip.c)
endif()
if(ENABLE_S3)
if(NETCDF_ENABLE_S3)
set(libnczarr_SOURCES ${libnczarr_SOURCES} zmap_s3sdk.c)
endif()

View File

@@ -72,9 +72,9 @@ if NETCDF_ENABLE_NCZARR_FILTERS
libnczarr_la_SOURCES += zfilter.c
endif
if ENABLE_S3
if NETCDF_ENABLE_S3
libnczarr_la_SOURCES += zmap_s3sdk.c
if ENABLE_S3_AWS
if NETCDF_ENABLE_S3_AWS
AM_CXXFLAGS += -std=c++11
endif
endif

View File

@@ -85,7 +85,7 @@ NCZ_finalize_internal(void)
#ifdef NETCDF_ENABLE_NCZARR_FILTERS
NCZ_filter_finalize();
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
NCZ_s3finalize();
#endif
return NC_NOERR;

View File

@@ -22,7 +22,7 @@ nczmap_features(NCZM_IMPL impl)
#ifdef NETCDF_ENABLE_NCZARR_ZIP
case NCZM_ZIP: return zmap_zip.features;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3: return zmap_s3sdk.features;
#endif
default: break;
@@ -58,7 +58,7 @@ nczmap_create(NCZM_IMPL impl, const char *path, int mode, size64_t flags, void*
if(stat) goto done;
break;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3:
stat = zmap_s3sdk.create(path, mode, flags, parameters, &map);
if(stat) goto done;
@@ -96,7 +96,7 @@ nczmap_open(NCZM_IMPL impl, const char *path, int mode, size64_t flags, void* pa
if(stat) goto done;
break;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3:
stat = zmap_s3sdk.open(path, mode, flags, parameters, &map);
if(stat) goto done;
@@ -127,7 +127,7 @@ nczmap_truncate(NCZM_IMPL impl, const char *path)
if((stat = zmap_zip.truncate(path))) goto done;
break;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3:
if((stat = zmap_s3sdk.truncate(path))) goto done;
break;

View File

@@ -220,7 +220,7 @@ extern NCZMAP_DS_API zmap_nz4;
#ifdef NETCDF_ENABLE_NCZARR_ZIP
extern NCZMAP_DS_API zmap_zip;
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
extern NCZMAP_DS_API zmap_s3sdk;
#endif
@@ -323,7 +323,7 @@ EXTERNL int nczmap_close(NCZMAP* map, int deleteit);
EXTERNL int nczmap_create(NCZM_IMPL impl, const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp);
EXTERNL int nczmap_open(NCZM_IMPL impl, const char *path, int mode, size64_t constraints, void* parameters, NCZMAP** mapp);
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
EXTERNL void NCZ_s3finalize(void);
#endif

View File

@@ -42,9 +42,9 @@ ENDif (USE_FFIO)
if (NETCDF_ENABLE_BYTERANGE)
list(APPEND libsrc_SOURCES httpio.c)
if (ENABLE_S3)
if (NETCDF_ENABLE_S3)
list(APPEND libsrc_SOURCES s3io.c)
endif(ENABLE_S3)
endif(NETCDF_ENABLE_S3)
endif(NETCDF_ENABLE_BYTERANGE)
add_library(netcdf3 OBJECT ${libsrc_SOURCES})

View File

@@ -32,7 +32,7 @@ endif !USE_FFIO
if NETCDF_ENABLE_BYTERANGE
libnetcdf3_la_SOURCES += httpio.c
if ENABLE_S3
if NETCDF_ENABLE_S3
libnetcdf3_la_SOURCES += s3io.c
endif

View File

@@ -41,7 +41,7 @@ extern int ffio_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** co
extern int httpio_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** const);
#endif
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
extern int s3io_open(const char*,int,off_t,size_t,size_t*,void*,ncio**,void** const);
#endif
@@ -107,7 +107,7 @@ ncio_open(const char *path, int ioflags,
if(modetest == NC_HTTP) {
return httpio_open(path,ioflags,igeto,igetsz,sizehintp,parameters,iopp,mempp);
}
# ifdef ENABLE_S3
# ifdef NETCDF_ENABLE_S3
if(modetest == NC_S3SDK) {
return s3io_open(path,ioflags,igeto,igetsz,sizehintp,parameters,iopp,mempp);
}

View File

@@ -137,11 +137,11 @@ IF(ENABLE_TESTS)
TARGET_INCLUDE_DIRECTORIES(zs3parse PUBLIC ../libnczarr)
build_bin_test_with_util_lib(zmapio ut_util)
IF(ENABLE_S3 AND NOT WITH_S3_TESTING STREQUAL "NO")
IF(NETCDF_ENABLE_S3 AND NOT WITH_S3_TESTING STREQUAL "NO")
# Helper programs for testing
BUILD_BIN_TEST(s3util ${COMMONSRC})
# Pure AWS Test
IF(ENABLE_S3_AWS)
IF(NETCDF_ENABLE_S3_AWS)
SET(TMP_CMAKE_CXX_STANDARD ${MAKE_CXX_STANDARD})
SET(CMAKE_CXX_STANDARD 11)
ADD_EXECUTABLE(tst_pure_awssdk tst_pure_awssdk.cpp)
@@ -159,7 +159,7 @@ IF(ENABLE_TESTS)
ENDIF(MSVC)
SET(CMAKE_CXX_STANDARD ${TMP_CMAKE_CXX_STANDARD})
ENDIF(ENABLE_S3_AWS)
ENDIF(NETCDF_ENABLE_S3_AWS)
TARGET_INCLUDE_DIRECTORIES(s3util PUBLIC ../libnczarr)
endif()

View File

@@ -179,11 +179,11 @@ zmapio_SOURCES = zmapio.c
noinst_PROGRAMS += zs3parse
zs3parse_SOURCES = zs3parse.c
if ENABLE_S3
if NETCDF_ENABLE_S3
noinst_PROGRAMS += s3util
s3util_SOURCES = s3util.c
if ENABLE_S3_TESTALL
if ENABLE_S3_AWS
if NETCDF_ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_AWS
check_PROGRAMS += tst_pure_awssdk
tst_pure_awssdk_SOURCES = tst_pure_awssdk.cpp
AM_CXXFLAGS += -std=c++11
@@ -344,7 +344,7 @@ clean-local:
rm -fr tmp_*.nc tmp_*.zarr tst_quantize*.zarr tmp*.file results.file results.s3 results.zip
rm -fr rcmiscdir ref_power_901_constants.file
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3_TESTALL
check-local:
bash ${abs_top_builddir}/s3cleanup.sh
endif

View File

@@ -289,7 +289,7 @@ rootpathfor(const char* path)
case NCZM_ZIP:
rootpath = strdup("/"); /*constant*/
break;
#ifdef ENABLE_S3
#ifdef NETCDF_ENABLE_S3
case NCZM_S3: {
char* p = NULL;
/* Split the path part */

View File

@@ -40,7 +40,7 @@ FEATURE_S3_AWS=@HAS_S3_AWS@
FEATURE_S3_INTERNAL=@HAS_S3_INTERNAL@
FEATURE_S3=@HAS_S3@
FEATURE_NCZARR=@HAS_NCZARR@
FEATURE_S3TESTS=@ENABLE_S3_TESTING@
FEATURE_S3TESTS=@NETCDF_ENABLE_S3_TESTING@
FEATURE_NCZARR_ZIP=@DO_NCZARR_ZIP_TESTS@
FEATURE_LARGE_TESTS=@DO_LARGE_TESTS@

View File

@@ -32,7 +32,7 @@ ENDIF(NETCDF_ENABLE_HDF5)
add_bin_test(unit_test test_pathcvt)
IF(NETCDF_BUILD_UTILITIES)
IF(ENABLE_S3 AND WITH_S3_TESTING)
IF(NETCDF_ENABLE_S3 AND WITH_S3_TESTING)
# SDK Test
build_bin_test(test_s3sdk ${XGETOPTSRC})
add_sh_test(unit_test run_s3sdk)

View File

@@ -38,8 +38,8 @@ TESTS += tst_nc4internal
TESTS += run_reclaim_tests.sh
endif # USE_HDF5
if ENABLE_S3
if ENABLE_S3_TESTALL
if NETCDF_ENABLE_S3
if NETCDF_ENABLE_S3_TESTALL
check_PROGRAMS += test_s3sdk
TESTS += run_s3sdk.sh
endif