Merge remote-tracking branch 'upstream/main' into DAOS_sync

This commit is contained in:
Ward Fisher 2024-06-25 15:24:04 -06:00
commit 787ea1f438
18 changed files with 131 additions and 95 deletions

View File

@ -226,6 +226,12 @@ include(GenerateExportHeader)
# Compiler and Linker Configuration
################################
# Set in support of https://github.com/Unidata/netcdf-c/issues/2700
if(${CMAKE_C_COMPILER_ID} MATCHES "Intel")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fhonor-infinities")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fhonor-infinities")
endif()
option(NETCDF_FIND_SHARED_LIBS "Find dynamically-built versions of dependent libraries" ${BUILD_SHARED_LIBS})
##
@ -488,12 +494,16 @@ endif()
# Turn off enable_netcdf4 because it will be used
# as a shorthand for ENABLE_HDF5|ENABLE_HDF4|ENABLE_NCZARR
set(NETCDF_ENABLE_NETCDF4 OFF CACHE BOOL "" FORCE)
option(NETCDF_ENABLE_DAP "Enable DAP2 and DAP4 Client." ON)
option(NETCDF_ENABLE_NCZARR "Enable NCZarr Client." ON)
option(NETCDF_ENABLE_PNETCDF "Build with parallel I/O for CDF-1, 2, and 5 files using PnetCDF." OFF)
set(NETCDF_ENABLE_CDF5 AUTO CACHE STRING "AUTO")
option(NETCDF_ENABLE_CDF5 "Enable CDF5 support" ON)
option(NETCDF_ENABLE_HDF4 "Enable HDF4 Read Support" OFF)
option(NETCDF_ENABLE_HDF4_FILE_TESTS "Enable HDF4 File Tests" ${NETCDF_ENABLE_HDF4})
if(NETCDF_ENABLE_HDF4)
set(USE_HDF4 ON)
endif()
# Netcdf-4 support (i.e. libsrc4) is required by more than just HDF5 (e.g. NCZarr)
# So depending on what above formats are enabled, enable netcdf-4
@ -532,22 +542,15 @@ else()
set(NETCDF_ENABLE_HDF4 OFF)
endif()
# Option Logging, only valid for netcdf4.
# Option Logging, only valid for netcdf4 dispatchers.
option(NETCDF_ENABLE_LOGGING "Enable Logging." OFF)
if(NOT NETCDF_ENABLE_NETCDF4)
set(NETCDF_ENABLE_LOGGING OFF)
endif()
if(NETCDF_ENABLE_LOGGING)
target_compile_definitions(netcdf PRIVATE LOGGING NETCDF_ENABLE_SET_LOG_LEVEL)
set(LOGGING ON)
set(NETCDF_ENABLE_SET_LOG_LEVEL ON)
endif()
option(NETCDF_ENABLE_SET_LOG_LEVEL_FUNC "Enable definition of nc_set_log_level()." ON)
if(NETCDF_ENABLE_NETCDF4 AND NOT NETCDF_ENABLE_LOGGING AND NETCDF_ENABLE_SET_LOG_LEVEL_FUNC)
target_compile_definitions(netcdf PRIVATE -DNETCDF_ENABLE_SET_LOG_LEVEL)
set(NETCDF_ENABLE_SET_LOG_LEVEL ON)
set(NETCDF_ENABLE_LOGGING OFF)
endif()
set(LOGGING ${NETCDF_ENABLE_LOGGING})
set(NETCDF_ENABLE_SET_LOG_LEVEL ${NETCDF_ENABLE_LOGGING})
# Option to allow for strict null file padding.
# See https://github.com/Unidata/netcdf-c/issues/657 for more information
option(NETCDF_ENABLE_STRICT_NULL_BYTE_HEADER_PADDING "Enable strict null byte header padding." OFF)
@ -1427,7 +1430,6 @@ endif(USE_HDF5)
if(USE_HDF4)
add_subdirectory(libhdf4)
add_subdirectory(hdf4_test)
endif(USE_HDF4)
if(NETCDF_ENABLE_DAP2)
@ -1466,6 +1468,8 @@ if(NETCDF_ENABLE_NCZARR)
DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/)
endif()
# Tests and files which depend on libnetcdf must be included
# *after* this line.
add_subdirectory(liblib)
if(NETCDF_ENABLE_PLUGINS)
@ -1486,6 +1490,9 @@ if(NETCDF_ENABLE_TESTS)
add_subdirectory(nctest)
endif()
add_subdirectory(nc_test)
if(USE_HDF4)
add_subdirectory(hdf4_test)
endif()
if(USE_HDF5)
include_directories(h5_test)
add_subdirectory(nc_test4)
@ -1664,6 +1671,7 @@ is_enabled(BUILD_SHARED_LIBS enable_shared)
is_enabled(NETCDF_ENABLE_V2_API HAS_NC2)
is_enabled(NETCDF_ENABLE_NETCDF4 HAS_NC4)
is_enabled(NETCDF_ENABLE_HDF4 HAS_HDF4)
is_enabled(USE_HDF4 HAS_HDF4)
is_enabled(USE_HDF5 HAS_HDF5)
is_enabled(OFF HAS_BENCHMARKS)
is_enabled(STATUS_PNETCDF HAS_PNETCDF)

View File

@ -7,6 +7,8 @@ This file contains a high-level description of this package's evolution. Release
## 4.9.3 - TBD
* Clean up the option code for NETCDF_ENABLE_SET_LOG_LEVEL\[_FUNC\]. See [Github #2931](https://github.com/Unidata/netcdf-c/issues/2931) for more information.
* Fix duplicate definition when using aws-sdk-cpp. See [Github #2928](https://github.com/Unidata/netcdf-c/issues/2928) for more information.
* Clean up various obsolete options and do some code refactoring. See [Github #2926](https://github.com/Unidata/netcdf-c/issues/2926) for more information.
* Convert the Zarr-related ENABLE_XXX options to NETCDF_ENABLE_XXX options (part of the cmake overhaul). See [Github #2923](https://github.com/Unidata/netcdf-c/issues/2923) for more information.
* Refactor macro `_FillValue` to `NC_FillValue` to avoid conflict with libc++ headers. See [Github #2858](https://github.com/Unidata/netcdf-c/issues/2858) for more information.

View File

@ -14,8 +14,8 @@ find_package(MakeDist)
################################
# HDF4
################################
if(NETCDF_ENABLE_HDF4)
set(USE_HDF4 ON )
if(USE_HDF4)
set(NETCDF_USE_HDF4 ON )
# Check for include files, libraries.
find_path(MFHDF_H_INCLUDE_DIR mfhdf.h)
@ -65,11 +65,17 @@ if(NETCDF_ENABLE_HDF4)
if(NOT JPEG_LIB)
message(FATAL_ERROR "HDF4 Support enabled but cannot find libjpeg")
endif()
set(HDF4_LIBRARIES ${JPEG_LIB} ${HDF4_LIBRARIES} )
set(HDF4_LIBRARIES ${JPEG_LIB} ${HDF4_LIBRARIES} CACHE STRING "")
message(STATUS "Found JPEG libraries: ${JPEG_LIB}")
target_link_libraries(netcdf
PRIVATE
${HDF4_LIBRARIES}
)
# Option to enable HDF4 file tests.
option(NETCDF_ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata resources site to test with (requires curl)." ON)
#option(NETCDF_ENABLE_HDF4_FILE_TESTS "Run HDF4 file tests. This fetches sample HDF4 files from the Unidata resources site to test with (requires curl)." ON)
if(NETCDF_ENABLE_HDF4_FILE_TESTS)
find_program(PROG_CURL NAMES curl)
if(PROG_CURL)
@ -77,10 +83,11 @@ if(NETCDF_ENABLE_HDF4)
else()
message(STATUS "Unable to locate 'curl'. Disabling hdf4 file tests.")
set(USE_HDF4_FILE_TESTS OFF )
set(NETCDF_ENABLE_HDF4_FILE_TESTS OFF)
endif()
set(USE_HDF4_FILE_TESTS ${USE_HDF4_FILE_TESTS} )
endif()
endif()
endif(USE_HDF4)
################################
# HDF5

View File

@ -25,13 +25,13 @@ message(STATUS "Checking for Deprecated Options")
list(APPEND opts BUILD_UTILITIES ENABLE_BENCHMARKS ENABLE_BYTERANGE ENABLE_CDF5 ENABLE_CONVERSION_WARNINGS)
list(APPEND opts ENABLE_DAP ENABLE_DAP2 ENABLE_DAP4 ENABLE_DISKLESS ENABLE_DOXYGEN ENABLE_ERANGE_FILL)
list(APPEND opts ENABLE_EXAMPLES ENABLE_EXAMPLES_TESTS ENABLE_EXTREME_NUMBERS ENABLE_FFIO ENABLE_FILTER_BLOSC)
list(APPEND opts ENABLEFILTER_BZ2 ENABLE_FILTER_SZIP ENABLE_FILTER_TESTING ENABLE_FILTER_ZSTD ENABLE_FSYNC)
list(APPEND opts ENABLE_FILTER_BZ2 ENABLE_FILTER_SZIP ENABLE_FILTER_TESTING ENABLE_FILTER_ZSTD ENABLE_FSYNC)
list(APPEND opts ENABLE_HDF4 ENABLE_HDF5 ENABLE_LARGE_FILE_SUPPORT ENABLE_LARGE_FILE_TESTS ENABLE_LIBXML2)
list(APPEND opts ENABLE_LOGGING ENABLE_METADATA_PERF_TESTS ENABLE_MMAP ENABLE_NCZARR ENABLE_NCZARR_FILTERS)
list(APPEND opts ENABLE_NCZARR_S3 ENABLE_NCZARR_ZIP ENABLE_NETCDF_4 ENABLE_PARALLEL4 ENABLE_PARALLEL_TESTS)
list(APPEND opts ENABLE_PLUGINS ENABLE_PNETCDF ENABLE_QUANTIZE ENABLE_REMOTE_FUNCTIONALITY ENABLE_S3 ENABLE_S3_AWS)
list(APPEND opts ENABLE_S3_INTERNAL ENABLE_STDIO ENABLE_STRICT_NULL_BYTE_HEADER_PADDING ENABLE_TESTS ENABLE_UNIT_TESTS)
list(APPEND opts FIND_SHARED_LIBS LIB_NAME)
list(APPEND opts FIND_SHARED_LIBS LIB_NAME ENABLE_HDF4_FILE_TESTS)
foreach(opt ${opts})
#MESSAGE(STATUS "Option: ${opt}")

View File

@ -148,6 +148,7 @@ macro(build_bin_test F)
add_executable(${F} "${CMAKE_CURRENT_BINARY_DIR}/${F}.c" ${ARGN})
endif()
target_link_libraries(${F} netcdf ${ALL_TLL_LIBS})
if(MSVC)
set_target_properties(${F}
PROPERTIES
@ -176,6 +177,8 @@ endmacro()
# Binary tests which are used by a script looking for a specific name.
macro(build_bin_test_no_prefix F)
build_bin_test(${F})
if(WIN32)
#SET_PROPERTY(TEST ${F} PROPERTY FOLDER "tests/")
set_target_properties(${F} PROPERTIES

View File

@ -475,10 +475,8 @@ with zip */
#cmakedefine LOGGING 1
/* If true, define nc_set_log_level. */
#cmakedefine ENABLE_SET_LOG_LEVEL 1
/* If true, define nc_set_log_level_func */
#cmakedefine NETCDF_ENABLE_SET_LOG_LEVEL_FUNC 1
#cmakedefine NETCDF_ENABLE_LOGGING 1
#cmakedefine NETCDF_ENABLE_SET_LOG_LEVEL 1
/* min blocksize for posixio. */
#cmakedefine NCIO_MINBLOCKSIZE ${NCIO_MINBLOCKSIZE}

View File

@ -450,17 +450,15 @@ AC_ARG_ENABLE([logging],
Ignored if netCDF-4 is not enabled.])])
test "x$enable_logging" = xyes || enable_logging=no
AC_MSG_RESULT([$enable_logging])
# Does the user want to turn off nc_set_log_level() function? (It will
# always be defined if --enable-logging is used.)
AC_MSG_CHECKING([whether nc_set_log_level() function is included (will do nothing unless enable-logging is also used)])
AC_ARG_ENABLE([set_log_level_func], [AS_HELP_STRING([--disable-set-log-level-func],
[disable the nc_set_log_level function])])
test "x$enable_set_log_level_func" = xno -a "x$enable_logging" = xno || enable_set_log_level_func=yes
if test "x$enable_set_log_level_func" = xyes -a "x$enable_hdf5" = xyes; then
AC_DEFINE([ENABLE_SET_LOG_LEVEL], 1, [If true, define nc_set_log_level.])
if test "x$enable_logging" = xyes; then
enable_set_log_level_func=yes
enable_set_log_level=yes
AC_DEFINE([NETCDF_ENABLE_SET_LOG_LEVEL], 1, [If true, enable nc_set_log_level function.])
else
enable_set_log_level_func=no
enable_set_log_level=no
fi
AC_MSG_RESULT($enable_set_log_level_func)
# CURLOPT_USERNAME is not defined until curl version 7.19.1
# CURLOPT_PASSWORD is not defined until curl version 7.19.1

View File

@ -6,18 +6,20 @@
# See netcdf-c/COPYRIGHT file for more info.
# Copy some test files from current source dir to out-of-tree build dir.
FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh ${CMAKE_CURRENT_SOURCE_DIR}/*.hdf4)
FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
IF(WIN32)
FILE(COPY ${COPY_FILES} DESTINATION ${RUNTIME_OUTPUT_DIRECTORY}/)
ENDIF()
file(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh ${CMAKE_CURRENT_SOURCE_DIR}/*.hdf4)
file(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
if(WIN32)
file(COPY ${COPY_FILES} DESTINATION ${RUNTIME_OUTPUT_DIRECTORY}/)
endif()
IF(USE_HDF4_FILE_TESTS AND NOT WIN32)
if(USE_HDF4_FILE_TESTS AND NOT WIN32)
build_bin_test_no_prefix(tst_interops2)
target_link_libraries(tst_interops2 netcdf ${ALL_TLL_LIBS})
build_bin_test_no_prefix(tst_interops3)
add_bin_test(hdf4_test tst_chunk_hdf4)
add_bin_test(hdf4_test tst_h4_lendian)
add_bin_test(hdf4_test tst_hdf4_extra)
add_sh_test(hdf4_test run_get_hdf4_files)
add_sh_test(hdf4_test run_formatx_hdf4)
ENDIF()
endif()

View File

@ -52,12 +52,10 @@ void nc_log(int severity, const char *fmt, ...);
#define BAIL_QUIET BAIL
#ifdef USE_NETCDF4
#ifndef ENABLE_SET_LOG_LEVEL
#ifndef NETCDF_ENABLE_SET_LOG_LEVEL
/* Define away any calls to nc_set_log_level(), if its not enabled. */
#define nc_set_log_level(e)
#endif /* ENABLE_SET_LOG_LEVEL */
#endif
#endif /* NETCDF_ENABLE_SET_LOG_LEVEL */
#endif /* LOGGING */

View File

@ -45,6 +45,7 @@ struct NCglobalstate;
extern "C" {
#endif
/* API for ncs3sdk_XXX.[c|cpp] */
EXTERNL int NC_s3sdkinitialize(void);
EXTERNL int NC_s3sdkfinalize(void);
EXTERNL void* NC_s3sdkcreateclient(NCS3INFO* context);
@ -60,8 +61,7 @@ EXTERNL int NC_s3sdksearch(void* s3client0, const char* bucket, const char* pref
EXTERNL int NC_s3sdkdeletekey(void* client0, const char* bucket, const char* pathkey, char** errmsgp);
/* From ds3util.c */
EXTERNL int NC_s3sdkinitialize(void);
EXTERNL int NC_s3sdkfinalize(void);
EXTERNL void NC_s3sdkenvironment(void);
EXTERNL int NC_getdefaults3region(NCURI* uri, const char** regionp);
EXTERNL int NC_s3urlprocess(NCURI* url, NCS3INFO* s3, NCURI** newurlp);

View File

@ -43,9 +43,6 @@ enum URLFORMAT {UF_NONE=0, UF_VIRTUAL=1, UF_PATH=2, UF_S3=3, UF_OTHER=4};
static const char* awsconfigfiles[] = {".aws/config",".aws/credentials",NULL};
#define NCONFIGFILES (sizeof(awsconfigfiles)/sizeof(char*))
static int ncs3_initialized = 0;
static int ncs3_finalized = 0;
/**************************************************/
/* Forward */
@ -56,38 +53,21 @@ static int awsparse(const char* text, NClist* profiles);
/**************************************************/
/* Capture environmental Info */
EXTERNL int
NC_s3sdkinitialize(void)
EXTERNL void
NC_s3sdkenvironment(void)
{
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
}
{
/* Get various environment variables as defined by the AWS sdk */
NCglobalstate* gs = NC_getglobalstate();
if(getenv("AWS_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_REGION"));
else if(getenv("AWS_DEFAULT_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_DEFAULT_REGION"));
else if(gs->aws.default_region == NULL)
gs->aws.default_region = nulldup(AWS_GLOBAL_DEFAULT_REGION);
gs->aws.access_key_id = nulldup(getenv("AWS_ACCESS_KEY_ID"));
gs->aws.config_file = nulldup(getenv("AWS_CONFIG_FILE"));
gs->aws.profile = nulldup(getenv("AWS_PROFILE"));
gs->aws.secret_access_key = nulldup(getenv("AWS_SECRET_ACCESS_KEY"));
}
return NC_NOERR;
}
EXTERNL int
NC_s3sdkfinalize(void)
{
if(!ncs3_finalized) {
ncs3_initialized = 0;
ncs3_finalized = 1;
}
return NC_NOERR;
/* Get various environment variables as defined by the AWS sdk */
NCglobalstate* gs = NC_getglobalstate();
if(getenv("AWS_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_REGION"));
else if(getenv("AWS_DEFAULT_REGION")!=NULL)
gs->aws.default_region = nulldup(getenv("AWS_DEFAULT_REGION"));
else if(gs->aws.default_region == NULL)
gs->aws.default_region = nulldup(AWS_GLOBAL_DEFAULT_REGION);
gs->aws.access_key_id = nulldup(getenv("AWS_ACCESS_KEY_ID"));
gs->aws.config_file = nulldup(getenv("AWS_CONFIG_FILE"));
gs->aws.profile = nulldup(getenv("AWS_PROFILE"));
gs->aws.secret_access_key = nulldup(getenv("AWS_SECRET_ACCESS_KEY"));
}
/**************************************************/

View File

@ -133,10 +133,9 @@ NC_s3sdkinitialize(void)
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
#ifdef DEBUG
//ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
//ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Debug;
ncs3options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Trace;
ncs3options.httpOptions.installSigPipeHandler = true;
ncs3options.loggingOptions.logger_create_fn = [] { return std::make_shared<Aws::Utils::Logging::ConsoleLogSystem>(Aws::Utils::Logging::LogLevel::Trace); };
@ -144,6 +143,9 @@ NC_s3sdkinitialize(void)
#endif
Aws::InitAPI(ncs3options);
/* Get environment information */
NC_s3sdkenvironment();
}
return NCUNTRACE(NC_NOERR);
}
@ -500,7 +502,6 @@ NC_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey, s
int stat = NC_NOERR;
const char* key = NULL;
const char* mcontent = (char*)content;
NCTRACE(11,"bucket=%s pathkey=%s count=%lld content=%p",bucket,pathkey,count,content);
AWSS3CLIENT s3client = (AWSS3CLIENT)s3client0;
@ -535,7 +536,7 @@ NC_s3sdkwriteobject(void* s3client0, const char* bucket, const char* pathkey, s
put_request.SetContentLength((long long)count);
std::shared_ptr<Aws::IOStream> data = std::shared_ptr<Aws::IOStream>(new Aws::StringStream());
data->rdbuf()->pubsetbuf((char*)content,count);
data->rdbuf()->pubsetbuf((char*)content,(std::streamsize)count);
put_request.SetBody(data);
auto put_result = AWSS3GET(s3client)->PutObject(put_request);
if(!put_result.IsSuccess()) {

View File

@ -108,6 +108,37 @@ static int queryinsert(NClist* list, char* ekey, char* evalue);
#define NT(x) ((x)==NULL?"null":x)
/**************************************************/
static int ncs3_initialized = 0;
static int ncs3_finalized = 0;
EXTERNL int
NC_s3sdkinitialize(void)
{
if(!ncs3_initialized) {
ncs3_initialized = 1;
ncs3_finalized = 0;
}
/* Get environment information */
NC_s3sdkenvironment(void);
return NC_NOERR;
}
EXTERNL int
NC_s3sdkfinalize(void)
{
if(!ncs3_finalized) {
ncs3_initialized = 0;
ncs3_finalized = 1;
}
return NC_NOERR;
}
/**************************************************/
#if 0
static void
dumps3info(NCS3INFO* s3info, const char* tag)

View File

@ -22,5 +22,7 @@ if (NETCDF_ENABLE_DLL)
target_compile_definitions(netcdfhdf4 PRIVATE DLL_NETCDF DLL_EXPORT)
endif()
target_link_libraries(netcdfhdf4 PUBLIC ${HDF4_LIBRARIES})
# Remember to package this file for CMake builds.
ADD_EXTRA_DIST(${libhdf4_SOURCES} CMakeLists.txt)

View File

@ -499,20 +499,21 @@ s3clear(void* s3client, const char* bucket, const char* rootkey)
{
int stat = NC_NOERR;
char** list = NULL;
char** p;
size_t nkeys = 0;
if(s3client && bucket && rootkey) {
if((stat = NC_s3sdksearch(s3client, bucket, rootkey, &nkeys, &list, NULL)))
goto done;
if(list != NULL) {
for(p=list;*p;p++) {
size_t i;
for(i=0;i<nkeys;i++) {
char* p = list[i];
/* If the key is the rootkey, skip it */
if(strcmp(rootkey,*p)==0) continue;
if(strcmp(rootkey,p)==0) continue;
#ifdef S3DEBUG
fprintf(stderr,"s3clear: %s\n",*p);
fprintf(stderr,"s3clear: %s\n",p);
#endif
if((stat = NC_s3sdkdeletekey(s3client, bucket, *p, NULL)))
if((stat = NC_s3sdkdeletekey(s3client, bucket, p, NULL)))
goto done;
}
}

View File

@ -1711,7 +1711,7 @@ nc4_normalize_name(const char *name, char *norm_name)
return NC_NOERR;
}
#ifdef ENABLE_SET_LOG_LEVEL
#ifdef NETCDF_ENABLE_SET_LOG_LEVEL
/**
* Initialize parallel I/O logging. For parallel I/O builds, open log
@ -1812,7 +1812,7 @@ nc_set_log_level(int new_level)
return NC_NOERR;
}
#endif /* ENABLE_SET_LOG_LEVEL */
#endif /* NETCDF_ENABLE_SET_LOG_LEVEL */
#if LOGGING
#define MAX_NESTS 10

View File

@ -82,8 +82,10 @@ void
markcdf4(const char* msg)
{
enhanced_flag = 1;
if(markcdf4_msg == NULL)
if(markcdf4_msg == NULL) {
markcdf4_msg = (char*)msg;
}
}
char*

View File

@ -32,4 +32,7 @@ testnoshape2() {
}
testnoshape1
if test "x$FEATURE_S3TESTS" = xyes ; then testnoshape2; fi
if test "x$FEATURE_S3TESTS" = xyes && test "x$FEATURE_S3_INTERNAL" = xyes ; then
# The aws-sdk-cpp driver does not support google storage
testnoshape2
fi