## This is a CMake file, part of Unidata's netCDF package.
# Copyright 2012-2018, see the COPYRIGHT file for more information.
#
##################################
# Set Project Properties
##################################
cmake_minimum_required ( VERSION 3.13.0 )
# Project Name
project ( netCDF
LANGUAGES C CXX
HOMEPAGE_URL "https://www.unidata.ucar.edu/software/netcdf/"
DESCRIPTION "NetCDF is a set of software libraries and machine-independent data formats that support the creation, access, and sharing of array-oriented scientific data."
VERSION 4.9.3
)

# Add custom CMake Module
set ( CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules/;${PROJECT_SOURCE_DIR}/cmake"
CACHE INTERNAL "Location of our custom CMake modules." )

set ( PACKAGE "netCDF" CACHE STRING "" )
include ( netcdf_functions_macros )
# Backport of built-in `PROJECT_IS_TOP_LEVEL` from CMake 3.21
if ( NOT DEFINED NETCDF_IS_TOP_LEVEL )
set ( NETCDF_IS_TOP_LEVEL OFF )
if ( CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR )
set ( NETCDF_IS_TOP_LEVEL ON )
endif ( )
endif ( )
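# Illustrative use of the flag above (not part of the build logic itself):
# projects embedding netCDF via add_subdirectory() can key optional,
# top-level-only behaviour off it, for example:
#
#   if(NETCDF_IS_TOP_LEVEL)
#     include(CTest)
#   endif()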
################################
# The target
################################
add_library ( netcdf )
add_library ( netCDF::netcdf ALIAS netcdf )
#####
# Version Info:
#
# Release Version
# Library Version
# SO Version
#
# SO Version is computed from library version. See:
# http://www.gnu.org/software/libtool/manual/libtool.html#Libtool-versioning
#####
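# Worked example of the libtool rule referenced above (for orientation only):
# with "-version-info current:revision:age" the SONAME major number is
# current - age, so 19:0:0 yields libnetcdf.so.19 -- which is what the
# netCDF_LIB_VERSION / netCDF_SO_VERSION values below encode.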
set ( NC_VERSION_NOTE "-development" )
set ( netCDF_VERSION ${PROJECT_VERSION}${NC_VERSION_NOTE} )
set ( VERSION ${netCDF_VERSION} )
set ( NC_VERSION ${netCDF_VERSION} )
set ( PACKAGE_VERSION ${VERSION} )

# These values should match those in configure.ac
set ( netCDF_LIB_VERSION 19 )
set ( netCDF_SO_VERSION 19 )

# Version of the dispatch table. This must match the value in
# configure.ac.
set ( NC_DISPATCH_VERSION 5 )
# Get system configuration, Use it to determine osname, os release, cpu. These
# will be used when committing to CDash.
find_program ( UNAME NAMES uname )
if ( UNAME )
getuname ( osname -s )
getuname ( osrel -r )
getuname ( cpu -m )
set ( TMP_BUILDNAME "${osname}-${osrel}-${cpu}" )
endif ( )
# Define some Platforms
if ( osname MATCHES "CYGWIN.*" )
set ( ISCYGWIN yes )
endif ( )

if ( osname MATCHES "Darwin.*" )
set ( ISOSX yes )
endif ( )
if ( MSVC )
set ( ISMSVC yes )
endif ( )

if ( osname MATCHES "MINGW.*" OR osname MATCHES "MSYS.*" )
set ( ISMINGW yes )
set ( MINGW yes )
endif ( )
###
# Allow for some customization of the buildname.
# This will make it easier to identify different builds,
# based on values passed from command line/shell scripts.
#
# For ctest scripts, we can use CTEST_BUILD_NAME.
###
set ( BUILDNAME_PREFIX "" CACHE STRING "" )
set ( BUILDNAME_SUFFIX "" CACHE STRING "" )

if ( BUILDNAME_PREFIX )
set ( TMP_BUILDNAME "${BUILDNAME_PREFIX}-${TMP_BUILDNAME}" )
endif ( )

if ( BUILDNAME_SUFFIX )
set ( TMP_BUILDNAME "${TMP_BUILDNAME}-${BUILDNAME_SUFFIX}" )
endif ( )

if ( NOT BUILDNAME )
set ( BUILDNAME "${TMP_BUILDNAME}" CACHE STRING "Build name variable for CDash" )
endif ( )
###
# End BUILDNAME customization.
###
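# Example (illustrative only) of driving the customization above from the
# command line:
#
#   cmake -DBUILDNAME_PREFIX=nightly -DBUILDNAME_SUFFIX=gcc13 ..
#
# which would report to CDash under a name such as
# "nightly-Linux-<osrel>-x86_64-gcc13".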
# For CMAKE_INSTALL_LIBDIR
include ( GNUInstallDirs )

if ( MSVC )
set_property ( GLOBAL PROPERTY USE_FOLDERS ON )
target_compile_options ( netcdf PRIVATE "/utf-8" )
endif ( )
# auto-configure style checks, other CMake modules.
include ( CheckLibraryExists )
include ( CheckIncludeFile )
include ( CheckIncludeFiles )
include ( CheckTypeSize )
include ( CheckFunctionExists )
include ( CheckCXXSourceCompiles )
include ( CheckCSourceCompiles )
include ( TestBigEndian )
include ( CheckSymbolExists )
include ( GetPrerequisites )
include ( CheckCCompilerFlag )
# A check to see if the system is big endian
TEST_BIG_ENDIAN ( BIGENDIAN )
if ( ${BIGENDIAN} )
set ( WORDS_BIGENDIAN "1" )
endif ( )
# Define a function to convert various true or false values
# to either TRUE|FALSE (uppercase).
# If value is not a recognized boolean, then return NOTFOUND
#1, ON, YES, TRUE, Y,
#0, OFF, NO, FALSE, N, IGNORE, NOTFOUND -NOTFOUND ""
2024-01-18 06:07:22 +08:00
set ( TRUELIST "on;yes;y;true" )
set ( FALSELIST "off;no;n;false;0;ignore;notfound" )
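# Minimal sketch (illustration only) of how a helper might consume these
# lists; the booleanize() helper actually used further below is expected to
# come from the netcdf_functions_macros module included above:
#
#   function(booleanize VALUE RETVAR)
#     string(TOLOWER "${VALUE}" _lc)
#     if(_lc IN_LIST TRUELIST OR _lc STREQUAL "1")
#       set(${RETVAR} TRUE PARENT_SCOPE)
#     elseif(_lc IN_LIST FALSELIST OR _lc STREQUAL "")
#       set(${RETVAR} FALSE PARENT_SCOPE)
#     else()
#       set(${RETVAR} NOTFOUND PARENT_SCOPE)
#     endif()
#   endfunction()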
# Set the build type.
if ( NOT CMAKE_BUILD_TYPE )
set ( CMAKE_BUILD_TYPE DEBUG CACHE STRING "Choose the type of build, options are: None, Debug, Release."
FORCE )
endif ( )

# Set build type uppercase
string ( TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE )

# Determine the configure date.
if ( DEFINED ENV{SOURCE_DATE_EPOCH} )
execute_process (
COMMAND "date" "-u" "-d" "@$ENV{SOURCE_DATE_EPOCH}"
OUTPUT_VARIABLE CONFIG_DATE
)
else ( )
execute_process (
COMMAND date
OUTPUT_VARIABLE CONFIG_DATE
)
endif ( )
if ( CONFIG_DATE )
string ( STRIP ${CONFIG_DATE} CONFIG_DATE )
endif ( )
##
# Allow for extra dependencies.
##
set ( EXTRA_DEPS "" )
################################
# End Project Properties
################################
################################
# Set CTest Properties
################################
enable_testing ( )
include ( CTest )

# Set Memory test program for non-MSVC based builds.
# Assume valgrind for now.
if ( ( NOT MSVC ) AND ( NOT MINGW ) AND ( NOT ISCYGWIN ) )
set ( CTEST_MEMORYCHECK_COMMAND valgrind CACHE STRING "" )
endif ( )

# Set variable to define the build type.
include ( GenerateExportHeader )

################################
# End CTest Properties
################################

################################
# Compiler and Linker Configuration
################################

##
# Default building shared libraries.
# BUILD_SHARED_LIBS is provided by/used by
# CMake directly.
##
option ( BUILD_SHARED_LIBS "Configure netCDF as a shared library." ON )
if ( BUILD_SHARED_LIBS )
set ( CMAKE_POSITION_INDEPENDENT_CODE ON )
endif ( )

option ( NC_FIND_SHARED_LIBS "Find dynamically-built versions of dependent libraries" ${BUILD_SHARED_LIBS} )
##
# We've had a request to allow for non-versioned shared libraries.
2018-03-16 22:38:40 +08:00
# This seems reasonable enough to accommodate. See
2016-03-08 05:04:52 +08:00
# https://github.com/Unidata/netcdf-c/issues/228 for more info.
##
option ( ENABLE_SHARED_LIBRARY_VERSION "Encode the library SO version in the file name of the generated library file." ON )

# Set some default linux gcc & apple compiler options for
# debug builds.
if ( CMAKE_COMPILER_IS_GNUCC OR APPLE )
option ( ENABLE_COVERAGE_TESTS "Enable compiler flags needed to perform coverage tests." OFF )
option ( ENABLE_CONVERSION_WARNINGS "Enable warnings for implicit conversion from 64 to 32-bit datatypes." ON )
option ( ENABLE_LARGE_FILE_TESTS "Enable large file tests." OFF )

# Debugging flags
set ( CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wall" )

# Check to see if -Wl,--no-undefined is supported.
CHECK_C_LINKER_FLAG ( "-Wl,--no-undefined" LIBTOOL_HAS_NO_UNDEFINED )

if ( LIBTOOL_HAS_NO_UNDEFINED )
set ( CMAKE_SHARED_LINKER_FLAGS_DEBUG "${CMAKE_SHARED_LINKER_FLAGS_DEBUG} -Wl,--no-undefined" )
endif ( )
set ( CMAKE_REQUIRED_FLAGS "${TMP_CMAKE_REQUIRED_FLAGS}" )

# Coverage tests need to have optimization turned off.
if ( ENABLE_COVERAGE_TESTS )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -coverage -fprofile-arcs -ftest-coverage" )
set ( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -coverage -fprofile-arcs -ftest-coverage" )
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage" )
message ( STATUS "Coverage Tests: On." )
endif ( )
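# Example (illustrative) coverage workflow using the flags above:
#
#   cmake -DENABLE_COVERAGE_TESTS=ON -DCMAKE_BUILD_TYPE=Debug .. && make && ctest
#
# gcov/lcov can then be run over the resulting .gcno/.gcda files.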
# Warnings for 64-to-32 bit conversions.
if ( ENABLE_CONVERSION_WARNINGS )
CHECK_C_COMPILER_FLAG ( -Wconversion CC_HAS_WCONVERSION )
CHECK_C_COMPILER_FLAG ( -Wshorten-64-to-32 CC_HAS_SHORTEN_64_32 )

if ( CC_HAS_SHORTEN_64_32 )
set ( CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wshorten-64-to-32" )
endif ( )
if ( CC_HAS_WCONVERSION )
set ( CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wconversion" )
endif ( )

endif ( ENABLE_CONVERSION_WARNINGS )

endif ( CMAKE_COMPILER_IS_GNUCC OR APPLE )
# End default linux gcc & apple compiler options.

# Use relative pathnames in __FILE__ macros on MINGW:
if ( MINGW )
CHECK_C_COMPILER_FLAG ( "-fmacro-prefix-map='${CMAKE_SOURCE_DIR}'=." CC_HAS_MACRO_PREFIX_MAP )
if ( CC_HAS_MACRO_PREFIX_MAP )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmacro-prefix-map='${CMAKE_SOURCE_DIR}'=." )
endif ( )
endif ( )

# Suppress CRT Warnings.
# Only necessary for Windows
if ( MSVC )
target_compile_definitions ( netcdf PRIVATE -D_CRT_SECURE_NO_WARNINGS )
endif ( )

# Support ANSI format specifiers for *printf on MINGW:
if ( MINGW )
target_compile_definitions ( netcdf PRIVATE -D__USE_MINGW_ANSI_STDIO=1 )
endif ( )
#####
# System inspection checks
#####
include_directories ( ${CMAKE_CURRENT_BINARY_DIR}/include )
include_directories ( ${CMAKE_CURRENT_SOURCE_DIR}/include )
include_directories ( ${CMAKE_CURRENT_SOURCE_DIR}/oc2 )
include_directories ( ${CMAKE_CURRENT_SOURCE_DIR}/libsrc )
set ( CMAKE_REQUIRED_INCLUDES ${CMAKE_CURRENT_SOURCE_DIR}/libsrc )
################################
# End Compiler Configuration
################################
##
# Configuration for post-install RPath
# Adapted from http://www.cmake.org/Wiki/CMake_RPATH_handling
##
if ( NOT WIN32 AND BUILD_SHARED_LIBS )
# use, i.e. don't skip the full RPATH for the build tree
set ( CMAKE_SKIP_BUILD_RPATH FALSE )
# when building, don't use the install RPATH already
# (but later on when installing)
set ( CMAKE_BUILD_WITH_INSTALL_RPATH FALSE )

if ( APPLE )
set ( CMAKE_MACOSX_RPATH ON )
endif ( APPLE )

# add the automatically determined parts of the RPATH
# which point to directories outside the build tree to the install RPATH
set ( CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE )

# the RPATH to be used when installing,
# but only if it's not a system directory
list ( FIND CMAKE_PLATFORM_IMPLICIT_LINK_DIRECTORIES "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}" isSystemDir )
if ( "${isSystemDir}" STREQUAL "-1" )
set ( CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}" )
endif ( "${isSystemDir}" STREQUAL "-1" )

endif ( )
##
# End configuration for post-install RPath
##
################################
# Option checks
################################
# Default Cache variables.
set ( DEFAULT_CHUNK_SIZE 16777216 CACHE STRING "Default Chunk Size." )
set ( DEFAULT_CHUNK_CACHE_SIZE 16777216U CACHE STRING "Default Chunk Cache Size." )
set ( DEFAULT_CHUNKS_IN_CACHE 1000 CACHE STRING "Default number of chunks in cache." )
set ( DEFAULT_CHUNK_CACHE_PREEMPTION 0.75 CACHE STRING "Default file chunk cache preemption policy (a number between 0 and 1, inclusive)." )

# HDF5 default cache size values
set ( CHUNK_CACHE_SIZE ${DEFAULT_CHUNK_CACHE_SIZE} CACHE STRING "Default HDF5 Chunk Cache Size." )
set ( CHUNK_CACHE_NELEMS ${DEFAULT_CHUNKS_IN_CACHE} CACHE STRING "Default maximum number of elements in cache." )
set ( CHUNK_CACHE_PREEMPTION ${DEFAULT_CHUNK_CACHE_PREEMPTION} CACHE STRING "Default file chunk cache preemption policy for HDF5 files (a number between 0 and 1, inclusive)." )
set ( NETCDF_LIB_NAME "" CACHE STRING "Default name of the netcdf library." )
set ( TEMP_LARGE "." CACHE STRING "Where to put large temp files if large file tests are run." )
set ( NCPROPERTIES_EXTRA "" CACHE STRING "Specify extra pairs for _NCProperties." )
if ( NOT NETCDF_LIB_NAME STREQUAL "" )
set ( MOD_NETCDF_NAME ON )
endif ( )
# Set the appropriate compiler/architecture for universal OSX binaries.
if ( ${CMAKE_SYSTEM_NAME} STREQUAL "Darwin" )
set ( CMAKE_OSX_ARCHITECTURES i386;x86_64 )
endif ( )
# Option to use Static Runtimes in MSVC
if ( MSVC )
option ( NC_USE_STATIC_CRT "Use static CRT Libraries ('\\MT')." OFF )
if ( NC_USE_STATIC_CRT )
set ( USE_STATIC_CRT ON )
specify_static_crt_flag ( )
endif ( )
endif ( )
# Option to build netCDF Version 2
OPTION ( ENABLE_V2_API "Build netCDF Version 2." ON )
set ( BUILD_V2 ${ENABLE_V2_API} )
if ( NOT ENABLE_V2_API )
set ( NO_NETCDF_2 ON )
else ( NOT ENABLE_V2_API )
set ( USE_NETCDF_2 TRUE )
endif ( NOT ENABLE_V2_API )
# Option to build utilities
option ( BUILD_UTILITIES "Build ncgen, ncgen3, ncdump." ON )

# Option to use MMAP
option ( ENABLE_MMAP "Use MMAP." ON )

# Option to use examples.
option ( ENABLE_EXAMPLES "Build Examples" ON )
###
# Allow the user to specify libraries
# to link against, similar to automakes 'LIBS' variable.
###
set ( NC_EXTRA_DEPS "" CACHE STRING "Additional libraries to link against." )
if ( NC_EXTRA_DEPS )
string ( REPLACE " " ";" DEPS_LIST ${NC_EXTRA_DEPS} )
foreach ( _DEP ${DEPS_LIST} )
string ( REGEX REPLACE "^-l" "" _LIB ${_DEP} )
FIND_LIBRARY ( "${_LIB}_DEP" NAMES "${_LIB}" "lib${_LIB}" )
message ( ${${_LIB}_DEP} )
if ( "${${_LIB}_DEP}" STREQUAL "${_LIB}_DEP-NOTFOUND" )
message ( FATAL_ERROR "Error finding ${_LIB}." )
else ( )
message ( STATUS "Found ${_LIB}: ${${_LIB}_DEP}" )
endif ( )
set ( EXTRA_DEPS ${EXTRA_DEPS} "${${_LIB}_DEP}" )
endforeach ( )
message ( "Extra deps: ${EXTRA_DEPS}" )
list ( REMOVE_DUPLICATES EXTRA_DEPS )
set ( CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${EXTRA_DEPS} )
endif ( )
###
# End user-specified dependent libraries.
###
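# Example (illustrative only): extra link dependencies can be passed the same
# way automake's LIBS would be, e.g.
#
#   cmake -DNC_EXTRA_DEPS="-lsz -lm" ..
#
# Each "-lfoo" entry is stripped to "foo" above and resolved via FIND_LIBRARY().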
################################
# Format Option checks
################################
# We need to now treat enable-netcdf4 and enable-hdf5 as separate,
# but for backward compatibility, we need to treat enable-netcdf4
# as equivalent to enable-hdf5.
# We detect this using these rules:
# 1. if ENABLE_HDF5 is off then disable hdf5
# 2. if ENABLE_NETCDF4 is off then disable hdf5
# 3. else enable hdf5
option ( ENABLE_NETCDF_4 "Use HDF5." ON )
option ( ENABLE_NETCDF4 "Use HDF5." ON )
option ( ENABLE_HDF5 "Use HDF5." ON )
if ( NOT ENABLE_HDF5 OR NOT ENABLE_NETCDF4 OR NOT ENABLE_NETCDF_4 )
set ( ENABLE_HDF5 OFF CACHE BOOL "Use HDF5" FORCE )
endif ( )
option ( ENABLE_HDF4 "Build netCDF-4 with HDF4 read capability (HDF4, HDF5 and Zlib required)." OFF )
option ( ENABLE_DAP "Enable DAP2 and DAP4 Client." ON )
option ( ENABLE_NCZARR "Enable NCZarr Client." ON )
option ( ENABLE_PNETCDF "Build with parallel I/O for CDF-1, 2, and 5 files using PnetCDF." OFF )
set ( ENABLE_CDF5 AUTO CACHE STRING "AUTO" )
option ( ENABLE_CDF5 "Enable CDF5 support" ON )
# Netcdf-4 support (i.e. libsrc4) is required by more than just HDF5 (e.g. NCZarr)
# So depending on what above formats are enabled, enable netcdf-4
if ( ENABLE_HDF5 OR ENABLE_HDF4 OR ENABLE_NCZARR )
set ( ENABLE_NETCDF_4 ON CACHE BOOL "Enable netCDF-4 API" FORCE )
set ( ENABLE_NETCDF4 ON CACHE BOOL "Enable netCDF4 Alias" FORCE )
endif ( )
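# Example (illustrative only) of the format-selection switches above:
#
#   cmake -DENABLE_HDF5=OFF -DENABLE_NCZARR=ON ..
#
# disables the HDF5-based backend but still forces the netCDF-4 API on,
# because NCZarr requires the libsrc4 layer.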
# enable|disable all forms of network access
option ( ENABLE_REMOTE_FUNCTIONALITY "Enable|disable all forms of remote data access (DAP, S3, etc)" ON )
message ( ">>> ENABLE_REMOTE_FUNCTIONALITY=${ENABLE_REMOTE_FUNCTIONALITY}" )
if ( NOT ENABLE_REMOTE_FUNCTIONALITY )
message ( WARNING "ENABLE_REMOTE_FUNCTIONALITY=NO => ENABLE_DAP[4]=NO" )
set ( ENABLE_DAP OFF CACHE BOOL "ENABLE_REMOTE_FUNCTIONALITY=NO => ENABLE_DAP=NO" FORCE )
set ( ENABLE_DAP4 OFF CACHE BOOL "ENABLE_REMOTE_FUNCTIONALITY=NO => ENABLE_DAP4=NO" FORCE )
endif ( )
# Option to Build DLL
if ( WIN32 )
option ( ENABLE_DLL "Build a Windows DLL." ${BUILD_SHARED_LIBS} )
if ( ENABLE_DLL )
set ( BUILD_DLL ON CACHE BOOL "" )
add_compile_definitions ( -DDLL_NETCDF )
add_compile_definitions ( -DDLL_EXPORT )
endif ( )
endif ( )
# Did the user specify a default minimum blocksize for posixio?
set ( NCIO_MINBLOCKSIZE 256 CACHE STRING "Minimum I/O Blocksize for netCDF classic and 64-bit offset format files." )
if ( ENABLE_NETCDF_4 )
set ( USE_NETCDF4 ON CACHE BOOL "" )
set ( ENABLE_NETCDF_4 ON CACHE BOOL "" )
set ( ENABLE_NETCDF4 ON CACHE BOOL "" )
else ( )
set ( USE_HDF4_FILE_TESTS OFF )
set ( USE_HDF4 OFF )
set ( ENABLE_HDF4_FILE_TESTS OFF )
set ( ENABLE_HDF4 OFF )
endif ( )
# Option Logging, only valid for netcdf4.
option ( ENABLE_LOGGING "Enable Logging." OFF )
if ( NOT ENABLE_NETCDF_4 )
set ( ENABLE_LOGGING OFF )
endif ( )
if ( ENABLE_LOGGING )
target_compile_definitions ( netcdf PRIVATE -DLOGGING )
target_compile_definitions ( netcdf PRIVATE -DENABLE_SET_LOG_LEVEL )
set ( LOGGING ON )
set ( ENABLE_SET_LOG_LEVEL ON )
endif ( )
option ( ENABLE_SET_LOG_LEVEL_FUNC "Enable definition of nc_set_log_level()." ON )
if ( ENABLE_NETCDF_4 AND NOT ENABLE_LOGGING AND ENABLE_SET_LOG_LEVEL_FUNC )
target_compile_definitions ( netcdf PRIVATE -DENABLE_SET_LOG_LEVEL )
set ( ENABLE_SET_LOG_LEVEL ON )
endif ( )
# Option to allow for strict null file padding.
# See https://github.com/Unidata/netcdf-c/issues/657 for more information
option ( ENABLE_STRICT_NULL_BYTE_HEADER_PADDING "Enable strict null byte header padding." OFF )

if ( ENABLE_STRICT_NULL_BYTE_HEADER_PADDING )
set ( USE_STRICT_NULL_BYTE_HEADER_PADDING ON CACHE BOOL "" )
endif ( ENABLE_STRICT_NULL_BYTE_HEADER_PADDING )
# Note that szip management is tricky.
# This is because we have three things to consider:
# 1. is libsz available?
# 2. is szip enabled in HDF5?
# 3. is nczarr enabled?
# We need separate flags for cases 1 and 2
set ( USE_HDF5 ${ENABLE_HDF5} )
if ( ENABLE_DAP )
set ( USE_DAP ON CACHE BOOL "" )
set ( ENABLE_DAP2 ON CACHE BOOL "" )
if ( ENABLE_HDF5 )
message ( STATUS "Enabling DAP4" )
set ( ENABLE_DAP4 ON CACHE BOOL "" )
else ( )
message ( STATUS "Disabling DAP4" )
set ( ENABLE_DAP4 OFF CACHE BOOL "" )
endif ( ENABLE_HDF5 )
else ( )
set ( ENABLE_DAP2 OFF CACHE BOOL "" )
set ( ENABLE_DAP4 OFF CACHE BOOL "" )
endif ( )
# Option to support byte-range reading of remote datasets
option ( ENABLE_BYTERANGE "Enable byte-range access to remote datasets." ON )
if ( NOT ENABLE_REMOTE_FUNCTIONALITY )
message ( WARNING "ENABLE_REMOTE_FUNCTIONALITY=NO => ENABLE_BYTERANGE=NO" )
set ( ENABLE_BYTERANGE OFF CACHE BOOL "ENABLE_REMOTE_FUNCTIONALITY=NO => ENABLE_BYTERANGE=NO" FORCE )
endif ( )
# Option to Enable DAP long tests, remote tests.
option ( ENABLE_DAP_REMOTE_TESTS "Enable DAP remote tests." ON )
option ( ENABLE_EXTERNAL_SERVER_TESTS "Enable external Server remote tests." OFF )
option ( ENABLE_DAP_LONG_TESTS "Enable DAP long tests." OFF )
if ( NOT ENABLE_DAP )
set ( ENABLE_DAP_REMOTE_TESTS OFF CACHE BOOL "" FORCE )
set ( ENABLE_EXTERNAL_SERVER_TESTS OFF CACHE BOOL "" FORCE )
set ( ENABLE_DAP_LONG_TESTS OFF CACHE BOOL "" FORCE )
endif ( )
set ( REMOTETESTSERVERS "remotetest.unidata.ucar.edu" CACHE STRING "test servers to use for remote test" )
# Locate some compressors
option ( ENABLE_FILTER_SZIP "Enable use of Szip compression library if it is available. Required if ENABLE_NCZARR is true." ON )
option ( ENABLE_FILTER_BZ2 "Enable use of Bz2 compression library if it is available." ON )
option ( ENABLE_FILTER_BLOSC "Enable use of blosc compression library if it is available." ON )
option ( ENABLE_FILTER_ZSTD "Enable use of Zstd compression library if it is available." ON )
# If user wants, then install selected plugins (default off)
set ( PLUGIN_INSTALL_DIR "NO" CACHE STRING "Whether and where we should install plugins; defaults to no" )
# This is ugly, but seems necessary because of CMake's boolean structure
set ( boolval FALSE )
if ( DEFINED PLUGIN_INSTALL_DIR )
booleanize ( ${PLUGIN_INSTALL_DIR} boolval )
if ( boolval )
set ( ENABLE_PLUGIN_INSTALL YES )
# No actual value was specified
unset ( PLUGIN_INSTALL_DIR CACHE )
else ( )
if ( boolval STREQUAL "NOTFOUND" )
# Must be an actual value
set ( ENABLE_PLUGIN_INSTALL YES )
else ( )
set ( ENABLE_PLUGIN_INSTALL NO )
endif ( )
endif ( )
else ( )
set ( ENABLE_PLUGIN_INSTALL NO )
endif ( )

# Ensure no defined plugin dir if not enabled
if ( NOT ENABLE_PLUGIN_INSTALL )
unset ( PLUGIN_INSTALL_DIR CACHE )
endif ( )
if ( ENABLE_PLUGIN_INSTALL )
if ( NOT DEFINED PLUGIN_INSTALL_DIR )
# Default to HDF5_PLUGIN_PATH or its default directories
if ( DEFINED ENV{HDF5_PLUGIN_PATH} )
set ( PLUGIN_INSTALL_DIR "$ENV{HDF5_PLUGIN_PATH}" )
else ( )
if ( ISMSVC OR ISMINGW )
set ( PLUGIN_INSTALL_DIR "$ENV{ALLUSERSPROFILE}\\hdf5\\lib\\plugin" )
else ( )
set ( PLUGIN_INSTALL_DIR "/usr/local/hdf5/lib/plugin" )
endif ( )
endif ( )
message ( "Defaulting to -DPLUGIN_INSTALL_DIR=${PLUGIN_INSTALL_DIR}" )
endif ( )
endif ( )
if ( ENABLE_PLUGIN_INSTALL )
# Use the lowest priority dir in the path
if ( NOT ISMSVC AND NOT ISMINGW )
string ( REPLACE ":" ";" PATH_LIST ${PLUGIN_INSTALL_DIR} )
else ( )
set ( PATH_LIST ${PLUGIN_INSTALL_DIR} )
endif ( )
# Get last element
list ( GET PATH_LIST -1 PLUGIN_INSTALL_DIR )
set ( PLUGIN_INSTALL_DIR_SETTING "${PLUGIN_INSTALL_DIR}" )
message ( "Final value of -DPLUGIN_INSTALL_DIR=${PLUGIN_INSTALL_DIR}" )
else ( ) # No option specified
unset ( PLUGIN_INSTALL_DIR )
unset ( PLUGIN_INSTALL_DIR CACHE )
set ( PLUGIN_INSTALL_DIR_SETTING "N.A." )
endif ( )
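# Illustrative ways to drive the plugin-install logic above:
#
#   cmake -DPLUGIN_INSTALL_DIR=YES ..                 # install to HDF5_PLUGIN_PATH (or its platform default)
#   cmake -DPLUGIN_INSTALL_DIR=/opt/netcdf/plugins .. # install to an explicit directory
#   cmake -DPLUGIN_INSTALL_DIR=NO ..                  # do not install plugins (the default)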
# Try to enable NCZarr zip support
option ( ENABLE_NCZARR_ZIP "Enable NCZarr ZIP support." OFF )
# libdl is always available; built-in in Windows and OSX
option ( ENABLE_PLUGINS "Enable dynamically loaded plugins (default on)." ON )
if ( MINGW )
set ( ENABLE_PLUGINS OFF CACHE BOOL "Disable plugins" FORCE )
else ( )
if ( NOT WIN32 )
if ( HAVE_DLFCN_H )
include_directories ( "dlfcn.h" )
endif ( )
endif ( )
endif ( )
if ( ENABLE_PLUGINS )
set ( USEPLUGINS yes )
endif ( )
# Enable some developer-only tests
option ( ENABLE_EXTRA_TESTS "Enable Extra tests. Some may not work because of known issues. Developers only." OFF )
if ( ENABLE_EXTRA_TESTS )
set ( EXTRA_TESTS ON )
endif ( )
# Option to use bundled XGetopt in place of getopt(). This is mostly useful
2020-05-19 09:36:28 +08:00
# for MSVC builds. If not building utilities or some tests,
# getopt() isn't required at all.
if ( MSVC )
option ( ENABLE_XGETOPT "Enable bundled XGetOpt instead of external getopt()." ON )
if ( ENABLE_XGETOPT )
set ( USE_X_GETOPT ON CACHE BOOL "" )
endif ( )
endif ( )

set ( MATH "" )
if ( NOT WIN32 )
# STDIO instead of posixio.
option ( ENABLE_STDIO "If true, use stdio instead of posixio (ex. on the Cray)" OFF )
if ( ENABLE_STDIO )
set ( USE_STDIO ON CACHE BOOL "" )
endif ( )

# FFIO instead of PosixIO
option ( ENABLE_FFIO "If true, use ffio instead of posixio" OFF )
if ( ENABLE_FFIO )
set ( USE_FFIO ON CACHE BOOL "" )
endif ( )
endif ( )
# Options for S3 Support
option ( ENABLE_S3 "Enable S3 support." OFF )
option ( ENABLE_S3_INTERNAL "Enable S3 Internal support." OFF )
option ( ENABLE_NCZARR_S3 "Enable NCZarr S3 support; Deprecated in favor of ENABLE_S3" OFF )

if ( NOT ENABLE_REMOTE_FUNCTIONALITY )
set ( ENABLE_S3 OFF CACHE BOOL "" FORCE )
set ( ENABLE_S3_INTERNAL OFF CACHE BOOL "" FORCE )
set ( ENABLE_NCZARR_S3 OFF CACHE BOOL "" FORCE )
endif ( )
# Control S3 Testing: Multi-valued option
set ( WITH_S3_TESTING OFF CACHE STRING "Control S3 Testing: ON (i.e. all) OFF (i.e. none) PUBLIC" )
SET_PROPERTY ( CACHE WITH_S3_TESTING PROPERTY STRINGS ON OFF PUBLIC )
if ( WITH_S3_TESTING STREQUAL "" )
set ( WITH_S3_TESTING OFF CACHE STRING "" ) # Default
endif ( )

if ( WITH_S3_TESTING )
message ( WARNING "**** DO NOT USE WITH_S3_TESTING=ON UNLESS YOU HAVE ACCESS TO THE UNIDATA S3 BUCKET! ***" )
endif ( )
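# Illustrative values for the multi-valued cache entry above:
#
#   cmake -DWITH_S3_TESTING=OFF ..     # no S3 tests (default)
#   cmake -DWITH_S3_TESTING=PUBLIC ..  # only tests that use publicly accessible data
#   cmake -DWITH_S3_TESTING=ON ..      # all tests; requires access to the Unidata S3 bucket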
# ENABLE_NCZARR_S3 is now an alias for ENABLE_S3 (but...)
if ( NOT ENABLE_S3 AND ENABLE_NCZARR_S3 )
set ( ENABLE_S3 ON CACHE BOOL "NCZARR S3" FORCE ) # For backward compatibility
endif ( )
unset ( ENABLE_NCZARR_S3 )

if ( NOT ENABLE_REMOTE_FUNCTIONALITY )
message ( WARNING "ENABLE_REMOTE_FUNCTIONALITY=NO => disable all s3 functionality" )
set ( ENABLE_S3 OFF CACHE BOOL "" FORCE )
set ( ENABLE_S3_INTERNAL OFF CACHE BOOL "" FORCE )
set ( ENABLE_NCZARR_S3 OFF CACHE BOOL "" FORCE )
set ( ENABLE_HDF5_ROS3 OFF CACHE BOOL "Use ROS3" FORCE )
set ( WITH_S3_TESTING OFF CACHE STRING "" FORCE )
endif ( )
if ( ENABLE_S3 )
if ( NOT ENABLE_S3_AWS AND NOT ENABLE_S3_INTERNAL )
message ( FATAL_ERROR "S3 support library not found; please specify option -DENABLE_S3=NO" )
set ( ENABLE_S3 OFF CACHE BOOL "S3 support" FORCE )
endif ( )
if ( ENABLE_S3_AWS AND ENABLE_S3_INTERNAL )
message ( WARNING "Both aws-sdk-cpp and s3-internal enabled => use s3-internal" )
set ( ENABLE_S3_AWS OFF CACHE BOOL "S3 AWS" FORCE )
endif ( )
endif ( )
if ( NOT ENABLE_S3 )
if ( WITH_S3_TESTING STREQUAL "PUBLIC" OR WITH_S3_TESTING )
message ( WARNING "S3 support is disabled => WITH_S3_TESTING=OFF" )
set ( WITH_S3_TESTING OFF CACHE STRING "" FORCE )
endif ( )
endif ( )
2024-01-18 06:07:22 +08:00
option ( ENABLE_LIBXML2 "Link against libxml2 if it is available, use the packaged tinyxml2 parser otherwise." ON )
set ( XMLPARSER "tinyxml2 (bundled)" )
2021-12-23 12:04:40 +08:00
2024-01-18 06:07:22 +08:00
if ( NOT ENABLE_BYTERANGE AND ENABLE_HDF5_ROS3 )
message ( WARNING "ROS3 support requires ENABLE_BYTERANGE=TRUE; disabling ROS3 support" )
set ( ENABLE_HDF5_ROS3 OFF CACHE BOOL "ROS3 support" FORCE )
endif ( )
##
2012-10-03 04:56:46 +08:00
# Enable Tests
2015-04-30 01:04:26 +08:00
##
option ( ENABLE_TESTS "Enable basic tests, run with 'make test'." ON )
if ( ENABLE_TESTS )
set ( BUILD_TESTSETS ON CACHE BOOL "" )
# Options for CTest-based tests, dashboards.
set ( NC_CTEST_PROJECT_NAME "netcdf-c" CACHE STRING "Project Name for CTest-based testing purposes." )
set ( NC_CTEST_DROP_SITE "cdash.unidata.ucar.edu:443" CACHE STRING "Dashboard location for CTest-based testing purposes." )
set ( NC_CTEST_DROP_LOC_PREFIX "" CACHE STRING "Prefix for Dashboard location on remote server when using CTest-based testing." )
set ( SUBMIT_URL "https://cdash.unidata.ucar.edu:443" )
find_program ( HOSTNAME_CMD NAMES hostname )
if ( NOT WIN32 )
set ( HOSTNAME_ARG "-s" )
endif ( )
if ( HOSTNAME_CMD )
execute_process ( COMMAND ${HOSTNAME_CMD} "${HOSTNAME_ARG}" OUTPUT_VARIABLE HOSTNAME OUTPUT_STRIP_TRAILING_WHITESPACE )
set ( NC_CTEST_SITE "${HOSTNAME}" CACHE STRING "Hostname of test machine." )
endif ( )
if ( NC_CTEST_SITE )
set ( SITE "${NC_CTEST_SITE}" CACHE STRING "" )
endif ( )
###
# This option dictates whether or not to turn on
# tests which are known to fail. This is not the
# same thing as an 'expected failure'. Rather, these
# are tests that will need to be fixed eventually.
#
2019-09-18 10:27:43 +08:00
# By placing them here, we can occasionally turn this
2015-04-30 01:04:26 +08:00
# flag on and see if any known failures have been
# fixed in the course of code improvement/other bug
# fixes.
#
# To use this, simply add as a fencepost around tests
# which are known to fail.
###
option ( ENABLE_FAILING_TESTS "Run tests which are known to fail, check to see if any have been fixed." OFF )
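# Sketch of the intended fencepost pattern (illustration only; add_bin_test is
# assumed to be the project's usual test-registration helper, and
# tst_known_failure is a placeholder name):
#
#   if(ENABLE_FAILING_TESTS)
#     add_bin_test(nc_test tst_known_failure)
#   endif()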
###
2019-11-16 22:20:45 +08:00
# Option to turn on unit testing. See
2020-12-07 09:19:53 +08:00
# https://github.com/Unidata/netcdf-c/pull/1472 for more information.
2019-08-22 04:50:09 +08:00
###
option ( ENABLE_UNIT_TESTS "Run Unit Tests." ON )

###
# Option to turn on performance testing.
# See https://github.com/Unidata/netcdf-c/issues/2627 for more information.
###
option ( ENABLE_BENCHMARKS "Run benchmark Tests." OFF )
###
# End known-failures.
###
MARK_AS_ADVANCED ( ENABLE_FAILING_TESTS )
endif ( )
###
# Option to enable extreme numbers during testing.
###
option ( ENABLE_EXTREME_NUMBERS "Enable extreme numbers during testing, such as MAX_INT-1" ON )
if ( ENABLE_EXTREME_NUMBERS )
set ( USE_EXTREME_NUMBERS ON )
endif ( )

# Enable Large file tests
if ( ENABLE_LARGE_FILE_TESTS )
set ( LARGE_FILE_TESTS ON )
endif ( )
option ( ENABLE_METADATA_PERF_TESTS "Enable test of metadata performance." OFF )
if ( ENABLE_METADATA_PERF_TESTS )
set ( ENABLE_METADATA_PERF ON )
endif ( )
# Location for large file tests.
set ( TEMP_LARGE "." CACHE STRING "Location to store large file tests." )

option ( ENABLE_FSYNC "Enable experimental fsync code." OFF )
if ( ENABLE_FSYNC )
set ( USE_FSYNC ON )
endif ( )
# Temporary
OPTION ( ENABLE_JNA "Enable jna bug fix code." OFF )
if ( ENABLE_JNA )
set ( JNA ON )
endif ( )
# Linux specific large file support flags.
# Modelled after check in CMakeLists.txt for hdf5.
option ( ENABLE_LARGE_FILE_SUPPORT "Enable large file support." ON )
if ( ENABLE_LARGE_FILE_SUPPORT )
if ( MSVC )
set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE" )
set ( CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /LARGEADDRESSAWARE" )
set ( CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /LARGEADDRESSAWARE" )
else ( )
set ( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64" )
endif ( )
endif ( )
option ( ENABLE_EXAMPLE_TESTS "Run extra example tests. Requires GNU Sed. Ignored if HDF5 is not Enabled" OFF )
if ( NOT ENABLE_HDF5 AND ENABLE_EXAMPLE_TESTS )
set ( ENABLE_EXAMPLE_TESTS OFF )
endif ( )
##################################
# Dependencies
##################################
include ( cmake/dependencies.cmake )
################################
# End Dependencies
################################
# Enable Parallel IO with netCDF-4/HDF5 files using HDF5 parallel I/O.
set ( STATUS_PARALLEL "OFF" )
set ( IMPORT_MPI "" )
option ( ENABLE_PARALLEL4 "Build netCDF-4 with parallel IO" "${HDF5_PARALLEL}" )
if ( ENABLE_PARALLEL4 AND ENABLE_HDF5 )
if ( NOT HDF5_PARALLEL )
set ( USE_PARALLEL OFF CACHE BOOL "" )
message ( STATUS "Cannot find HDF5 library built with parallel support. Disabling parallel build." )
else ( )
set ( HDF5_PARALLEL ON CACHE BOOL "" )
set ( USE_PARALLEL ON CACHE BOOL "" )
set ( USE_PARALLEL4 ON CACHE BOOL "" )
set ( STATUS_PARALLEL "ON" )
configure_file ( "${netCDF_SOURCE_DIR}/nc_test4/run_par_test.sh.in"
"${netCDF_BINARY_DIR}/tmp/run_par_test.sh" @ONLY NEWLINE_STYLE LF )
file ( COPY "${netCDF_BINARY_DIR}/tmp/run_par_test.sh"
DESTINATION ${netCDF_BINARY_DIR}/nc_test4
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE )
configure_file ( "${netCDF_SOURCE_DIR}/h5_test/run_par_tests.sh.in"
"${netCDF_BINARY_DIR}/tmp/run_par_tests.sh" @ONLY NEWLINE_STYLE LF )
file ( COPY "${netCDF_BINARY_DIR}/tmp/run_par_tests.sh"
DESTINATION ${netCDF_BINARY_DIR}/h5_test
FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE )
set ( IMPORT_MPI "include(CMakeFindDependencyMacro)\nfind_dependency(MPI COMPONENTS C)" )
endif ( )
endif ( )
# Options to enable parallel IO for classic formats with PnetCDF library.
2024-01-18 06:07:22 +08:00
set ( STATUS_PNETCDF "OFF" )
if ( ENABLE_PNETCDF )
# Check for ncmpi_create in libpnetcdf, define USE_PNETCDF
# Does the user want to turn on PnetCDF read ability?
set ( USE_PNETCDF ON CACHE BOOL "" )
if ( NOT PNETCDF )
message ( STATUS "Cannot find PnetCDF library. Disabling PnetCDF support." )
set ( USE_PNETCDF OFF CACHE BOOL "" )
else ( NOT PNETCDF )
set ( USE_PARALLEL ON CACHE BOOL "" )
# Check PnetCDF version. Must be >= 1.6.1 (i.e., greater than 1.6.0)
set ( pnetcdf_h "${PNETCDF_INCLUDE_DIR}/pnetcdf.h" )
message ( STATUS "PnetCDF include file ${pnetcdf_h} will be searched for version" )
file ( STRINGS "${pnetcdf_h}" pnetcdf_major_string REGEX "^#define PNETCDF_VERSION_MAJOR" )
string ( REGEX REPLACE "[^0-9]" "" pnetcdf_major "${pnetcdf_major_string}" )
file ( STRINGS "${pnetcdf_h}" pnetcdf_minor_string REGEX "^#define PNETCDF_VERSION_MINOR" )
string ( REGEX REPLACE "[^0-9]" "" pnetcdf_minor "${pnetcdf_minor_string}" )
file ( STRINGS "${pnetcdf_h}" pnetcdf_sub_string REGEX "^#define PNETCDF_VERSION_SUB" )
string ( REGEX REPLACE "[^0-9]" "" pnetcdf_sub "${pnetcdf_sub_string}" )
set ( pnetcdf_version "${pnetcdf_major}.${pnetcdf_minor}.${pnetcdf_sub}" )
2016-11-13 13:58:09 +08:00
message ( STATUS "Found PnetCDF version ${pnetcdf_version}" )
2016-02-09 22:23:09 +08:00
2016-02-10 02:23:33 +08:00
if ( ${ pnetcdf_version } VERSION_GREATER "1.6.0" )
2024-01-18 06:07:22 +08:00
set ( STATUS_PNETCDF "ON" )
include_directories ( ${ PNETCDF_INCLUDE_DIR } )
set ( HAVE_LIBPNETCDF ON )
2018-09-18 00:47:40 +08:00
# PnetCDF => parallel
2024-01-18 06:07:22 +08:00
set ( STATUS_PARALLEL ON )
set ( USE_PARALLEL ON )
message ( STATUS "Using PnetCDF Library: ${PNETCDF}" )
else ( )
message ( WARNING "ENABLE_PNETCDF requires version 1.6.1 or later; found version ${pnetcdf_version}. PnetCDF is disabled" )
endif ( )
2022-06-29 04:56:38 +08:00
###
# Generate pnetcdf test.
###
2024-01-18 06:07:22 +08:00
configure_file ( "${netCDF_SOURCE_DIR}/nc_test/run_pnetcdf_tests.sh.in"
2022-06-29 04:56:38 +08:00
" $ { n e t C D F _ B I N A R Y _ D I R } / n c _ t e s t / r u n _ p n e t c d f _ t e s t s . s h " )
2024-01-18 06:07:22 +08:00
endif ( NOT PNETCDF )
endif ( )
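
# Illustrative only: a hypothetical PnetCDF-enabled configure; the install
# prefix below is an example, not a requirement of this file:
#   CC=mpicc cmake -DENABLE_PNETCDF=ON -DCMAKE_PREFIX_PATH=/opt/pnetcdf ..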

# Options to enable use of fill values for elements causing NC_ERANGE
set ( ENABLE_ERANGE_FILL AUTO CACHE STRING "AUTO" )
option ( ENABLE_ERANGE_FILL "Enable use of fill value when out-of-range type conversion causes NC_ERANGE error." OFF )
if ( ENABLE_ERANGE_FILL ) # enable or auto
  string ( TOUPPER ${ENABLE_ERANGE_FILL} ENABLE_ERANGE_FILL )
  if ( ENABLE_ERANGE_FILL AND NOT ENABLE_ERANGE_FILL STREQUAL "AUTO" )
    # explicitly enabled
    set ( ENABLE_ERANGE_FILL ON )
  else ( )
    if ( NOT ENABLE_ERANGE_FILL STREQUAL "AUTO" )
      set ( ENABLE_ERANGE_FILL OFF )
    endif ( )
  endif ( )
endif ( ENABLE_ERANGE_FILL )
# Now ENABLE_ERANGE_FILL is either AUTO, ON, or OFF

# More relaxed coordinate check is now mandatory for all builds.
set ( ENABLE_ZERO_LENGTH_COORD_BOUND ON )

# Check and conform with PnetCDF settings on ERANGE_FILL and RELAX_COORD_BOUND
if ( STATUS_PNETCDF )
  file ( STRINGS "${pnetcdf_h}" enable_erange_fill_pnetcdf REGEX "^#define PNETCDF_ERANGE_FILL" )
  string ( REGEX REPLACE "[^0-9]" "" erange_fill_pnetcdf "${enable_erange_fill_pnetcdf}" )
  if ( "x${erange_fill_pnetcdf}" STREQUAL "x1" )
    set ( erange_fill_pnetcdf "ON" )
  else ( )
    set ( erange_fill_pnetcdf "OFF" )
  endif ( )
  if ( ENABLE_ERANGE_FILL STREQUAL "AUTO" ) # not set on command line
    set ( ENABLE_ERANGE_FILL "${erange_fill_pnetcdf}" )
  else ( )
    # user explicitly set this option on the command line
    if ( NOT ENABLE_ERANGE_FILL STREQUAL "${erange_fill_pnetcdf}" )
      if ( ENABLE_ERANGE_FILL )
        message ( FATAL_ERROR "Enabling erange-fill conflicts with PnetCDF setting" )
      else ( )
        message ( FATAL_ERROR "Disabling erange-fill conflicts with PnetCDF setting" )
      endif ( )
    endif ( )
  endif ( )

  file ( STRINGS "${pnetcdf_h}" relax_coord_bound_pnetcdf REGEX "^#define PNETCDF_RELAX_COORD_BOUND" )
  string ( REGEX REPLACE "[^0-9]" "" relax_coord_bound "${relax_coord_bound_pnetcdf}" )
  if ( "x${relax_coord_bound}" STREQUAL "x1" )
    set ( relax_coord_bound_pnetcdf "ON" )
  else ( )
    set ( relax_coord_bound_pnetcdf "OFF" )
  endif ( )
  # PnetCDF must have relaxed coord bounds to build with netCDF-4
  if ( NOT ENABLE_ZERO_LENGTH_COORD_BOUND STREQUAL "${relax_coord_bound_pnetcdf}" )
    message ( FATAL_ERROR "PnetCDF must be built with relax-coord-bound enabled" )
  endif ( )
endif ( )

if ( ENABLE_ERANGE_FILL )
  message ( STATUS "Enabling use of fill value when NC_ERANGE" )
  set ( M4FLAGS "-DERANGE_FILL" CACHE STRING "" )
endif ( )

if ( ENABLE_ZERO_LENGTH_COORD_BOUND )
  message ( STATUS "Enabling a more relaxed check for NC_EINVALCOORDS" )
  target_compile_definitions ( netcdf PRIVATE -DRELAX_COORD_BOUND )
endif ( )

# Enable Parallel Tests.
option ( ENABLE_PARALLEL_TESTS "Enable Parallel IO Tests. Requires HDF5/NetCDF4 with parallel I/O Support." "${HDF5_PARALLEL}" )
if ( ENABLE_PARALLEL_TESTS AND USE_PARALLEL )
  set ( TEST_PARALLEL ON CACHE BOOL "" )
  if ( USE_NETCDF4 )
    set ( TEST_PARALLEL4 ON CACHE BOOL "" )
  endif ( )
endif ( )

if ( ENABLE_PARALLEL_TESTS AND NOT USE_PARALLEL )
  message ( FATAL_ERROR "Parallel tests requested, but no parallel HDF5 installation detected." )
endif ( )

# Enable special filter test; experimental when using cmake.
option ( ENABLE_FILTER_TESTING "Enable filter testing. Ignored if shared libraries or netCDF4 are not enabled." ON )

if ( ENABLE_FILTER_TESTING )
  if ( NOT ENABLE_HDF5 AND NOT ENABLE_NCZARR )
    message ( WARNING "ENABLE_FILTER_TESTING requires HDF5 and/or NCZarr. Disabling." )
    set ( ENABLE_FILTER_TESTING OFF CACHE BOOL "Enable Filter Testing" FORCE )
  endif ( )
endif ( )

if ( NOT BUILD_SHARED_LIBS )
  message ( WARNING "ENABLE_FILTER_TESTING requires shared libraries. Disabling." )
  set ( ENABLE_FILTER_TESTING OFF )
endif ( )
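
# Illustrative only: because of the shared-library check above, a static-only
# configure such as this hypothetical invocation ends up with filter testing
# disabled:
#   cmake -DBUILD_SHARED_LIBS=OFF -DENABLE_FILTER_TESTING=ON ..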
option ( ENABLE_NCZARR_FILTERS "Enable NCZarr filters" ON )
option ( ENABLE_NCZARR_FILTERS_TESTING "Enable NCZarr filter testing." ON )
# Constraints
if ( NOT ENABLE_PLUGINS )
  message ( WARNING "ENABLE_NCZARR_FILTERS requires ENABLE_PLUGINS. Disabling." )
  set ( ENABLE_NCZARR_FILTERS OFF CACHE BOOL "Enable NCZarr Filters." FORCE )
endif ( )

if ( NOT ENABLE_NCZARR )
  message ( WARNING "ENABLE_NCZARR==NO => ENABLE_NCZARR_FILTERS==NO AND ENABLE_NCZARR_FILTER_TESTING==NO" )
  set ( ENABLE_NCZARR_FILTERS OFF CACHE BOOL "Disable NCZARR_FILTERS" FORCE )
endif ( )

if ( NOT ENABLE_NCZARR_FILTERS )
  set ( ENABLE_NCZARR_FILTER_TESTING OFF CACHE BOOL "Enable NCZarr Filter Testing" FORCE )
endif ( )

set ( ENABLE_CLIENTSIDE_FILTERS OFF )
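
# Illustrative only: the constraints above cascade, so a hypothetical configure
# without NCZarr also disables the NCZarr filter options and their tests:
#   cmake -DENABLE_NCZARR=OFF ..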

# Determine whether or not to generate documentation.
option ( ENABLE_DOXYGEN "Enable generation of doxygen-based documentation." OFF )
if ( ENABLE_DOXYGEN )
  # Offer the option to build internal documentation.
  option ( ENABLE_INTERNAL_DOCS "Build internal documentation. This is of interest to developers only." OFF )
  if ( ENABLE_INTERNAL_DOCS )
    set ( BUILD_INTERNAL_DOCS yes CACHE STRING "" )
  else ( )
    set ( BUILD_INTERNAL_DOCS no CACHE STRING "" )
  endif ( )

  ###
  # If we are building release documentation, we need to set some
  # variables that will be used in the Doxygen.in template.
  ###
  option ( ENABLE_DOXYGEN_BUILD_RELEASE_DOCS "Build release documentation. This is of interest only to the netCDF developers." OFF )
  if ( ENABLE_DOXYGEN_BUILD_RELEASE_DOCS )
    set ( DOXYGEN_CSS_FILE "${CMAKE_SOURCE_DIR}/docs/release.css" CACHE STRING "" )
    set ( DOXYGEN_HEADER_FILE "${CMAKE_SOURCE_DIR}/docs/release_header.html" CACHE STRING "" )
    set ( DOXYGEN_SEARCHENGINE "NO" CACHE STRING "" )
    set ( ENABLE_DOXYGEN_SERVER_BASED_SEARCH NO CACHE STRING "" )
  else ( )
    set ( DOXYGEN_CSS_FILE "" CACHE STRING "" )
    set ( DOXYGEN_HEADER_FILE "" CACHE STRING "" )
    set ( DOXYGEN_SEARCHENGINE "YES" CACHE STRING "" )

    # If not using the release document configuration,
    # provide an option for server-based search.
    option ( ENABLE_DOXYGEN_SERVER_SIDE_SEARCH "Configure Doxygen with server-based search." OFF )
    if ( ENABLE_DOXYGEN_SERVER_SIDE_SEARCH )
      set ( DOXYGEN_SERVER_BASED_SEARCH "YES" CACHE STRING "" )
    else ( )
      set ( DOXYGEN_SERVER_BASED_SEARCH "NO" CACHE STRING "" )
    endif ( ENABLE_DOXYGEN_SERVER_SIDE_SEARCH )
  endif ( ENABLE_DOXYGEN_BUILD_RELEASE_DOCS )

  # Option to turn on the TODO list in the doxygen-generated documentation.
  option ( DOXYGEN_ENABLE_TASKS "Turn on test, todo, bug lists in documentation. This is of interest to developers only." OFF )
  if ( DOXYGEN_ENABLE_TASKS )
    set ( SHOW_DOXYGEN_TAG_LIST YES CACHE STRING "" )
  else ( DOXYGEN_ENABLE_TASKS )
    set ( SHOW_DOXYGEN_TAG_LIST NO CACHE STRING "" )
  endif ( DOXYGEN_ENABLE_TASKS )

  option ( ENABLE_DOXYGEN_PDF_OUTPUT "[EXPERIMENTAL] Turn on PDF output for Doxygen-generated documentation." OFF )
  if ( ENABLE_DOXYGEN_PDF_OUTPUT )
    set ( NC_ENABLE_DOXYGEN_PDF_OUTPUT "YES" CACHE STRING "" )
  else ( )
    set ( NC_ENABLE_DOXYGEN_PDF_OUTPUT "NO" CACHE STRING "" )
  endif ( )

  find_program ( NC_DOT NAMES dot )
  # Specify whether or not 'dot' was found on the system path.
  if ( NC_DOT )
    set ( HAVE_DOT YES CACHE STRING "" )
  else ( NC_DOT )
    set ( HAVE_DOT NO CACHE STRING "" )
  endif ( NC_DOT )
endif ( )
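
# Illustrative only: a hypothetical documentation-oriented configure using the
# options defined above:
#   cmake -DENABLE_DOXYGEN=ON -DENABLE_INTERNAL_DOCS=ON -DENABLE_DOXYGEN_PDF_OUTPUT=ON ..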

# Always enable DISKLESS
option ( ENABLE_DISKLESS "Enable in-memory files" ON )

# Always enable quantization.
option ( ENABLE_QUANTIZE "Enable variable quantization" ON )

# By default, MSVC has a stack size of 1000000.
# Allow a user to override this.
if ( MSVC )
  set ( NC_MSVC_STACK_SIZE 40000000 CACHE STRING "Default stack size for MSVC-based projects." )
  # By default, CMake sets the stack to 1000000.
  # Remove this limitation.
  # See here for more details:
  # http://www.cmake.org/pipermail/cmake/2009-April/028710.html
  set ( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:${NC_MSVC_STACK_SIZE}" )
  set ( CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /STACK:${NC_MSVC_STACK_SIZE}" )
  set ( CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /STACK:${NC_MSVC_STACK_SIZE}" )
endif ( )
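
# Illustrative only: NC_MSVC_STACK_SIZE is a cache variable, so it can be
# overridden at configure time; the value below is a hypothetical example:
#   cmake -DNC_MSVC_STACK_SIZE=80000000 ..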
# Set some of the options as advanced.
MARK_AS_ADVANCED ( ENABLE_INTERNAL_DOCS VALGRIND_TESTS ENABLE_COVERAGE_TESTS )
MARK_AS_ADVANCED ( ENABLE_DAP_REMOTE_TESTS ENABLE_DAP_LONG_TESTS USE_REMOTE_CDASH ENABLE_EXTERNAL_SERVER_TESTS )
MARK_AS_ADVANCED ( ENABLE_DOXYGEN_BUILD_RELEASE_DOCS DOXYGEN_ENABLE_TASKS ENABLE_DOXYGEN_SERVER_SIDE_SEARCH )
MARK_AS_ADVANCED ( ENABLE_SHARED_LIBRARY_VERSION )

################################
# Option checks
################################

# Library include checks
CHECK_INCLUDE_FILE ( "math.h" HAVE_MATH_H )
CHECK_INCLUDE_FILE ( "unistd.h" HAVE_UNISTD_H )
# Solve a compatibility issue in ncgen/, which checks
# for NO_UNISTD_H
if ( NOT HAVE_UNISTD_H )
  set ( YY_NO_UNISTD_H TRUE )
endif ( )
CHECK_INCLUDE_FILE ( "alloca.h" HAVE_ALLOCA_H )
CHECK_INCLUDE_FILE ( "malloc.h" HAVE_MALLOC_H )
CHECK_INCLUDE_FILE ( "fcntl.h" HAVE_FCNTL_H )
CHECK_INCLUDE_FILE ( "getopt.h" HAVE_GETOPT_H )
CHECK_INCLUDE_FILE ( "locale.h" HAVE_LOCALE_H )
CHECK_INCLUDE_FILE ( "stdint.h" HAVE_STDINT_H )
CHECK_INCLUDE_FILE ( "stdio.h" HAVE_STDIO_H )
if ( MSVC )
  CHECK_INCLUDE_FILE ( "io.h" HAVE_IO_H )
endif ( MSVC )
CHECK_INCLUDE_FILE ( "stdlib.h" HAVE_STDLIB_H )
CHECK_INCLUDE_FILE ( "ctype.h" HAVE_CTYPE_H )
CHECK_INCLUDE_FILE ( "stdarg.h" HAVE_STDARG_H )
CHECK_INCLUDE_FILE ( "strings.h" HAVE_STRINGS_H )
CHECK_INCLUDE_FILE ( "signal.h" HAVE_SIGNAL_H )
CHECK_INCLUDE_FILE ( "sys/param.h" HAVE_SYS_PARAM_H )
CHECK_INCLUDE_FILE ( "sys/stat.h" HAVE_SYS_STAT_H )
CHECK_INCLUDE_FILE ( "sys/time.h" HAVE_SYS_TIME_H )
CHECK_INCLUDE_FILE ( "sys/types.h" HAVE_SYS_TYPES_H )
CHECK_INCLUDE_FILE ( "sys/mman.h" HAVE_SYS_MMAN_H )
CHECK_INCLUDE_FILE ( "sys/resource.h" HAVE_SYS_RESOURCE_H )
CHECK_INCLUDE_FILE ( "inttypes.h" HAVE_INTTYPES_H )
CHECK_INCLUDE_FILE ( "pstdint.h" HAVE_PSTDINT_H )
CHECK_INCLUDE_FILE ( "endian.h" HAVE_ENDIAN_H )
CHECK_INCLUDE_FILE ( "BaseTsd.h" HAVE_BASETSD_H )
CHECK_INCLUDE_FILE ( "stddef.h" HAVE_STDDEF_H )
CHECK_INCLUDE_FILE ( "string.h" HAVE_STRING_H )
CHECK_INCLUDE_FILE ( "winsock2.h" HAVE_WINSOCK2_H )
CHECK_INCLUDE_FILE ( "ftw.h" HAVE_FTW_H )
CHECK_INCLUDE_FILE ( "libgen.h" HAVE_LIBGEN_H )
CHECK_INCLUDE_FILE ( "execinfo.h" HAVE_EXECINFO_H )
CHECK_INCLUDE_FILE ( "dirent.h" HAVE_DIRENT_H )
CHECK_INCLUDE_FILE ( "time.h" HAVE_TIME_H )
CHECK_INCLUDE_FILE ( "dlfcn.h" HAVE_DLFCN_H )
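
# Illustrative only: the HAVE_* results gathered above are substituted into
# config.h via the config.h.cmake.in template. The two lines below are a
# hypothetical sketch of such template entries, not the template's actual
# contents:
#   #cmakedefine HAVE_UNISTD_H 1
#   #cmakedefine HAVE_SYS_STAT_H 1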
# Symbol Exists
CHECK_SYMBOL_EXISTS ( isfinite "math.h" HAVE_DECL_ISFINITE )
CHECK_SYMBOL_EXISTS ( isnan "math.h" HAVE_DECL_ISNAN )
CHECK_SYMBOL_EXISTS ( isinf "math.h" HAVE_DECL_ISINF )
CHECK_SYMBOL_EXISTS ( st_blksize "sys/stat.h" HAVE_STRUCT_STAT_ST_BLKSIZE )
CHECK_SYMBOL_EXISTS ( alloca "alloca.h" HAVE_ALLOCA )

CHECK_SYMBOL_EXISTS ( snprintf "stdio.h" HAVE_SNPRINTF )

# Type checks
# Aliases for automake consistency
set ( SIZEOF_VOIDSTAR ${CMAKE_SIZEOF_VOID_P} )
set ( SIZEOF_VOIDP ${SIZEOF_VOIDSTAR} )

CHECK_TYPE_SIZE ( "char" SIZEOF_CHAR )
CHECK_TYPE_SIZE ( "double" SIZEOF_DOUBLE )
CHECK_TYPE_SIZE ( "float" SIZEOF_FLOAT )
CHECK_TYPE_SIZE ( "int" SIZEOF_INT )
CHECK_TYPE_SIZE ( "uint" SIZEOF_UINT )
if ( SIZEOF_UINT )
  set ( HAVE_UINT TRUE )
endif ( SIZEOF_UINT )

CHECK_TYPE_SIZE ( "schar" SIZEOF_SCHAR )
if ( SIZEOF_SCHAR )
  set ( HAVE_SCHAR TRUE )
endif ( SIZEOF_SCHAR )

CHECK_TYPE_SIZE ( "long" SIZEOF_LONG )
CHECK_TYPE_SIZE ( "long long" SIZEOF_LONG_LONG )
if ( SIZEOF_LONG_LONG )
  set ( HAVE_LONG_LONG_INT TRUE )
endif ( SIZEOF_LONG_LONG )

CHECK_TYPE_SIZE ( "unsigned long long" SIZEOF_UNSIGNED_LONG_LONG )
CHECK_TYPE_SIZE ( "off_t" SIZEOF_OFF_T )
CHECK_TYPE_SIZE ( "off64_t" SIZEOF_OFF64_T )
CHECK_TYPE_SIZE ( "short" SIZEOF_SHORT )
CHECK_TYPE_SIZE ( "ushort" SIZEOF_USHORT )
if ( SIZEOF_USHORT )
  set ( HAVE_USHORT TRUE )
endif ( SIZEOF_USHORT )

CHECK_TYPE_SIZE ( "_Bool" SIZEOF__BOOL )
CHECK_TYPE_SIZE ( "size_t" SIZEOF_SIZE_T )

# Check whether to turn on or off CDF5 support.
if ( SIZEOF_SIZE_T EQUAL 4 )
  if ( ENABLE_CDF5 ) # enable or auto
    string ( TOUPPER ${ENABLE_CDF5} ENABLE_CDF5 )
    if ( ENABLE_CDF5 AND NOT ENABLE_CDF5 STREQUAL "AUTO" ) # explicitly enabled
      message ( FATAL_ERROR "Unable to support CDF5 feature because size_t is less than 8 bytes" )
    endif ( ENABLE_CDF5 AND NOT ENABLE_CDF5 STREQUAL "AUTO" )
    set ( ENABLE_CDF5 OFF ) # cannot support CDF5
    set ( USE_CDF5 OFF CACHE BOOL "" ) # cannot support CDF5
  endif ( ENABLE_CDF5 )
else ( SIZEOF_SIZE_T EQUAL 4 )
  if ( ENABLE_CDF5 ) # explicitly set by user or not set
    set ( USE_CDF5 ON CACHE BOOL "" )
  else ( ENABLE_CDF5 ) # explicitly disabled by user
    set ( USE_CDF5 OFF CACHE BOOL "" )
  endif ( ENABLE_CDF5 )
endif ( SIZEOF_SIZE_T EQUAL 4 )

CHECK_TYPE_SIZE ( "ssize_t" SIZEOF_SSIZE_T )
if ( SIZEOF_SSIZE_T )
  set ( HAVE_SSIZE_T TRUE )
endif ( SIZEOF_SSIZE_T )

CHECK_TYPE_SIZE ( "ptrdiff_t" SIZEOF_PTRDIFF_T )
if ( SIZEOF_PTRDIFF_T )
  set ( HAVE_PTRDIFF_T TRUE )
endif ( SIZEOF_PTRDIFF_T )

CHECK_TYPE_SIZE ( "uintptr_t" SIZEOF_UINTPTR_T )
if ( SIZEOF_UINTPTR_T )
  set ( HAVE_UINTPTR_T TRUE )
endif ( SIZEOF_UINTPTR_T )
CHECK_TYPE_SIZE ( "mode_t" SIZEOF_MODE_T )
2024-01-18 06:07:22 +08:00
if ( SIZEOF_MODE_T )
set ( HAVE_MODE_T TRUE )
endif ( SIZEOF_MODE_T )
2017-01-28 08:41:03 +08:00
2013-08-31 05:16:17 +08:00
# __int64 is used on Windows for large file support.
CHECK_TYPE_SIZE ( "__int64" SIZEOF___INT_64 )
CHECK_TYPE_SIZE ( "int64_t" SIZEOF_INT64_T )
CHECK_TYPE_SIZE ( "uint64" SIZEOF_UINT64 )
CHECK_TYPE_SIZE ( "unsigned char" SIZEOF_UCHAR )
CHECK_TYPE_SIZE ( "unsigned short int" SIZEOF_UNSIGNED_SHORT_INT )
CHECK_TYPE_SIZE ( "unsigned int" SIZEOF_UNSIGNED_INT )
CHECK_TYPE_SIZE ( "long long" SIZEOF_LONGLONG )
CHECK_TYPE_SIZE ( "unsigned long long" SIZEOF_ULONGLONG )

CHECK_TYPE_SIZE ( "uint64_t" SIZEOF_UINT64_T )
if ( SIZEOF_UINT64_T )
  set ( HAVE_UINT64_T TRUE )
endif ( SIZEOF_UINT64_T )

# On Windows systems, we redefine off_t as __int64
# to enable LFS. This is true on both 32 and 64 bit systems.
# We must redefine SIZEOF_OFF_T to match.
if ( MSVC AND SIZEOF___INT_64 )
  set ( SIZEOF_OFF_T ${SIZEOF___INT_64} )
endif ( )

# Check for various functions.
CHECK_FUNCTION_EXISTS ( fsync HAVE_FSYNC )
CHECK_FUNCTION_EXISTS ( strlcat HAVE_STRLCAT )
CHECK_FUNCTION_EXISTS ( strlcpy HAVE_STRLCPY )
CHECK_FUNCTION_EXISTS ( strdup HAVE_STRDUP )
CHECK_FUNCTION_EXISTS ( strndup HAVE_STRNDUP )
CHECK_FUNCTION_EXISTS ( strtoll HAVE_STRTOLL )
CHECK_FUNCTION_EXISTS ( strcasecmp HAVE_STRCASECMP )
CHECK_FUNCTION_EXISTS ( strtoull HAVE_STRTOULL )
CHECK_FUNCTION_EXISTS ( mkstemp HAVE_MKSTEMP )
CHECK_FUNCTION_EXISTS ( mktemp HAVE_MKTEMP )
CHECK_FUNCTION_EXISTS ( random HAVE_RANDOM )
CHECK_FUNCTION_EXISTS ( gettimeofday HAVE_GETTIMEOFDAY )
CHECK_FUNCTION_EXISTS ( MPI_Comm_f2c HAVE_MPI_COMM_F2C )
CHECK_FUNCTION_EXISTS ( MPI_Info_f2c HAVE_MPI_INFO_F2C )
CHECK_FUNCTION_EXISTS ( memmove HAVE_MEMMOVE )
CHECK_FUNCTION_EXISTS ( getpagesize HAVE_GETPAGESIZE )
CHECK_FUNCTION_EXISTS ( sysconf HAVE_SYSCONF )
CHECK_FUNCTION_EXISTS ( getrlimit HAVE_GETRLIMIT )
CHECK_FUNCTION_EXISTS ( _filelengthi64 HAVE_FILE_LENGTH_I64 )
CHECK_FUNCTION_EXISTS ( mmap HAVE_MMAP )
CHECK_FUNCTION_EXISTS ( mremap HAVE_MREMAP )
CHECK_FUNCTION_EXISTS ( fileno HAVE_FILENO )

CHECK_FUNCTION_EXISTS ( clock_gettime HAVE_CLOCK_GETTIME )
CHECK_SYMBOL_EXISTS ( "struct timespec" "time.h" HAVE_STRUCT_TIMESPEC )
CHECK_FUNCTION_EXISTS ( atexit HAVE_ATEXIT )

# Control invoking nc_finalize at exit
option ( ENABLE_ATEXIT_FINALIZE "Invoke nc_finalize at exit." ON )
if ( ENABLE_ATEXIT_FINALIZE AND NOT HAVE_ATEXIT )
  set ( ENABLE_ATEXIT_FINALIZE OFF CACHE BOOL "Enable ATEXIT" FORCE )
  message ( WARNING "ENABLE_ATEXIT_FINALIZE set but atexit() function not defined" )
endif ( )

# Check to see if MAP_ANONYMOUS is defined.
if ( MSVC )
  message ( WARNING "mmap not supported under visual studio: disabling MMAP support." )
  set ( ENABLE_MMAP OFF )
else ( )
  CHECK_C_SOURCE_COMPILES ( "
  #include <sys/mman.h>
  int main() {int x = MAP_ANONYMOUS;}" HAVE_MAPANON )
  if ( NOT HAVE_MMAP OR NOT HAVE_MAPANON )
    message ( WARNING "mmap or MAP_ANONYMOUS not found: disabling MMAP support." )
    set ( ENABLE_MMAP OFF )
  endif ( )
endif ( )

if ( ENABLE_MMAP )
  # Aliases
  set ( BUILD_MMAP ON )
  set ( USE_MMAP ON )
endif ( ENABLE_MMAP )

#CHECK_FUNCTION_EXISTS(alloca HAVE_ALLOCA)

# Used in the `configure_file` calls below
set ( ISCMAKE "yes" )
if ( MSVC )
  set ( ISMSVC ON CACHE BOOL "" FORCE )
  set ( REGEDIT ON CACHE BOOL "" FORCE )
  # Get the Windows major version and build number
  execute_process ( COMMAND "systeminfo" OUTPUT_VARIABLE WININFO )
  if ( WININFO STREQUAL "" )
    set ( WVM 0 )
    set ( WVB 0 )
  else ( )
    string ( REGEX MATCH "\nOS Version:[ \t]+[0-9.]+" WINVERLINE "${WININFO}" )
    string ( REGEX REPLACE "[^0-9]*([0-9]+)[.]([0-9])+[.]([0-9]+)" "\\1" WVM "${WINVERLINE}" )
    string ( REGEX REPLACE "[^0-9]*([0-9]+)[.]([0-9])+[.]([0-9]+)" "\\3" WVB "${WINVERLINE}" )
  endif ( )
  set ( WINVERMAJOR ${WVM} CACHE STRING "" FORCE )
  set ( WINVERBUILD ${WVB} CACHE STRING "" FORCE )
endif ( )

#####
# End system inspection checks.
#####

# A basic script used to convert m4 files
find_program ( NC_M4 NAMES m4 m4.exe )
if ( NC_M4 )
  message ( STATUS "Found m4: ${NC_M4}" )
  set ( HAVE_M4 TRUE )
else ( )
  message ( STATUS "m4 not found." )
  set ( HAVE_M4 FALSE )
endif ( )

##specific
# Shell script Macro
##
# Determine if 'bash' is on the system.
##
option ( ENABLE_BASH_SCRIPT_TESTING "Detection is typically automatic, but this option can be used to force enable/disable bash-script based tests." ON )
if ( ENABLE_BASH_SCRIPT_TESTING )
  find_program ( HAVE_BASH bash )
  if ( HAVE_BASH )
    string ( COMPARE EQUAL "${HAVE_BASH}" "C:/Windows/System32/bash.exe" IS_BASH_EXE )
    if ( NOT IS_BASH_EXE )
      message ( STATUS "Found bash: ${HAVE_BASH}" )
    else ( )
      message ( STATUS "Ignoring ${HAVE_BASH}" )
      set ( HAVE_BASH "" )
    endif ( )
  else ( )
    message ( STATUS "Bash shell not found; disabling shell script tests." )
  endif ( )
else ( ENABLE_BASH_SCRIPT_TESTING )
  set ( HAVE_BASH "" )
endif ( ENABLE_BASH_SCRIPT_TESTING )

# Create the config.h file.
configure_file ( "${netCDF_SOURCE_DIR}/config.h.cmake.in"
  "${netCDF_BINARY_DIR}/config.h" )
add_compile_definitions ( -DHAVE_CONFIG_H )
include_directories ( ${netCDF_BINARY_DIR} )
# End autotools-style checks for config.h

#####
# Set the true names of all the libraries, if customized by external project
#####
# Recurse into other subdirectories.
add_subdirectory ( "include" )
add_subdirectory ( libdispatch )
add_subdirectory ( libsrc )
if ( USE_PNETCDF )
  add_subdirectory ( libsrcp )
endif ( USE_PNETCDF )

if ( USE_NETCDF4 )
  add_subdirectory ( libsrc4 )
endif ( )

if ( USE_HDF5 )
  add_subdirectory ( libhdf5 )
endif ( USE_HDF5 )

if ( USE_HDF4 )
  add_subdirectory ( libhdf4 )
  add_subdirectory ( hdf4_test )
endif ( USE_HDF4 )

if ( ENABLE_DAP2 )
  add_subdirectory ( oc2 )
  add_subdirectory ( libdap2 )
endif ( )

if ( ENABLE_DAP4 )
  add_subdirectory ( libdap4 )
  add_subdirectory ( libncxml )
else ( )
  if ( ENABLE_S3_INTERNAL )
    add_subdirectory ( libncxml )
  endif ( )
endif ( )

if ( ENABLE_PLUGINS )
  add_subdirectory ( libncpoco )
endif ( )

if ( ENABLE_NCZARR )
  add_subdirectory ( libnczarr )
  file ( COPY ${netCDF_SOURCE_DIR}/unit_test/timer_utils.h
    DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/ )
  file ( COPY ${netCDF_SOURCE_DIR}/unit_test/timer_utils.c
    DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/ )
  file ( COPY ${netCDF_SOURCE_DIR}/nc_test4/test_filter.c
    DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/ )
  file ( COPY ${netCDF_SOURCE_DIR}/nc_test4/test_filter_misc.c
    DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/ )
  file ( COPY ${netCDF_SOURCE_DIR}/nc_test4/test_filter_repeat.c
    DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/ )
  file ( COPY ${netCDF_SOURCE_DIR}/nc_test4/test_filter_order.c
    DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/ )
  file ( COPY ${netCDF_SOURCE_DIR}/nc_test4/tst_multifilter.c
    DESTINATION ${netCDF_BINARY_DIR}/nczarr_test/ )
endif ( )
add_subdirectory ( liblib )
if ( ENABLE_PLUGINS )
  add_subdirectory ( plugins )
endif ( )

# For tests and utilities, we are no longer
# exporting symbols but rather importing them.
if ( BUILD_DLL )
  remove_definitions ( -DDLL_EXPORT )
endif ( )

# Enable Utilities.
if ( BUILD_UTILITIES )
  include_directories ( ncdump )
  add_subdirectory ( ncgen )
  add_subdirectory ( ncgen3 )
  add_subdirectory ( ncdump )
endif ( )

# Enable tests
if ( ENABLE_TESTS )
  if ( ENABLE_V2_API )
    add_subdirectory ( nctest )
  endif ( )
  add_subdirectory ( nc_test )
  if ( USE_HDF5 )
    include_directories ( h5_test )
    add_subdirectory ( nc_test4 )
    add_subdirectory ( h5_test )
  endif ( )
  if ( ENABLE_DAP2 )
    add_subdirectory ( ncdap_test )
  endif ( )
  if ( ENABLE_DAP4 )
    add_subdirectory ( dap4_test )
  endif ( )
  if ( ENABLE_EXAMPLES )
    add_subdirectory ( examples )
  endif ( )
  if ( ENABLE_BENCHMARKS )
    add_subdirectory ( nc_perf )
  endif ( ENABLE_BENCHMARKS )
  if ( ENABLE_UNIT_TESTS )
    add_subdirectory ( unit_test )
  endif ( ENABLE_UNIT_TESTS )
  if ( ENABLE_NCZARR )
    add_subdirectory ( nczarr_test )
  endif ( )
endif ( )
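
# Illustrative only: after configuring with ENABLE_TESTS and building, the
# registered test suites can be run from the build directory with CTest, e.g.:
#   cmake --build . && ctest --output-on-failure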

# Code to generate an export header
#GENERATE_EXPORT_HEADER(netcdf
#  BASE_NAME netcdf
#  EXPORT_MACRO_NAME netcdf_EXPORT
#  EXPORT_FILE_NAME netcdf_Export.h
#  STATIC_DEFINE netcdf_BUILT_AS_STATIC
#)

#####
# Build doxygen documentation, if need be.
#####
add_subdirectory ( docs )

##
# Brute force, grab all of the dlls from the dependency directory,
# install them in the binary dir. Grab all of the .libs, put them
# in the libdir.
##
if ( MSVC )
  file ( GLOB COPY_FILES ${CMAKE_PREFIX_PATH}/lib/*.lib )
  install ( FILES ${COPY_FILES}
    DESTINATION ${CMAKE_INSTALL_LIBDIR}
    COMPONENT dependencies )

  file ( GLOB COPY_FILES ${CMAKE_PREFIX_PATH}/bin/*.dll )
  string ( REGEX REPLACE "msv[.*].dll" "" COPY_FILES "${COPY_FILES}" )
  install ( FILES ${COPY_FILES}
    DESTINATION ${CMAKE_INSTALL_BINDIR}
    COMPONENT dependencies )
endif ( )

# Subdirectory CMakeLists.txt files should specify their own
# 'install' files.
# Including 'CPack' kicks everything off.
include ( InstallRequiredSystemLibraries )
configure_file (
  ${CMAKE_CURRENT_SOURCE_DIR}/FixBundle.cmake.in
  ${CMAKE_CURRENT_BINARY_DIR}/FixBundle.cmake
  @ONLY
)

###
# Create pkgconfig files.
###
if ( NOT DEFINED CMAKE_INSTALL_LIBDIR )
  set ( CMAKE_INSTALL_LIBDIR lib )
endif ( NOT DEFINED CMAKE_INSTALL_LIBDIR )

# Set pkg-config substitution variables.
set ( prefix ${CMAKE_INSTALL_PREFIX} )
set ( exec_prefix ${CMAKE_INSTALL_PREFIX} )
set ( libdir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} )
set ( includedir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR} )
set ( CC ${CMAKE_C_COMPILER} )

# Process all dependency libraries and create a string
# used when parsing netcdf.pc.in
set ( NC_LIBS "" )
foreach ( _LIB ${ALL_TLL_LIBS} )
  get_filename_component ( _LIB_NAME ${_LIB} NAME_WE )
  string ( REGEX REPLACE "^lib" "" _NAME ${_LIB_NAME} )
  list ( APPEND NC_LIBS "-l${_NAME}" )
  get_filename_component ( _LIB_DIR ${_LIB} PATH )
  list ( APPEND LINKFLAGS "-L${_LIB_DIR}" )
endforeach ( )

#set(NC_LIBS "-lnetcdf ${NC_LIBS}")
if ( NC_LIBS )
  string ( REPLACE ";" " " NC_LIBS "${NC_LIBS}" )
  string ( REPLACE "-lhdf5::hdf5-shared" "-lhdf5" NC_LIBS ${NC_LIBS} )
  string ( REPLACE "-lhdf5::hdf5_hl-shared" "-lhdf5_hl" NC_LIBS ${NC_LIBS} )
  string ( REPLACE "-lhdf5::hdf5-static" "-lhdf5" NC_LIBS ${NC_LIBS} )
  string ( REPLACE "-lhdf5::hdf5_hl-static" "-lhdf5_hl" NC_LIBS ${NC_LIBS} )
endif ( )

string ( REPLACE ";" " " LINKFLAGS "${LINKFLAGS}" )

list ( REMOVE_DUPLICATES NC_LIBS )
list ( REMOVE_DUPLICATES LINKFLAGS )

set ( LIBS ${NC_LIBS} )
set ( NC_LIBS "-lnetcdf" )
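
# Illustrative only: after the processing above, LIBS holds the dependency link
# flags collected from ALL_TLL_LIBS (hypothetically something like
# "-lhdf5_hl -lhdf5 -lz -lm"), while NC_LIBS is reset to "-lnetcdf" for
# substitution into netcdf.pc.in.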

configure_file (
  ${netCDF_SOURCE_DIR}/netcdf.pc.in
  ${netCDF_BINARY_DIR}/netcdf.pc @ONLY )

if ( NOT IS_DIRECTORY ${netCDF_BINARY_DIR}/tmp )
  file ( MAKE_DIRECTORY ${netCDF_BINARY_DIR}/tmp )
endif ( )

configure_file ( "${netCDF_SOURCE_DIR}/nc-config.cmake.in"
  "${netCDF_BINARY_DIR}/tmp/nc-config" @ONLY
  NEWLINE_STYLE LF )
file ( COPY "${netCDF_BINARY_DIR}/tmp/nc-config"
  DESTINATION ${netCDF_BINARY_DIR}/
  FILE_PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE )

install ( FILES ${netCDF_BINARY_DIR}/netcdf.pc
  DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig
  COMPONENT utilities )

install ( PROGRAMS ${netCDF_BINARY_DIR}/nc-config
  DESTINATION ${CMAKE_INSTALL_BINDIR}
  COMPONENT utilities )
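
# Illustrative only: once installed, downstream builds can query compile and
# link flags through either generated artifact, e.g.:
#   pkg-config --cflags --libs netcdf
#   nc-config --cflags --libs --version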

###
# End pkgconfig, nc-config file creation.
###

##
# Print the configuration summary
##
print_conf_summary ( )

# Enable Makedist files.
ADD_MAKEDIST ( )
ENABLE_MAKEDIST ( README.md COPYRIGHT RELEASE_NOTES.md INSTALL INSTALL.cmake test_prog.c lib_flags.am cmake CMakeLists.txt COMPILE.cmake.txt config.h.cmake.in cmake_uninstall.cmake.in netcdf-config-version.cmake.in netcdf-config.cmake.in FixBundle.cmake.in nc-config.cmake.in configure configure.ac install-sh config.h.in config.sub CTestConfig.cmake.in )

#####
# Configure and print the libnetcdf.settings file.
#####

# Set variables to mirror those used by autoconf.
# This way we don't need to maintain two separate template
# files.
set ( host_cpu "${cpu}" )
set ( host_vendor "${osname}" )
set ( host_os "${osrel}" )
set ( abs_top_builddir "${CMAKE_CURRENT_BINARY_DIR}" )
set ( abs_top_srcdir "${CMAKE_CURRENT_SOURCE_DIR}" )

string ( RANDOM LENGTH 3 ALPHABET "0123456789" PLATFORMUID )
math ( EXPR PLATFORMUID "${PLATFORMUID} + 1" OUTPUT_FORMAT DECIMAL )
set ( CC_VERSION "${CMAKE_C_COMPILER}" )

# Build *FLAGS for libnetcdf.settings.
set ( CFLAGS "${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE}}" )
set ( CPPFLAGS "${CMAKE_CPP_FLAGS} ${CMAKE_CPP_FLAGS_${CMAKE_BUILD_TYPE}}" )
set ( LDFLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CMAKE_SHARED_LINKER_FLAGS_${CMAKE_BUILD_TYPE}}" )

is_disabled ( BUILD_SHARED_LIBS enable_static )
is_enabled ( BUILD_SHARED_LIBS enable_shared )

is_enabled ( ENABLE_V2_API HAS_NC2 )
is_enabled ( ENABLE_NETCDF_4 HAS_NC4 )
is_enabled ( ENABLE_HDF4 HAS_HDF4 )
is_enabled ( USE_HDF5 HAS_HDF5 )
is_enabled ( OFF HAS_BENCHMARKS )
is_enabled ( STATUS_PNETCDF HAS_PNETCDF )
is_enabled ( STATUS_PARALLEL HAS_PARALLEL )
is_enabled ( ENABLE_PARALLEL4 HAS_PARALLEL4 )
is_enabled ( ENABLE_DAP HAS_DAP )
is_enabled ( ENABLE_DAP2 HAS_DAP2 )
is_enabled ( ENABLE_DAP4 HAS_DAP4 )
is_enabled ( ENABLE_BYTERANGE HAS_BYTERANGE )
is_enabled ( ENABLE_DISKLESS HAS_DISKLESS )
is_enabled ( USE_MMAP HAS_MMAP )
is_enabled ( JNA HAS_JNA )
is_enabled ( ENABLE_ZERO_LENGTH_COORD_BOUND RELAX_COORD_BOUND )
is_enabled ( USE_CDF5 HAS_CDF5 )
is_enabled ( ENABLE_ERANGE_FILL HAS_ERANGE_FILL )
is_enabled ( HDF5_HAS_PAR_FILTERS HAS_PAR_FILTERS )
is_enabled ( ENABLE_S3 HAS_S3 )
is_enabled ( ENABLE_S3_AWS HAS_S3_AWS )
is_enabled ( ENABLE_S3_INTERNAL HAS_S3_INTERNAL )
is_enabled ( HAS_HDF5_ROS3 HAS_HDF5_ROS3 )
is_enabled ( ENABLE_NCZARR HAS_NCZARR )
is_enabled ( ENABLE_NCZARR_ZIP HAS_NCZARR_ZIP )
is_enabled ( ENABLE_NCZARR_ZIP DO_NCZARR_ZIP_TESTS )
is_enabled ( ENABLE_QUANTIZE HAS_QUANTIZE )
is_enabled ( ENABLE_LOGGING HAS_LOGGING )
is_enabled ( ENABLE_FILTER_TESTING DO_FILTER_TESTS )
is_enabled ( HAVE_SZ HAS_SZIP )
is_enabled ( HAVE_SZ HAS_SZLIB_WRITE )
is_enabled ( HAVE_ZSTD HAS_ZSTD )
is_enabled ( HAVE_BLOSC HAS_BLOSC )
is_enabled ( HAVE_BZ2 HAS_BZ2 )
is_enabled ( ENABLE_REMOTE_FUNCTIONALITY DO_REMOTE_FUNCTIONALITY )
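# The is_enabled() helper (from the netcdf_functions_macros module) maps each
# build option above onto a yes/no value that libnetcdf.settings reports below.
# A minimal sketch of the idea, assuming the macro simply mirrors the boolean
# (the authoritative definition lives in the module itself):
#
#   macro ( is_enabled feature ret_val )
#     if ( ${feature} )
#       set ( ${ret_val} "yes" CACHE STRING "" )
#     else ( )
#       set ( ${ret_val} "no" CACHE STRING "" )
#     endif ( )
#   endmacro ( )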
if ( ENABLE_S3_INTERNAL )
set ( WHICH_S3_SDK "internal" )
set ( NC_WHICH_S3_SDK "internal" )
elseif ( ENABLE_S3_AWS )
set ( WHICH_S3_SDK "aws-sdk-cpp" )
set ( NC_WHICH_S3_SDK "aws-sdk-cpp" )
else ( )
set ( WHICH_S3_SDK "none" )
set ( NC_WHICH_S3_SDK "none" )
endif ( )
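# Illustrative configure-time choices corresponding to the S3 SDK selection
# above (example invocations only, not an exhaustive list):
#
#   cmake -DENABLE_S3=ON -DENABLE_S3_INTERNAL=ON ..   # use the internal S3 SDK
#   cmake -DENABLE_S3=ON ..                           # use aws-sdk-cpp when it is detected
#   cmake -DENABLE_S3=OFF ..                          # build without S3 support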
if ( WITH_S3_TESTING STREQUAL PUBLIC )
set ( ENABLE_S3_TESTING "public" )
elseif ( WITH_S3_TESTING )
set ( ENABLE_S3_TESTING "yes" )
set ( ENABLE_S3_TESTALL "yes" )
elseif ( NOT WITH_S3_TESTING )
set ( ENABLE_S3_TESTING "no" )
else ( )
set ( ENABLE_S3_TESTING "no" )
endif ( )
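# Illustrative values for WITH_S3_TESTING and how the block above maps them:
#
#   cmake -DWITH_S3_TESTING=PUBLIC ..   # ENABLE_S3_TESTING = "public"
#   cmake -DWITH_S3_TESTING=ON ..       # ENABLE_S3_TESTING = "yes", ENABLE_S3_TESTALL = "yes"
#   cmake -DWITH_S3_TESTING=OFF ..      # ENABLE_S3_TESTING = "no"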
# The Unidata testing S3 bucket
# WARNING: this must match the value in configure.ac
set ( S3TESTBUCKET "unidata-zarr-test-data" CACHE STRING "S3 test bucket" )
# The working S3 path tree within the Unidata bucket.
# WARNING: this must match the value in configure.ac
set ( S3TESTSUBTREE "netcdf-c" CACHE STRING "Working S3 path." )
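# Both S3TESTBUCKET and S3TESTSUBTREE are cache entries, so an alternate bucket
# or prefix can be supplied at configure time, for example (illustrative values):
#
#   cmake -DS3TESTBUCKET=my-test-bucket -DS3TESTSUBTREE=my-prefix ..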
# Build a unique id based on the date
string ( TIMESTAMP TESTUID "%s" )
if ( ENABLE_S3_TESTING )
file ( APPEND "${CMAKE_CURRENT_BINARY_DIR}/s3cleanup_${PLATFORMUID}.uids" "${TESTUID}\n" )
endif ( )
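# Note: the "%s" format above yields seconds since the epoch, so TESTUID is a
# run-unique integer. Appending it to s3cleanup_${PLATFORMUID}.uids gives the
# s3cleanup.sh/s3gc.sh scripts configured below a record of which per-run S3
# objects can presumably be removed once testing is finished.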
# Copy the CTest customization file into binary directory, as required.
configure_file ( "${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/CTestCustom.cmake" )
# Generate libnetcdf.settings from its template.
configure_file ( "${CMAKE_CURRENT_SOURCE_DIR}/libnetcdf.settings.in"
"${CMAKE_CURRENT_BINARY_DIR}/libnetcdf.settings"
@ONLY )
# Read in settings file, print out.
# Avoid using system-specific calls so that this
# might also work on Windows.
file ( READ "${CMAKE_CURRENT_BINARY_DIR}/libnetcdf.settings"
LIBNETCDF_SETTINGS )
message ( STATUS ${ LIBNETCDF_SETTINGS } )
# Install libnetcdf.settings file into same location
# as the libraries.
install ( FILES "${netCDF_BINARY_DIR}/libnetcdf.settings"
DESTINATION "${CMAKE_INSTALL_LIBDIR}"
COMPONENT libraries )
#####
# End libnetcdf.settings section.
#####
#####
# Create 'netcdf_meta.h' include file.
#####
configure_file (
${netCDF_SOURCE_DIR}/include/netcdf_meta.h.in
${netCDF_BINARY_DIR}/include/netcdf_meta.h @ONLY )
#####
# Create 'netcdf_dispatch.h' include file.
#####
configure_file (
${netCDF_SOURCE_DIR}/include/netcdf_dispatch.h.in
${netCDF_BINARY_DIR}/include/netcdf_dispatch.h @ONLY NEWLINE_STYLE LF )
####
# Build test_common.sh
#####
set ( EXTRA_DIST ${ EXTRA_DIST } ${ CMAKE_CURRENT_SOURCE_DIR } /test_common.in )
set ( TOPSRCDIR "${CMAKE_CURRENT_SOURCE_DIR}" )
set ( TOPBUILDDIR "${CMAKE_CURRENT_BINARY_DIR}" )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /test_common.in ${ CMAKE_CURRENT_BINARY_DIR } /test_common.sh @ONLY NEWLINE_STYLE LF )
####
# Build s3cleanup.sh and s3gc.sh
#####
set ( EXTRA_DIST ${ EXTRA_DIST } ${ CMAKE_CURRENT_SOURCE_DIR } /s3cleanup.in ${ CMAKE_CURRENT_SOURCE_DIR } /s3gc.in )
set ( TOPSRCDIR "${CMAKE_CURRENT_SOURCE_DIR}" )
set ( TOPBUILDDIR "${CMAKE_CURRENT_BINARY_DIR}" )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /s3cleanup.in ${ CMAKE_CURRENT_BINARY_DIR } /s3cleanup.sh @ONLY NEWLINE_STYLE LF )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /s3gc.in ${ CMAKE_CURRENT_BINARY_DIR } /s3gc.sh @ONLY NEWLINE_STYLE LF )
#####
# Build and copy nc_test4/findplugin.sh to various places
#####
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /nc_test4/findplugin.in ${ CMAKE_CURRENT_BINARY_DIR } /nc_test4/findplugin.sh @ONLY NEWLINE_STYLE LF )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /nc_test4/findplugin.in ${ CMAKE_CURRENT_BINARY_DIR } /nczarr_test/findplugin.sh @ONLY NEWLINE_STYLE LF )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /nc_test4/findplugin.in ${ CMAKE_CURRENT_BINARY_DIR } /plugins/findplugin.sh @ONLY NEWLINE_STYLE LF )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /nc_test4/findplugin.in ${ CMAKE_CURRENT_BINARY_DIR } /examples/C/findplugin.sh @ONLY NEWLINE_STYLE LF )
if ( ENABLE_BENCHMARKS )
if ( ENABLE_PARALLEL4 )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /nc_perf/run_par_bm_test.sh.in ${ CMAKE_CURRENT_BINARY_DIR } /nc_perf/run_par_bm_test.sh @ONLY NEWLINE_STYLE LF )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /nc_perf/run_gfs_test.sh.in ${ CMAKE_CURRENT_BINARY_DIR } /nc_perf/run_gfs_test.sh @ONLY NEWLINE_STYLE LF )
endif ( ENABLE_PARALLEL4 )
endif ( ENABLE_BENCHMARKS )
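# Illustrative configuration for the parallel benchmark scripts above:
#
#   cmake -DENABLE_BENCHMARKS=ON -DENABLE_PARALLEL4=ON ..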
if ( ENABLE_TESTS )
#####
# Build ncdap_test|dap4_test/findtestserver[4].c
#####
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /ncdap_test/findtestserver.c.in ${ CMAKE_CURRENT_BINARY_DIR } /ncdap_test/findtestserver.c @ONLY NEWLINE_STYLE LF )
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /ncdap_test/findtestserver.c.in ${ CMAKE_CURRENT_BINARY_DIR } /dap4_test/findtestserver4.c @ONLY NEWLINE_STYLE LF )
#####
# Build dap4_test/pingurl4.c
#####
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /ncdap_test/pingurl.c ${ CMAKE_CURRENT_BINARY_DIR } /dap4_test/pingurl4.c @ONLY NEWLINE_STYLE LF )
#####
# Build CTestCustom.cmake to clean up S3 after tests are done.
#####
configure_file ( ${ CMAKE_CURRENT_SOURCE_DIR } /CTestCustom.cmake.in ${ CMAKE_CURRENT_BINARY_DIR } /CTestCustom.cmake NEWLINE_STYLE LF )
endif ( )
if ( DEFINED ENV{LIB_FUZZING_ENGINE} )
add_subdirectory ( fuzz )
endif ( DEFINED ENV{LIB_FUZZING_ENGINE} )
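# The fuzz targets are only added when the LIB_FUZZING_ENGINE environment
# variable is set, as OSS-Fuzz style builds do. An illustrative local
# invocation (assumed flags, not a supported recipe) might be:
#
#   LIB_FUZZING_ENGINE="-fsanitize=fuzzer" CC=clang cmake ..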
####
# Export files
####
# Create CMake package configuration files. With these, other packages using
# cmake should be able to find netcdf using find_package and find_library.
# The EXPORT call is paired with one in liblib.
set ( ConfigPackageLocation ${ CMAKE_INSTALL_LIBDIR } /cmake/netCDF )
install ( EXPORT netCDFTargets
DESTINATION ${ConfigPackageLocation}
COMPONENT headers
NAMESPACE netCDF::
)
export ( EXPORT netCDFTargets FILE netCDFTargets.cmake NAMESPACE netCDF:: )
include ( CMakePackageConfigHelpers )
configure_package_config_file (
"${CMAKE_CURRENT_SOURCE_DIR}/netCDFConfig.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/netCDFConfig.cmake"
INSTALL_DESTINATION "${ConfigPackageLocation}"
)
target_include_directories ( netcdf
PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
)
# Create export configuration
write_basic_package_version_file (
netCDFConfigVersion.cmake
VERSION ${netCDF_VERSION}
COMPATIBILITY SameMajorVersion
)
install (
FILES
"${CMAKE_CURRENT_BINARY_DIR}/netCDFConfig.cmake"
"${CMAKE_CURRENT_BINARY_DIR}/netCDFConfigVersion.cmake"
DESTINATION ${ConfigPackageLocation}
COMPONENT headers
)
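# Illustrative downstream usage once the package files above are installed
# (the exported targets carry the netCDF:: namespace):
#
#   find_package(netCDF CONFIG REQUIRED)
#   target_link_libraries(my_app PRIVATE netCDF::netcdf)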
####
# End export files
####
# CPack inclusion must come last.
option ( NETCDF_PACKAGE "Create netCDF-C package" ${ NETCDF_IS_TOP_LEVEL } )
if ( NETCDF_PACKAGE )
include ( CMakeInstallation.cmake )
endif ( )