231ae96c4b
* re: https://github.com/Unidata/netcdf-c/pull/2278
* re: https://github.com/Unidata/netcdf-c/issues/2485
* re: https://github.com/Unidata/netcdf-c/issues/2474

This PR subsumes PR https://github.com/Unidata/netcdf-c/pull/2278. It is actually a bit of an omnibus covering several issues.

## PR https://github.com/Unidata/netcdf-c/pull/2278

Add support for the Zarr string type. Zarr strings are currently restricted to a fixed size. The primary issue to address is providing a way for the user to specify the size of the fixed-length strings. This is handled by the following new special attributes (see the sketch at the end of this message):

1. **_nczarr_default_maxstrlen**: an attribute of the root group. It specifies the default maximum string length for string types. If not specified, it has the value of 64 characters.
2. **_nczarr_maxstrlen**: a per-variable attribute. It specifies the maximum string length for the string type associated with the variable. If not specified, it is assigned the value of **_nczarr_default_maxstrlen**.

This PR also requires some hacking to handle the existing netcdf-c NC_CHAR type, which does not exist in Zarr. The goal was to choose numpy types for both the netcdf-c NC_STRING type and the netcdf-c NC_CHAR type such that a pure Zarr implementation could still read them, with NC_CHAR handled by Zarr as a string of length 1. For writing variables and NCZarr attributes, the type mapping is as follows:

* "|S1" for NC_CHAR
* ">S1" for NC_STRING && MAXSTRLEN==1
* ">Sn" for NC_STRING && MAXSTRLEN==n

Note that it is a bit of a hack to use endianness this way, but it should be okay since endianness has no meaning for string/char types. When reading attributes with pure Zarr (i.e. with no NCZarr attribute types defined), they will always be interpreted as type NC_CHAR.

## Issue: https://github.com/Unidata/netcdf-c/issues/2474

This PR partly fixes this issue because it provides more comprehensive support for Zarr attributes that are JSON-valued expressions. It still does not address the problem in that issue where the _ARRAY_DIMENSIONS attribute is incorrectly set; that can only be fixed by the creator of the datasets.

## Issue: https://github.com/Unidata/netcdf-c/issues/2485

This PR also fixes the scalar failure shown in this issue and generally cleans up scalar handling. It also adds a note to the documentation describing that NCZarr supports scalars while Zarr does not, and explaining how scalar interoperability is achieved.

## Misc. Other Changes

1. Convert the NCZarr special attributes and keys to all lower case, so "_NCZARR_ATTR" is now "_nczarr_attr". Back compatibility with the upper-case names is retained.
2. Clean up my too-clever-by-half handling of scalars in libnczarr.
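A minimal sketch of how the maxstrlen attributes described above might be set through the netCDF C API. The file URL, the `#mode=nczarr,file` fragment, and the use of NC_INT as the attribute type are illustrative assumptions; only the attribute names and the default of 64 come from this PR:

```c
#include <netcdf.h>

int main(void) {
    int ncid, dimid, varid;
    int dflt = 128;   /* dataset-wide default max string length */
    int vlen = 32;    /* per-variable max string length */

    /* Hypothetical NCZarr target; any NCZarr-capable storage path works. */
    if (nc_create("file:///tmp/strings.zarr#mode=nczarr,file",
                  NC_NETCDF4 | NC_CLOBBER, &ncid))
        return 1;

    /* Root-group attribute: overrides the built-in default of 64. */
    nc_put_att_int(ncid, NC_GLOBAL, "_nczarr_default_maxstrlen",
                   NC_INT, 1, &dflt);

    nc_def_dim(ncid, "n", 10, &dimid);
    nc_def_var(ncid, "names", NC_STRING, 1, &dimid, &varid);

    /* Per-variable attribute: this variable's strings are stored as
       fixed-size strings, i.e. the numpy dtype ">S32" per the mapping
       described above. */
    nc_put_att_int(ncid, varid, "_nczarr_maxstrlen", NC_INT, 1, &vlen);

    return nc_close(ncid);
}
```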
Makefile
## This is an automake file, part of Unidata's netCDF package.
# Copyright 2018, see the COPYRIGHT file for more information.

# This file builds and runs the nc_test program, which tests the
# netCDF-3 API for all formats.

# Ed Hartnett, Dennis Heimbigner, Ward Fisher

# Uncomment to use a more verbose test driver
#SH_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
#sh_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
#LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
#TESTS_ENVIRONMENT = export SETX=1;

# Put together AM_CPPFLAGS and AM_LDFLAGS.
include $(top_srcdir)/lib_flags.am
AM_CPPFLAGS += -I$(top_srcdir)/libsrc
AM_CPPFLAGS += -DTOPSRCDIR=${abs_top_srcdir}
AM_CPPFLAGS += -DTOPBINDIR=${abs_top_builddir}
LDADD = ${top_builddir}/liblib/libnetcdf.la
AM_CPPFLAGS += -I$(top_builddir)/liblib -I$(top_builddir)/include -I$(top_srcdir)/libsrc

TEST_EXTENSIONS = .sh

# These are the tests which are always run.
TESTPROGRAMS = tst_names tst_nofill2 tst_nofill3 tst_meta \
tst_inq_type tst_utf8_validate tst_utf8_phrases tst_global_fillval \
tst_max_var_dims tst_formats tst_def_var_fill tst_err_enddef \
tst_default_format

# These are always built, but for parallel builds are run from a test
# script, because they are parallel-enabled tests.
check_PROGRAMS = t_nc tst_atts3 tst_nofill nc_test tst_small
# These tests are only run if pnetcdf is enabled.
if USE_PNETCDF
check_PROGRAMS += tst_parallel2 tst_pnetcdf tst_addvar \
tst_formatx_pnetcdf tst_default_format_pnetcdf
endif

if USE_PNETCDF
if ENABLE_CDF5
check_PROGRAMS += tst_cdf5format
endif
endif

# These are the source files for the main workhorse test program,
# nc_test. If you pass nc_test, you are doing well.
nc_test_SOURCES = nc_test.c error.c test_get.c test_put.c test_read.c \
test_write.c util.c error.h tests.h

# If the user asked for large file tests, then add them.
if LARGE_FILE_TESTS
TESTPROGRAMS += quick_large_files tst_big_var6 tst_big_var2 \
tst_big_rvar tst_big_var tst_large large_files
endif # LARGE_FILE_TESTS

if BUILD_BENCHMARKS
TESTPROGRAMS += testnc3perf
endif

if USE_HDF5
TESTPROGRAMS += tst_diskless6
endif

# Set up the tests.
check_PROGRAMS += $(TESTPROGRAMS)
# Build Diskless test helpers
check_PROGRAMS += tst_diskless tst_diskless3 tst_diskless4 \
tst_diskless5 tst_inmemory tst_open_mem
if USE_HDF5
check_PROGRAMS += tst_diskless2
endif

TESTS = $(TESTPROGRAMS)

if BUILD_UTILITIES

if ENABLE_BYTERANGE
if ENABLE_EXTERNAL_SERVER_TESTS
tst_byterange_SOURCES = tst_byterange.c
check_PROGRAMS += tst_byterange
TESTS += test_byterange.sh
endif
endif

TESTS += run_diskless.sh run_diskless5.sh run_inmemory.sh
if LARGE_FILE_TESTS
if ! ENABLE_PARALLEL
TESTS += run_diskless2.sh
endif
endif

if BUILD_MMAP
TESTS += run_mmap.sh
run_mmap.log: run_diskless.log
endif

# If pnetcdf is enabled, these tests are run by a test
# script. Otherwise, they are run by automake in the usual way.
if USE_PNETCDF
TESTS += run_pnetcdf_tests.sh
else
TESTS += t_nc tst_atts3 tst_nofill nc_test tst_small
endif

endif # BUILD_UTILITIES
# The .c files that are generated with m4 are already distributed, but
# we also include the original m4 files, plus test scripts and data.
EXTRA_DIST = test_get.m4 test_put.m4 run_diskless.sh run_diskless2.sh \
run_diskless5.sh run_mmap.sh test_read.m4 \
test_write.m4 ref_tst_diskless2.cdl tst_diskless5.cdl \
ref_tst_diskless3_create.cdl ref_tst_diskless3_open.cdl \
run_inmemory.sh f03tst_open_mem.nc test_byterange.sh \
ref_tst_http_nc3.cdl ref_tst_http_nc4a.cdl ref_tst_http_nc4b.cdl \
ref_tst_http_nc4c.cdl CMakeLists.txt run_pnetcdf_tests.sh.in \
tst_misc.c tst_norm.c

# These files are created by the tests.
CLEANFILES = nc_test_*.nc tst_*.nc t_nc.nc large_files.nc \
quick_large_files.nc tst_diskless3_file.cdl \
tst_diskless4.cdl ref_tst_diskless4.cdl benchmark.nc \
tst_http_nc3.cdl tst_http_nc4?.cdl tmp*.cdl tmp*.nc

EXTRA_DIST += bad_cdf5_begin.nc run_cdf5.sh nc_enddef.cdl
if ENABLE_CDF5
# bad_cdf5_begin.nc is a corrupted CDF-5 file with bad variable starting
# file offsets. It is used by tst_open_cdf5.c to check that the library
# detects the corruption and reports error code NC_ENOTNC.
TESTS += run_cdf5.sh
check_PROGRAMS += tst_open_cdf5
if LARGE_FILE_TESTS
TESTPROGRAMS += tst_large_cdf5 tst_cdf5_begin
endif
endif
# Only clean these on maintainer-clean, because they require m4 to
# regenerate.
MAINTAINERCLEANFILES = test_get.c test_put.c

# This rule tells make how to turn our .m4 files into .c files.
.m4.c:
	m4 $(AM_M4FLAGS) $(M4FLAGS) $< >$@

# If valgrind is present, add valgrind targets.
@VALGRIND_CHECK_RULES@