## This is a automake file, part of Unidata's netCDF package.
# Copyright 2018, see the COPYRIGHT file for more information.

# This file builds and runs the nc_test program, which tests the
# netCDF-3 API for all formats.

# Ed Hartnett, Dennis Heimbigner, Ward Fisher

# Uncomment to use a more verbose test driver
#SH_LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose
#LOG_DRIVER = $(SHELL) $(top_srcdir)/test-driver-verbose

# Put together AM_CPPFLAGS and AM_LDFLAGS.
include $(top_srcdir)/lib_flags.am

AM_CPPFLAGS += -I$(top_srcdir)/libsrc
AM_CPPFLAGS += -DTOPSRCDIR=${abs_top_srcdir}
AM_CPPFLAGS += -DTOPBINDIR=${abs_top_bindir}
LDADD = ${top_builddir}/liblib/libnetcdf.la
AM_CPPFLAGS += -I$(top_builddir)/liblib -I$(top_builddir)/include -I$(top_srcdir)/libsrc

TEST_EXTENSIONS = .sh

check_PROGRAMS =

# These are the tests which are always run.
TESTPROGRAMS = t_nc tst_small nc_test tst_misc tst_norm tst_names \
tst_nofill tst_nofill2 tst_nofill3 tst_atts3 tst_meta tst_inq_type \
tst_utf8_validate tst_utf8_phrases tst_global_fillval \
tst_max_var_dims tst_formats tst_def_var_fill tst_err_enddef \
tst_default_format

if USE_PNETCDF
check_PROGRAMS += tst_parallel2 tst_pnetcdf tst_addvar
TESTPROGRAMS += tst_formatx_pnetcdf tst_default_format_pnetcdf
endif

if TEST_PARALLEL4
if USE_PNETCDF
if ENABLE_CDF5
TESTPROGRAMS += tst_cdf5format
endif
endif
endif

# These are the source files for the main workhorse test program,
# nc_test. If you pass nc_test, you are doing well.
nc_test_SOURCES = nc_test.c error.c test_get.c test_put.c test_read.c \
test_write.c util.c error.h tests.h

# If the user asked for large file tests, then add them.
if LARGE_FILE_TESTS
TESTPROGRAMS += quick_large_files tst_big_var6 tst_big_var2 \
tst_big_rvar tst_big_var tst_large large_files
endif # LARGE_FILE_TESTS

if BUILD_BENCHMARKS
TESTPROGRAMS += testnc3perf
endif

if USE_HDF5
TESTPROGRAMS += tst_diskless6
endif

if ENABLE_DAP_REMOTE_TESTS
if ENABLE_BYTERANGE
TESTPROGRAMS += tst_byterange
tst_byterange_SOURCES = tst_byterange.c
endif
endif

# Set up the tests.
check_PROGRAMS += $(TESTPROGRAMS)

# Build Diskless test helpers
check_PROGRAMS += tst_diskless tst_diskless3 tst_diskless4 \
tst_diskless5 tst_inmemory tst_open_mem

if USE_HDF5
check_PROGRAMS += tst_diskless2
endif

TESTS = $(TESTPROGRAMS)

if BUILD_UTILITIES
TESTS += run_diskless.sh run_diskless5.sh run_inmemory.sh
if LARGE_FILE_TESTS
TESTS += run_diskless2.sh
endif
if ENABLE_BYTERANGE
TESTS += test_byterange.sh
endif
if BUILD_MMAP
TESTS += run_mmap.sh
# Serialize with run_diskless.sh: both tests touch the same scratch files,
# so force an ordering between their .log targets under parallel make.
run_mmap.log: run_diskless.log
endif
endif

if USE_PNETCDF
TESTS += run_pnetcdf_test.sh
endif

# The .c files that are generated with m4 are already distributed, but
# we also include the original m4 files, plus test scripts data.
EXTRA_DIST = test_get.m4 test_put.m4 run_diskless.sh run_diskless2.sh \
run_diskless5.sh run_mmap.sh run_pnetcdf_test.sh test_read.m4 \
test_write.m4 ref_tst_diskless2.cdl tst_diskless5.cdl \
ref_tst_diskless3_create.cdl ref_tst_diskless3_open.cdl \
run_inmemory.sh \
f03tst_open_mem.nc \
test_byterange.sh ref_tst_http_nc3.cdl ref_tst_http_nc4.cdl \
CMakeLists.txt

# These files are created by the tests.
CLEANFILES = nc_test_*.nc tst_*.nc t_nc.nc large_files.nc \
quick_large_files.nc tst_diskless3_file.cdl \
tst_diskless4.cdl ref_tst_diskless4.cdl benchmark.nc \
tst_http_nc3.cdl tst_http_nc4.cdl tmp*.cdl tmp*.nc

EXTRA_DIST += bad_cdf5_begin.nc run_cdf5.sh
if ENABLE_CDF5
# bad_cdf5_begin.nc is a corrupted CDF-5 file with bad variable starting
# file offsets. It is to be used by tst_open_cdf5.c to check if it can
# detect and report error code NC_ENOTNC.
TESTS += run_cdf5.sh
check_PROGRAMS += tst_open_cdf5
if LARGE_FILE_TESTS
TESTPROGRAMS += tst_large_cdf5 tst_cdf5_begin
endif
endif

# Only clean these on maintainer-clean, because they require m4 to
# regenerate.
MAINTAINERCLEANFILES = test_get.c test_put.c

# This rule tells make how to turn our .m4 files into .c files.
.m4.c:
	m4 $(AM_M4FLAGS) $(M4FLAGS) $< >$@

# If valgrind is present, add valgrind targets.
@VALGRIND_CHECK_RULES@