2012-04-10 06:03:02 +08:00
|
|
|
# Test c output
# Base name (no .c suffix) of the single test program to build and run.
T=tst_diskless5
|
re e-support UBS-599337
re pull request https://github.com/Unidata/netcdf-c/pull/405
re pull request https://github.com/Unidata/netcdf-c/pull/446
Notes:
1. This branch is a cleanup of the magic.dmh branch.
2. magic.dmh was originally merged, but caused problems with parallel IO.
It was re-issued as pull request https://github.com/Unidata/netcdf-c/pull/446.
3. This branch + pull request replace any previous pull requests and magic.dmh branch.
Given an otherwise valid netCDF file that has a corrupted header,
the netcdf library currently crashes. Instead, it should return
NC_ENOTNC.
Additionally, the NC_check_file_type code does not do the
forward search required by hdf5 files. It currently only looks
at file position 0 instead of 512, 1024, 2048,... Also, it turns
out that the HDF4 magic number is assumed to always be at the
beginning of the file (unlike HDF5).
The change is localized to libdispatch/dfile.c See
https://support.hdfgroup.org/release4/doc/DSpec_html/DS.pdf
Also, it turns out that the code in NC_check_file_type is duplicated
(mostly) in the function libsrc4/nc4file.c#nc_check_for_hdf.
This branch does the following.
1. Make NC_check_file_type return NC_ENOTNC instead of crashing.
2. Remove nc_check_for_hdf and centralize all file format checking
NC_check_file_type.
3. Add proper forward search for HDF5 files (but not HDF4 files)
to look for the magic number at offsets of 0, 512, 1024...
4. Add test tst_hdf5_offset.sh. This tests that hdf5 files with
an offset are properly recognized. It does so by prefixing
a legal file with some number of zero bytes: 512, 1024, etc.
5. Off-topic: Added -N flag to ncdump to force a specific output dataset name.
2017-10-25 06:25:09 +08:00
|
|
|
|
2018-05-31 04:47:37 +08:00
|
|
|
# HDF5 version knob: enable exactly one; consumed by the ifdef blocks
# below to pick the matching install prefix (H5L).
#H58=8
H510=10
|
|
|
|
|
2018-10-31 10:48:12 +08:00
|
|
|
# Optional arguments forwarded to the test binary by "all" (disabled).
#ARGS=diskless persist
|
re e-support UBS-599337
re pull request https://github.com/Unidata/netcdf-c/pull/405
re pull request https://github.com/Unidata/netcdf-c/pull/446
Notes:
1. This branch is a cleanup of the magic.dmh branch.
2. magic.dmh was originally merged, but caused problems with parallel IO.
It was re-issued as pull request https://github.com/Unidata/netcdf-c/pull/446.
3. This branch + pull request replace any previous pull requests and magic.dmh branch.
Given an otherwise valid netCDF file that has a corrupted header,
the netcdf library currently crashes. Instead, it should return
NC_ENOTNC.
Additionally, the NC_check_file_type code does not do the
forward search required by hdf5 files. It currently only looks
at file position 0 instead of 512, 1024, 2048,... Also, it turns
out that the HDF4 magic number is assumed to always be at the
beginning of the file (unlike HDF5).
The change is localized to libdispatch/dfile.c See
https://support.hdfgroup.org/release4/doc/DSpec_html/DS.pdf
Also, it turns out that the code in NC_check_file_type is duplicated
(mostly) in the function libsrc4/nc4file.c#nc_check_for_hdf.
This branch does the following.
1. Make NC_check_file_type return NC_ENOTNC instead of crashing.
2. Remove nc_check_for_hdf and centralize all file format checking
NC_check_file_type.
3. Add proper forward search for HDF5 files (but not HDF4 files)
to look for the magic number at offsets of 0, 512, 1024...
4. Add test tst_hdf5_offset.sh. This tests that hdf5 files with
an offset are properly recognized. It does so by prefixing
a legal file with some number of zero bytes: 512, 1024, etc.
5. Off-topic: Added -N flag to ncdump to force a specific output dataset name.
2017-10-25 06:25:09 +08:00
|
|
|
|
2018-10-11 03:32:17 +08:00
|
|
|
# Extra source files compiled alongside ${T}.c (currently none).
#SRC=
|
2018-05-23 06:50:52 +08:00
|
|
|
|
2018-09-05 01:27:47 +08:00
|
|
|
# Wrapper command used to launch the test binary; enable exactly one.
#CMD=env HDF5_DEBUG=trace
#CMD=export NETCDF_LOG_LEVEL=5 ;gdb --args
CMD=valgrind --leak-check=full
#CMD=gdb --args
|
2015-11-06 04:40:35 +08:00
|
|
|
|
2018-02-26 12:45:31 +08:00
|
|
|
# Uncomment to build with the MPI (parallel I/O) toolchain instead of gcc.
#PAR=1
|
2015-08-16 06:26:35 +08:00
|
|
|
|
2018-05-31 04:47:37 +08:00
|
|
|
# Resolve the HDF5 install prefix from the version knob chosen above.
ifdef H58
H5L=/usr/local
endif
ifdef H510
H5L=/opt
endif
|
|
|
|
|
2018-02-26 12:45:31 +08:00
|
|
|
# Debug-friendly flags (-g -O0); headers come from the build tree (.. and ../include).
CFLAGS=-Wall -Wno-unused-variable -Wno-unused-function -g -O0 -I.. -I../include
|
2012-04-10 06:03:02 +08:00
|
|
|
|
2016-05-04 11:17:06 +08:00
|
|
|
# Compiler and link line: MPI build when PAR is set, plain serial gcc otherwise.
# Both variants link the locally built static libnetcdf plus HDF5 from ${H5L}.
ifdef PAR
CC=mpicc
#CC=/usr/local/bin/mpicc
LDFLAGS=../liblib/.libs/libnetcdf.a -L${H5L}/lib -lhdf5_hl -lhdf5 -lz -ldl -lcurl -lpnetcdf -lmpich -lm
else
CC=gcc
#LDFLAGS=../liblib/.libs/libnetcdf.a -L${H5L}/lib -lhdf5_hl -lhdf5 -lz -lm -lcurl
LDFLAGS=../liblib/.libs/libnetcdf.a -L${H5L}/lib -lhdf5_hl -lhdf5 -lz -ldl -lm -lcurl
endif
|
2012-04-10 06:03:02 +08:00
|
|
|
|
|
|
|
# Reminder: build the library first with "cd .. ; ${MAKE} all".
|
|
|
|
|
2018-05-31 04:47:37 +08:00
|
|
|
# Runtime library search path: HDF5 libs first, then the inherited path.
LLP=${H5L}/lib:${LD_LIBRARY_PATH}
|
2015-08-16 06:26:35 +08:00
|
|
|
|
2016-05-04 11:17:06 +08:00
|
|
|
# Default goal: (re)compile the test binary via "cmp", then run it
# under ${CMD} with ${ARGS}, using the locally built HDF5 at runtime.
all:: cmp
	export LD_LIBRARY_PATH=${LLP}; export CFLAGS; export LDFLAGS; \
	${CMD} ./t ${ARGS}
|
2015-08-16 06:26:35 +08:00
|
|
|
|
2016-05-04 11:17:06 +08:00
|
|
|
# Compile ${T}.c (plus any ${SRC}) into ./t against the in-tree libnetcdf.
# Fix: the recipe previously ended with a dangling "; \" line continuation
# that ran off into nothing — terminate the command properly.
cmp::
	export LD_LIBRARY_PATH=${LLP}; export CFLAGS; export LDFLAGS; \
	${CC} -o t ${CFLAGS} ${T}.c ${SRC} ${LDFLAGS}
|
|
|
|
|
2015-08-16 06:26:35 +08:00
|
|
|
# Preprocess-only: write the expanded source of ${T}.c to ${T}.txt.
cpp::
	${CC} -E ${CFLAGS} ${T}.c > ${T}.txt
|
2018-10-31 10:48:12 +08:00
|
|
|
|
|
|
|
# Tests built by "several"; enable the long list to build them all.
#TS = tst_diskless tst_diskless2 tst_diskless3 tst_diskless4 tst_diskless5 tst_diskless6
TS = tst_diskless5
# Build one binary per test listed in ${TS}.
# Fix: the loop body ignored its loop variable and used ${TS} for both the
# output name and the source file, which produces a broken compile line as
# soon as TS holds more than one test; use the shell variable $$f instead.
several::
	export LD_LIBRARY_PATH=${LLP}; export CFLAGS; export LDFLAGS; \
	for f in ${TS} ; do ${CC} -o $$f ${CFLAGS} $$f.c ${SRC} ${LDFLAGS}; done
|
|
|
|
|