Mirror of https://github.com/Unidata/netcdf-c.git (synced 2025-01-30 16:10:44 +08:00)

Commit 39ccdc5ee3: "conflicts"
@@ -678,7 +678,7 @@ ELSE()
 ENDIF()

 # Enable some developer-only tests
-OPTION(ENABLE_EXTRA_TESTS "Enable Extra tests. Some may not work because of known issues. Developers only." ON)
+OPTION(ENABLE_EXTRA_TESTS "Enable Extra tests. Some may not work because of known issues. Developers only." OFF)
 IF(ENABLE_EXTRA_TESTS)
 SET(EXTRA_TESTS ON)
 ENDIF()
@@ -7,12 +7,13 @@ This file contains a high-level description of this package's evolution. Release

 ## 4.4.0 Released TBD

-### 4.4.0-RC3 In Progress
+### 4.4.0-RC3 2015-10-08

 * Addressed an inefficiency in how bytes would be swapped when converting between `LITTLE` and `BIG` ENDIANNESS. See [NCF-338](https://bugtracking.unidata.ucar.edu/browse/NCF-338) for more information.
 * Addressed an issue where an interrupted read on a `POSIX` system would return an error even if errno had been properly set to `EINTR`. This issue was initially reported by David Knaak at Cray. More information may be found at [NCF-337](https://bugtracking.unidata.ucar.edu/browse/NCF-337).
 * Added a note to the install directions pointing out that parallel make
 cannot be used for 'make check'.
 * Many miscellaneous bug fixes.

 ### 4.4.0-RC2 Released 2015-07-09
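The `EINTR` fix noted above (NCF-337) follows the usual POSIX pattern of retrying a read that failed only because a signal interrupted it. A minimal sketch of that pattern, not taken from the library's sources:

    #include <errno.h>
    #include <unistd.h>

    /* Retry read() while it fails only because a signal interrupted it. */
    static ssize_t read_retry(int fd, void *buf, size_t count)
    {
        ssize_t n;
        do {
            n = read(fd, buf, count);
        } while (n == -1 && errno == EINTR);
        return n;
    }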
cf (20 lines changed)
@@ -2,6 +2,7 @@
 #X="-x"
 #NB=1
 DB=1

 FAST=1

 HDF5=1
@@ -139,6 +140,20 @@ else
 FLAGS="$FLAGS --enable-shared"
 fi

+TMP=
+rm -f ./test_mpi.tmp
+if test -f $stddir/lib/libhdf5.a ; then
+nm $stddir/lib/libhdf5.a | grep mpich_mpi_float$ >./test_mpi.tmp
+else
+if test -f $stddir/lib/libhdf5.so ; then
+nm $stddir/lib/libhdf5.so | grep mpich_mpi_float$ >./test_mpi.tmp
+fi
+fi
+if test -s ./test_mpi.tmp ; then
+PAR=1
+fi
+rm -f ./test_mpi.tmp
+
 if test "x${PAR}" != x ; then
 FLAGS="$FLAGS --enable-parallel"
 fi
@@ -159,7 +174,7 @@ if test "x$RPC" = "x1" ; then
 FLAGS="$FLAGS --enable-rpc"
 fi

-if test "x$PNETCDF" = x1 ; then
+if test "x$PAR" = x1 -o "x$PNETCDF" = x1 ; then
 if test -f /machine/local_mpich2 ; then
 MPI1=/machine/local_mpich2
 MPI2=/machine/local_par7
@@ -173,7 +188,7 @@ PATH=${PATH}:${MPI1}/bin
 CC="${MPI1}/bin/mpicc"
 CPPFLAGS="-I${MPI2}/include -I${MPI1}/include -I${MPI3}/include"
 LDFLAGS="-L${MPI2}/lib -L${MPI1}/lib -L${MPI3}/lib"
-LDLIBS="-lmpich -lmpl"
+LDLIBS="-lmpich"
 FLAGS="$FLAGS --enable-pnetcdf"
 FLAGS="$FLAGS --enable-parallel-tests"
 fi
@@ -204,6 +219,7 @@ if test -z "$FAST" ; then
 fi
 sh $X ./configure ${FLAGS}
 for c in $cmds; do
+printenv LD_LIBRARY_PATH
 ${MAKE} ${FORCE} $c
 done
 exit 0
configure.ac (24 lines changed)
@@ -439,8 +439,8 @@ AM_CONDITIONAL(INTERNAL_OCLIB,[test "x" = "x"])
 # Does the user want to do some extra tests?
 AC_MSG_CHECKING([whether netCDF extra tests should be run (developers only)])
 AC_ARG_ENABLE([extra-tests],
-[AS_HELP_STRING([--disable-extra-tests],
-[disable some extra tests that may not pass because of known issues])])
+[AS_HELP_STRING([--enable-extra-tests],
+[enable some extra tests that may not pass because of known issues])])
 test "x$enable_extra_tests" = xno || enable_extra_tests=yes
 AC_MSG_RESULT($enable_extra_tests)
 if test "x$enable_extra_tests" = xyes; then
@@ -845,13 +845,33 @@ AC_TYPE_OFF_T
 AC_CHECK_TYPES([ssize_t, ptrdiff_t, uchar, longlong])
 AC_C_CHAR_UNSIGNED
 AC_C_BIGENDIAN

+###
+# Crude hack to work around an issue
+# in Cygwin.
+###
+SLEEPCMD=""
+PLTFORMOUT="$(uname | cut -d '_' -f 1)"
+if test "$PLTFORMOUT" = "CYGWIN"; then
+SLEEPCMD="sleep 5"
+AC_MSG_NOTICE([Pausing between sizeof() checks to mitigate a Cygwin issue.])
+fi
+
+$SLEEPCMD
 AC_CHECK_SIZEOF(short)
+$SLEEPCMD
 AC_CHECK_SIZEOF(int)
+$SLEEPCMD
 AC_CHECK_SIZEOF(long)
+$SLEEPCMD
 AC_CHECK_SIZEOF(long long)
+$SLEEPCMD
 AC_CHECK_SIZEOF(float)
+$SLEEPCMD
 AC_CHECK_SIZEOF(double)
+$SLEEPCMD
 AC_CHECK_SIZEOF(off_t)
+$SLEEPCMD
 AC_CHECK_SIZEOF(size_t)
 AC_CHECK_SIZEOF(ptrdiff_t)

@@ -762,6 +762,7 @@ INPUT = \
 @abs_top_srcdir@/docs/all-error-codes.md \
 @abs_top_srcdir@/docs/FAQ.md \
 @abs_top_srcdir@/docs/software.md \
+@abs_top_srcdir@/docs/known_problems.md \
 @abs_top_srcdir@/docs/COPYRIGHT.dox \
 @abs_top_srcdir@/docs/credits.md \
 @abs_top_srcdir@/include/netcdf.h \
@@ -36,7 +36,7 @@ NetCDF data is:
 The netCDF software was developed by Glenn Davis, Russ Rew, Ed Hartnett,
 John Caron, Dennis Heimbigner, Steve Emmerson, Harvey Davies, and Ward
 Fisher at the Unidata Program Center in Boulder, Colorado, with
-[contributions](/netcdf/credits.html) from many other netCDF users.
+[contributions](http://www.unidata.ucar.edu/software/netcdf/docs/credits.html) from many other netCDF users.

 ----------

@@ -1176,7 +1176,7 @@ Was it possible to create netCDF files larger than 2 GiBytes before version 3.6?

 Yes, but there are significant restrictions on the structure of large
 netCDF files that result from the 32-bit relative offsets that are part
-of the classic netCDF format. For details, see [NetCDF Classic Format Limitations](netcdf/NetCDF-Classic-Format-Limitations.html#NetCDF-Classic-Format-Limitations)
+of the classic netCDF format. For details, see [NetCDF Classic Format Limitations](http://www.unidata.ucar.edu/software/netcdf/documentation/historic/netcdf/NetCDF-Classic-Format-Limitations.html#NetCDF-Classic-Format-Limitations)
 in the User's Guide.

 ----------
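The offset restrictions described in that FAQ entry come from the classic (CDF-1) format; the 64-bit-offset (CDF-2) variant relaxes them. A minimal sketch of creating such a file with the C API; the file, dimension, and variable names here are hypothetical:

    #include <netcdf.h>

    /* Create a 64-bit-offset (CDF-2) file so that large fixed-size
       variables are not constrained by 32-bit offsets. */
    int make_big_file(void)
    {
        int ncid, dimid, varid, status;
        if ((status = nc_create("big.nc", NC_CLOBBER | NC_64BIT_OFFSET, &ncid)))
            return status;
        if ((status = nc_def_dim(ncid, "n", 1000000000, &dimid)))
            return status;
        if ((status = nc_def_var(ncid, "x", NC_DOUBLE, 1, &dimid, &varid)))
            return status;
        if ((status = nc_enddef(ncid)))
            return status;
        return nc_close(ncid);
    }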
@@ -1441,7 +1441,7 @@ purposes.

 It is also possible to overcome the 4 GiB variable restriction for a
 single fixed size variable, when there are no record variables, by
-making it the last variable, as explained in the example in [NetCDF Classic Format Limitations](netcdf/NetCDF-Classic-Format-Limitations.html#NetCDF-Classic-Format-Limitations).
+making it the last variable, as explained in the example in [NetCDF Classic Format Limitations](http://www.unidata.ucar.edu/software/netcdf/documentation/historic/netcdf/NetCDF-Classic-Format-Limitations.html#NetCDF-Classic-Format-Limitations).

 ----------

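A short sketch of the "last variable" trick that entry describes: with no record variables, only the variable defined last may exceed the 4 GiB limit, so the oversized variable is deliberately defined after all the others. Dimension and variable names here are hypothetical:

    #include <netcdf.h>

    /* ncid is assumed to come from nc_create(..., NC_64BIT_OFFSET, ...). */
    int define_vars(int ncid)
    {
        int small_dim, big_dim, varid, status;
        if ((status = nc_def_dim(ncid, "nsmall", 1000, &small_dim)))
            return status;
        if ((status = nc_def_dim(ncid, "nbig", 800000000, &big_dim)))
            return status;
        /* Ordinary variables first ... */
        if ((status = nc_def_var(ncid, "small", NC_INT, 1, &small_dim, &varid)))
            return status;
        /* ... and the one fixed-size variable larger than 4 GiB goes last. */
        return nc_def_var(ncid, "huge", NC_DOUBLE, 1, &big_dim, &varid);
    }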
@@ -1560,7 +1560,7 @@ Data Center at Goddard, and is freely available. It was originally a VMS
 FORTRAN interface for scientific data access. Unidata reimplemented the
 library from scratch to use [XDR](http://www.faqs.org/rfcs/rfc1832.html)
 for a machine-independent representation, designed the
-[CDL](netcdf/CDL-Syntax.htm) (network Common Data form Language) text
+[CDL](http://www.unidata.ucar.edu/software/netcdf/documentation/historic/netcdf/CDL-Syntax.htm) (network Common Data form Language) text
 representation for netCDF data, and added aggregate data access, a
 single-file implementation, named dimensions, and variable-specific
 attributes.
@@ -25,9 +25,7 @@ while converting it into texinfo, and is the primary developer for
 netCDF-4. Dennis Heimbigner wrote the netCDF-4 version of ncgen, the C
 OPeNDAP client, the dispatch layer, and the implementation of diskless
 files. The nccopy utility was added by Russ Rew. Lynton Appel developed
-the C++ implementation for netCDF-4. Ward Fisher developed the netCDF
-Windows/Microsoft Visual Studio port and implemented the portable
-CMake-based netCDF build and test system.
+the C++ implementation for netCDF-4. Ward Fisher overhauled netCDF release-engineering, developed a new build-and-test framework using CMake, virtualization, and container technologies, moved sources to GitHub, developed a Windows/Microsoft Visual Studio port, refactored documentation for improved web access, and merged all the documentation into the sources for maintaining with Markdown and Doxygen.

 The following people have contributed related software, bug reports,
 fixes, valuable suggestions, and other kinds of useful support:
docs/known_problems.md (new file, 1463 lines)
File diff suppressed because it is too large.
@@ -28,8 +28,9 @@ $extrastylesheet
 "</form>";

 var $linkMenu = "<select id=\"versions\">" +
-" <option value=\"http://www.unidata.ucar.edu\">$projectnumber</option>" +
-" <option value=\"http://www.unidata.ucar.edu/software/netcdf\">some other project number</option>" +
+" <option value=\"http://www.unidata.ucar.edu/software/netcdf/docs\">Current</option>" +
+" <option value=\"http://www.unidata.ucar.edu/software/netcdf/documentation/$projectnumber\">$projectnumber</option>" +
+" <option value=\"http://www.unidata.ucar.edu/software/netcdf/documentation/historic\">Historic Documentation</option>" +
 "</select>";

 $("#navrow1 ul.tablist").append('<li class=\"linkMenu\"><span class="tab">' + $linkMenu + '</span></li>');
@@ -1127,7 +1127,7 @@ command line operators that work on generic netCDF or HDF4 files:
 - ncrename - renamer
 - ncwa - weighted averager

-All operators may now be [OPeNDAP](www.opendao.org) clients. OPeNDAP
+All operators may now be [OPeNDAP](http://www.opendap.org) clients. OPeNDAP
 enables network transparent data access to any OPeNDAP server. Thus
 OPeNDAP-enabled NCO can operate on remote files accessible through any
 OPeNDAP server without transferring the files. Only the required data
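The OPeNDAP access described in that passage is the same kind of client access the netCDF C library provides when built with DAP support, and NCO layers its operators on top of it. A rough illustration only; the URL is hypothetical and a DAP-enabled build of the library is assumed:

    #include <netcdf.h>

    /* Open a remote dataset through its OPeNDAP URL and read from it as
       if it were a local file; only the requested data is transferred. */
    int open_remote(void)
    {
        int ncid, status;
        if ((status = nc_open("http://example.org/opendap/dataset.nc", NC_NOWRITE, &ncid)))
            return status;
        /* ... nc_inq_*() / nc_get_vara_*() on just the slices needed ... */
        return nc_close(ncid);
    }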
@@ -31,7 +31,7 @@ if USE_FFIO
 libnetcdf3_la_SOURCES += ffio.c
 else !USE_FFIO
 if USE_STDIO
-libnetcdf3_la_SOURCES += stdio.c
+libnetcdf3_la_SOURCES += ncstdio.c
 else !USE_STDIO
 libnetcdf3_la_SOURCES += posixio.c
 endif !USE_STDIO
@@ -43,14 +43,14 @@ noinst_LTLIBRARIES = libnetcdf3.la
 # with m4), but they are included in the distribution so that the user
 # does not have to have m4.
 MAINTAINERCLEANFILES = $(man_MANS) attrx.c putgetx.c
-EXTRA_DIST = attr.m4 ncx.m4 putget.m4 $(man_MANS) CMakeLists.txt XGetopt.c
+EXTRA_DIST = attr.m4 ncx.m4 putget.m4 $(man_MANS) CMakeLists.txt XGetopt.c

 # This tells make how to turn .m4 files into .c files.
 .m4.c:
 m4 $(AM_M4FLAGS) $(M4FLAGS) -s $< >$@

 # The C API man page.
-man_MANS = netcdf.3
+man_MANS = netcdf.3

 # Decide what goes in the man page, based on user configure options.
 ARGS_MANPAGE = -DAPI=C
@@ -65,7 +65,5 @@ ARGS_MANPAGE += -DPARALLEL_IO=TRUE
 endif

 # This rule generates the C manpage.
-netcdf.3: $(top_srcdir)/docs/netcdf.m4
+netcdf.3: $(top_srcdir)/docs/netcdf.m4
 m4 $(M4FLAGS) $(ARGS_MANPAGE) $? >$@ || rm $@
-
-
@@ -97,9 +97,8 @@ static int memio_close(ncio* nciop, int);

 static long pagesize = 0;

-/* Create a new ncio struct to hold info about the file. */
-static int
-memio_new(const char* path, int ioflags, off_t initialsize, void* memory, ncio** nciopp, NCMEMIO** memiop)
+/*! Create a new ncio struct to hold info about the file. */
+static int memio_new(const char* path, int ioflags, off_t initialsize, void* memory, ncio** nciopp, NCMEMIO** memiop)
 {
 int status = NC_NOERR;
 ncio* nciop = NULL;
@@ -126,6 +125,14 @@ memio_new(const char* path, int ioflags, off_t initialsize, void* memory, ncio**
 #endif
 }

+/* We need to catch errors.
+sysconf, at least, can return a negative value
+when there is an error. */
+if(pagesize < 0) {
+status = NC_EIO;
+goto fail;
+}
+
 errno = 0;

 /* Always force the allocated size to be a multiple of pagesize */
@@ -135,7 +142,7 @@ memio_new(const char* path, int ioflags, off_t initialsize, void* memory, ncio**
 nciop = (ncio* )calloc(1,sizeof(ncio));
 if(nciop == NULL) {status = NC_ENOMEM; goto fail;}
-
+
 nciop->ioflags = ioflags;
 *((int*)&nciop->fd) = -1; /* caller will fix */

@@ -146,7 +153,7 @@ memio_new(const char* path, int ioflags, off_t initialsize, void* memory, ncio**
 *((ncio_filesizefunc**)&nciop->filesize) = memio_filesize;
 *((ncio_pad_lengthfunc**)&nciop->pad_length) = memio_pad_length;
 *((ncio_closefunc**)&nciop->close) = memio_close;
-
+
 memio = (NCMEMIO*)calloc(1,sizeof(NCMEMIO));
 if(memio == NULL) {status = NC_ENOMEM; goto fail;}
 *((void* *)&nciop->pvt) = memio;
@@ -165,7 +172,7 @@ memio_new(const char* path, int ioflags, off_t initialsize, void* memory, ncio**
 free(nciop);
 }
 if(inmemory) {
-memio->memory = memory;
+memio->memory = memory;
 } else {
 /* malloc memory */
 memio->memory = (char*)malloc(memio->alloc);
@@ -190,10 +197,10 @@ fail:
 ioflags - flags from nc_create
 initialsz - From the netcdf man page: "The argument
 initialsize sets the initial size of the file at creation time."
-igeto -
-igetsz -
+igeto -
+igetsz -
 sizehintp - the size of a page of data for buffered reads and writes.
-parameters - arbitrary data
+parameters - arbitrary data
 nciopp - pointer to a pointer that will get location of newly
 created and inited ncio struct.
 mempp - pointer to pointer to the initial memory read.
@@ -221,7 +228,7 @@ memio_create(const char* path, int ioflags,

 if(persist) {
 /* Open the file just tomake sure we can write it if needed */
-oflags = (persist ? O_RDWR : O_RDONLY);
+oflags = (persist ? O_RDWR : O_RDONLY);
 #ifdef O_BINARY
 fSet(oflags, O_BINARY);
 #endif
@@ -243,7 +250,7 @@ fprintf(stderr,"memio_create: initial memory: %lu/%lu\n",(unsigned long)memio->m
 #endif

 fd = nc__pseudofd();
-*((int* )&nciop->fd) = fd;
+*((int* )&nciop->fd) = fd;

 fSet(nciop->ioflags, NC_WRITE);

@@ -274,11 +281,11 @@ unwind_open:
 ioflags - flags passed into nc_open.
 igeto - looks like this function can do an initial page get, and
 igeto is going to be the offset for that. But it appears to be
-unused
+unused
 igetsz - the size in bytes of initial page get (a.k.a. extent). Not
 ever used in the library.
 sizehintp - the size of a page of data for buffered reads and writes.
-parameters - arbitrary data
+parameters - arbitrary data
 nciopp - pointer to pointer that will get address of newly created
 and inited ncio struct.
 mempp - pointer to pointer to the initial memory read.
@@ -307,13 +314,13 @@ memio_open(const char* path,
 return NC_EINVAL;

 assert(sizehintp != NULL);
-sizehint = *sizehintp;
+sizehint = *sizehintp;

 if(inmemory) {
 filesize = meminfo->size;
 } else {
 /* Open the file,and make sure we can write it if needed */
-oflags = (persist ? O_RDWR : O_RDONLY);
+oflags = (persist ? O_RDWR : O_RDONLY);
 #ifdef O_BINARY
 fSet(oflags, O_BINARY);
 #endif
@@ -345,7 +352,7 @@ memio_open(const char* path,
 else
 status = memio_new(path, ioflags, filesize, NULL, &nciop, &memio);
 if(status != NC_NOERR) {
-if(fd >= 0)
+if(fd >= 0)
 close(fd);
 return status;
 }
@@ -371,10 +378,10 @@ fprintf(stderr,"memio_open: initial memory: %lu/%lu\n",(unsigned long)memio->mem
 }

 /* Use half the filesize as the blocksize ; why? */
-sizehint = filesize/2;
+sizehint = filesize/2;

 fd = nc__pseudofd();
-*((int* )&nciop->fd) = fd;
+*((int* )&nciop->fd) = fd;

 if(igetsz != 0)
 {
@@ -397,7 +404,7 @@ unwind_open:
 return status;
 }

-/*
+/*
 * Get file size in bytes.
 */
 static int
@@ -451,29 +458,31 @@ fprintf(stderr,"realloc: %lu/%lu -> %lu/%lu\n",
 #endif
 memio->memory = newmem;
 memio->alloc = newsize;
-}
+}
 memio->size = length;
 return NC_NOERR;
 }

-/* Write out any dirty buffers to disk and
-ensure that next read will get data from disk.
-Sync any changes, then close the open file associated with the ncio
-struct, and free its memory.
-nciop - pointer to ncio to close.
-doUnlink - if true, unlink file
+/*! Write out any dirty buffers to disk.
+
+Write out any dirty buffers to disk and ensure that next read will get data from disk. Sync any changes, then close the open file associated with the ncio struct, and free its memory.
+
+@param[in] nciop pointer to ncio to close.
+@param[in] doUnlink if true, unlink file
+@return NC_NOERR on success, error code on failure.
 */

-static int
+static int
 memio_close(ncio* nciop, int doUnlink)
 {
 int status = NC_NOERR;
-NCMEMIO* memio;
+NCMEMIO* memio ;
 int fd = -1;
-int inmemory = (fIsSet(nciop->ioflags,NC_INMEMORY));
+int inmemory = 0;

 if(nciop == NULL || nciop->pvt == NULL) return NC_NOERR;

+inmemory = (fIsSet(nciop->ioflags,NC_INMEMORY));
 memio = (NCMEMIO*)nciop->pvt;
 assert(memio != NULL);

@@ -507,7 +516,7 @@ done:
 if(!inmemory && memio->memory != NULL)
 free(memio->memory);
 /* do cleanup */
-if(fd >= 0) (void)close(fd);
+if(fd >= 0) (void)close(fd);
 if(memio != NULL) free(memio);
 if(nciop->path != NULL) free((char*)nciop->path);
 free(nciop);
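The memio_* code touched above implements the in-memory I/O layer behind the library's diskless file modes. A minimal usage sketch of that feature from the public API; the file and variable names are arbitrary, and since no persistence flag is passed the in-memory image is simply discarded at close:

    #include <netcdf.h>

    /* Create a purely in-memory (diskless) dataset, write to it, close it. */
    int diskless_demo(void)
    {
        int ncid, dimid, varid, status;
        int data[3] = {1, 2, 3};
        if ((status = nc_create("scratch.nc", NC_DISKLESS | NC_CLOBBER, &ncid)))
            return status;
        if ((status = nc_def_dim(ncid, "n", 3, &dimid))) return status;
        if ((status = nc_def_var(ncid, "v", NC_INT, 1, &dimid, &varid))) return status;
        if ((status = nc_enddef(ncid))) return status;
        if ((status = nc_put_var_int(ncid, varid, data))) return status;
        return nc_close(ncid);
    }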
@@ -529,10 +529,11 @@ int NC_lookupvar(NC3_INFO* ncp, int varid, NC_var **varp)
 return NC_EGLOBAL;
 }

-*varp = elem_NC_vararray(&ncp->vars, (size_t)varid);
+if(varp)
+*varp = elem_NC_vararray(&ncp->vars, (size_t)varid);
+else
+return NC_ENOTVAR;

-if(varp == NULL)
-return NC_ENOTVAR;
+if(*varp == NULL)
+return NC_ENOTVAR;

@@ -19,8 +19,8 @@
 #include <H5DSpublic.h>
 #include <math.h>

-#if 0 /*def USE_PNETCDF*/
-#include <pnetcdf.h>
+#ifdef USE_PARALLEL
+#include "netcdf_par.h"
 #endif

 #define NC3_STRICT_ATT_NAME "_nc3_strict"
@@ -266,8 +266,22 @@ nc4_find_default_chunksizes2(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var)
 var->chunksizes[d] = 1; /* overwritten below, if all dims are unlimited */
 }
 }

-if (var->ndims > 0 && var->ndims == num_unlim) { /* all dims unlimited */
+/* Special case to avoid 1D vars with unlim dim taking huge amount
+of space (DEFAULT_CHUNK_SIZE bytes). Instead we limit to about
+4KB */
+#define DEFAULT_1D_UNLIM_SIZE (4096) /* TODO: make build-time parameter? */
+if (var->ndims == 1 && num_unlim == 1) {
+if (DEFAULT_CHUNK_SIZE / type_size <= 0)
+suggested_size = 1;
+else if (DEFAULT_CHUNK_SIZE / type_size > DEFAULT_1D_UNLIM_SIZE)
+suggested_size = DEFAULT_1D_UNLIM_SIZE;
+else
+suggested_size = DEFAULT_CHUNK_SIZE / type_size;
+var->chunksizes[0] = suggested_size / type_size;
+LOG((4, "%s: name %s dim %d DEFAULT_CHUNK_SIZE %d num_values %f type_size %d "
+"chunksize %ld", __func__, var->name, d, DEFAULT_CHUNK_SIZE, num_values, type_size, var->chunksizes[0]));
+}
+if (var->ndims > 1 && var->ndims == num_unlim) { /* all dims unlimited */
 suggested_size = pow((double)DEFAULT_CHUNK_SIZE/type_size, 1.0/(double)(var->ndims));
 for (d = 0; d < var->ndims; d++)
 {
@@ -878,7 +892,7 @@ nc_def_var_extra(int ncid, int varid, int *shuffle, int *deflate,
 var->contiguous = NC_FALSE;
 }

-/* Fltcher32 checksum error protection? */
+/* Fletcher32 checksum error protection? */
 if (fletcher32)
 {
 var->fletcher32 = *fletcher32;
@@ -995,7 +1009,7 @@ NC4_def_var_fletcher32(int ncid, int varid, int fletcher32)
 and before nc_enddef.

 Chunking is required in any dataset with one or more unlimited
-dimension in HDF5, or any dataset using a filter.
+dimensions in HDF5, or any dataset using a filter.

 Where chunksize is a pointer to an array of size ndims, with the
 chunksize in each dimension.
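The default chunk-size logic and documentation touched above only apply when the caller does not set chunking explicitly. A minimal sketch of setting chunk sizes through the public API instead of relying on those defaults; the dimension and variable names are hypothetical and a netCDF-4 file is assumed:

    #include <netcdf.h>

    /* Define a variable with an unlimited dimension and choose its chunk
       sizes explicitly; nc_def_var_chunking() must be called after
       nc_def_var() and before nc_enddef(). */
    int define_chunked_var(int ncid)
    {
        int time_dim, lat_dim, varid, status;
        int dimids[2];
        size_t chunks[2] = {1, 4096};   /* one record per chunk, 4096 points wide */
        if ((status = nc_def_dim(ncid, "time", NC_UNLIMITED, &time_dim))) return status;
        if ((status = nc_def_dim(ncid, "lat", 4096, &lat_dim))) return status;
        dimids[0] = time_dim;
        dimids[1] = lat_dim;
        if ((status = nc_def_var(ncid, "t", NC_FLOAT, 2, dimids, &varid))) return status;
        return nc_def_var_chunking(ncid, varid, NC_CHUNKED, chunks);
    }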
@@ -102,4 +102,4 @@ all:

 # This rule tells make how to turn our .m4 files into .c files.
 .m4.c:
-m4 $(AM_M4FLAGS) $(M4FLAGS) -s $< >$@
+m4 $(AM_M4FLAGS) $(M4FLAGS) $< >$@