Merge pull request #1582 from NOAA-GSD/ejh_parallel_zlib

Allow user to turn on zlib, shuffle, and/or fletcher32 filters with parallel I/O for HDF5-1.10.2+
Ward Fisher 2020-01-13 16:06:51 -07:00 committed by GitHub
commit 8771d0bdf4
13 changed files with 461 additions and 57 deletions
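
In user-facing terms, this change lets a program compress a variable while writing it with parallel I/O. Below is a minimal sketch (not part of the diff; the file and variable names are invented) of the sequence the PR newly permits, assuming netcdf-c 4.7.4+ built against HDF5 1.10.2+:

/* Sketch: deflate a variable written with parallel I/O (hypothetical
 * file/variable names; error handling reduced to early returns). */
#include <mpi.h>
#include <netcdf.h>
#include <netcdf_par.h>

#define NX 1024

int
main(int argc, char **argv)
{
    int ncid, dimid, varid, mpi_rank, mpi_size, i;
    size_t start[1], count[1];
    int data[NX];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    if (nc_create_par("hypothetical.nc", NC_NETCDF4, MPI_COMM_WORLD,
                      MPI_INFO_NULL, &ncid)) return 1;
    if (nc_def_dim(ncid, "x", NX * mpi_size, &dimid)) return 1;
    if (nc_def_var(ncid, "v", NC_INT, 1, &dimid, &varid)) return 1;

    /* Before this PR (or with older HDF5), this returned NC_EINVAL in
     * a parallel file; now it succeeds and switches the variable to
     * collective access. */
    if (nc_def_var_deflate(ncid, varid, 1, 1, 1)) return 1;
    if (nc_enddef(ncid)) return 1;

    /* Each rank writes its own slab. */
    start[0] = (size_t)mpi_rank * NX;
    count[0] = NX;
    for (i = 0; i < NX; i++)
        data[i] = mpi_rank;
    if (nc_put_vara_int(ncid, varid, start, count, data)) return 1;

    if (nc_close(ncid)) return 1;
    MPI_Finalize();
    return 0;
}

Note that nc_def_var_deflate() itself switches the variable to collective access, so no nc_var_par_access() call is required.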


@@ -743,6 +743,9 @@ IF(USE_HDF5 OR ENABLE_NETCDF_4)
SET(HDF5_CC h5cc)
ENDIF()
# Check to see if this is hdf5-1.10.2 or later.
CHECK_LIBRARY_EXISTS(${HDF5_C_LIBRARY_hdf5} H5DOread_chunk "" HDF5_SUPPORTS_PAR_FILTERS)
SET(H5_USE_16_API 1)
OPTION(NC_ENABLE_HDF_16_API "Enable HDF5 1.6.x Compatibility(Required)" ON)
IF(NOT NC_ENABLE_HDF_16_API)


@@ -1039,7 +1039,7 @@ if test "x$enable_hdf5" = xyes; then
# H5Pset_fapl_mpiposix and H5Pget_fapl_mpiposix have been removed since HDF5 1.8.12.
# Use H5Pset_fapl_mpio and H5Pget_fapl_mpio, instead.
AC_CHECK_FUNCS([H5Pget_fapl_mpio H5Pset_deflate H5Z_SZIP H5free_memory H5resize_memory H5allocate_memory H5Pset_libver_bounds H5Pset_all_coll_metadata_ops H5Z_SZIP])
AC_CHECK_FUNCS([H5Pget_fapl_mpio H5Pset_deflate H5Z_SZIP H5free_memory H5resize_memory H5allocate_memory H5Pset_libver_bounds H5Pset_all_coll_metadata_ops H5Z_SZIP H5DOread_chunk])
# Check to see if HDF5 library has collective metadata APIs, (HDF5 >= 1.10.0)
if test "x$ac_cv_func_H5Pset_all_coll_metadata_ops" = xyes; then
@@ -1054,7 +1054,14 @@ if test "x$enable_hdf5" = xyes; then
AC_MSG_CHECKING([whether parallel io is enabled in hdf5])
AC_MSG_RESULT([$hdf5_parallel])
# Check to see if we need to search for and link against szlib.
# Check to see if HDF5 library is 1.10.2 or greater. If so, parallel I/O with zlib is allowed.
if test "x$ac_cv_func_H5DOread_chunk" = xyes; then
AC_DEFINE([HDF5_SUPPORTS_PAR_FILTERS], [1], [if true, HDF5 is at least version 1.10.2 and allows parallel I/O with zlib])
fi
AC_MSG_CHECKING([whether HDF5 is version 1.10.2 or greater])
AC_MSG_RESULT([$ac_cv_func_H5DOread_chunk])
# Check to see if we need to search for and link against szlib.
if test "x$ac_cv_func_H5Z_SZIP" = xyes; then
AC_SEARCH_LIBS([SZ_BufftoBuffCompress], [szip sz], [],
[AC_MSG_ERROR([libhdf5 installed with szip support, but cannot find or link to the szip library.])])
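
The probe works because the comments here tie H5DOread_chunk to HDF5 1.10.2+, making its presence a link-time proxy for the library version. For comparison only, a compile-time gate on HDF5's version macro would establish the same condition (a sketch, not what the build actually uses):

#include <H5public.h>

/* Sketch: H5_VERSION_GE() is provided by H5public.h; this mirrors the
 * link-time probe above at compile time. */
#if H5_VERSION_GE(1, 10, 2)
#define HDF5_SUPPORTS_PAR_FILTERS 1
#endif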


@@ -22,15 +22,15 @@
#include <mpi.h>
#if defined(__cplusplus)
extern "C" {
#endif
/** Use with nc_var_par_access() to set parallel access to independent. */
#define NC_INDEPENDENT 0
/** Use with nc_var_par_access() to set parallel access to collective. */
#define NC_COLLECTIVE 1
#if defined(__cplusplus)
extern "C" {
#endif
/* Create a file and enable parallel I/O. */
EXTERNL int
nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,


@@ -305,6 +305,13 @@ To obtain good I/O performance, users are advised to use collective mode.
In addition, switching between collective and independent I/O mode can be
expensive.
In netcdf-c-4.7.4 or later, using hdf5-1.10.2 or later, the zlib and
fletcher32 filters may be used when writing data with parallel
I/O. The use of these filters requires collective access. Turning on
the zlib (deflate) or fletcher32 filter for a variable will
automatically set its access to collective. Attempts to set access to
independent will return ::NC_EINVAL.
\param ncid NetCDF or group ID, from a previous call to nc_open_par(),
nc_create_par(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
@@ -317,7 +324,8 @@ nc_inq_ncid().
\returns ::NC_EBADID Invalid ncid passed.
\returns ::NC_ENOTVAR Invalid varid passed.
\returns ::NC_ENOPAR File was not opened with nc_open_par/nc_create_par.
\returns ::NC_EINVAL Invalid par_access specified.
\returns ::NC_EINVAL Invalid par_access specified, or an attempt to
set a filtered variable to independent access.
<h1>Example</h1>
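
As an illustration of the error path documented above, a hedged sketch (ncid and varid assumed to come from a file created with nc_create_par(); ERR as in the netCDF test macros):

/* Sketch: a filtered variable cannot be switched to independent access. */
if (nc_def_var_deflate(ncid, varid, 0, 1, 1)) ERR;
if (nc_var_par_access(ncid, varid, NC_INDEPENDENT) != NC_EINVAL) ERR;
/* Collective access, already in effect, remains valid. */
if (nc_var_par_access(ncid, varid, NC_COLLECTIVE)) ERR;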


@@ -312,7 +312,7 @@ nc_def_var_fill(int ncid, int varid, int no_fill, const void *fill_value)
}
/**
Set the compression settings for a netCDF-4/HDF5 variable.
Set the zlib compression settings for a netCDF-4/HDF5 variable.
This function must be called after nc_def_var and before nc_enddef
or any function that writes data to the file.
@@ -324,15 +324,22 @@ nc_def_var_fill(int ncid, int varid, int no_fill, const void *fill_value)
If this function is called on a scalar variable, it is ignored.
@note Parallel I/O reads work with compressed data. Parallel I/O
writes work with compressed data in netcdf-c-4.7.4 and later
releases, using hdf5-1.10.2 and later releases. Using the zlib,
shuffle (or any other) filter requires that collective access be
used with the variable. Turning on deflate and/or shuffle for a
variable in a file opened for parallel I/O will automatically
switch the access for that variable to collective access.
@param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
@param varid Variable ID
@param shuffle True to turn on the shuffle filter. The shuffle
filter can assist with the compression of integer data by changing
the byte order in the data stream. It makes no sense to use the
shuffle filter without setting a deflate level, or to use shuffle
on non-integer data.
filter can assist with the compression of data by changing the byte
order in the data stream. It makes no sense to use the shuffle
filter without setting a deflate level.
@param deflate True to turn on deflation for this variable.
@param deflate_level A number between 0 (no compression) and 9
(maximum compression).
@@ -347,11 +354,8 @@ nc_def_var_fill(int ncid, int varid, int no_fill, const void *fill_value)
@return ::NC_ELATEDEF Too late to change settings for this variable.
@return ::NC_ENOTINDEFINE Not in define mode.
@return ::NC_EPERM File is read only.
@return ::NC_EMAXDIMS Classic model file exceeds ::NC_MAX_VAR_DIMS.
@return ::NC_ESTRICTNC3 Attempting to create netCDF-4 type var in
classic model file
@return ::NC_EBADTYPE Bad type.
@return ::NC_ENOMEM Out of memory.
@return ::NC_EHDFERR Error returned by HDF5 layer.
@return ::NC_EINVAL Invalid input. Deflate can't be set unless
variable storage is NC_CHUNKED.
@@ -422,6 +426,14 @@ nc_def_var_deflate(int ncid, int varid, int shuffle, int deflate, int deflate_le
data, with default chunksizes. Use nc_def_var_chunking() to tune
performance with user-defined chunksizes.
@note Parallel I/O reads work with fletcher32 encoded
data. Parallel I/O writes work with fletcher32 in netcdf-c-4.7.4
and later releases, using hdf5-1.10.2 and later releases. Using the
fletcher32 (or any) filter requires that collective access be used
with the variable. Turning on fletcher32 for a variable in a file
opened for parallel I/O will automatically switch the access for
that variable to collective access.
@param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
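
Both notes describe the same convenience: in a parallel file, the library flips a filtered variable to collective access on its own. A brief sketch (ncid and varid assumed from nc_create_par(), in define mode; ERR as in the netCDF test macros):

/* Sketch: either call switches the variable to collective access
 * automatically; no explicit nc_var_par_access() call is needed. */
if (nc_def_var_deflate(ncid, varid, 1, 1, 4)) ERR;
if (nc_def_var_fletcher32(ncid, varid, NC_FLETCHER32)) ERR;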


@@ -661,10 +661,13 @@ nc_def_var_extra(int ncid, int varid, int *shuffle, int *deflate,
return NC_ENOTVAR;
assert(var && var->hdr.id == varid);
/* Can't turn on parallel and deflate/fletcher32/szip/shuffle (for now). */
/* Can't turn on parallel and deflate/fletcher32/szip/shuffle
* before HDF5 1.10.2. */
#ifndef HDF5_SUPPORTS_PAR_FILTERS
if (h5->parallel == NC_TRUE)
if (deflate || fletcher32 || shuffle)
return NC_EINVAL;
#endif
/* If the HDF5 dataset has already been created, then it is too
* late to set all the extra stuff. */
@@ -687,8 +690,7 @@ nc_def_var_extra(int ncid, int varid, int *shuffle, int *deflate,
if (!var->ndims)
return NC_NOERR;
/* Well, if we couldn't find any errors, I guess we have to take
* the users settings. Darn! */
/* Set the deflate settings. */
var->contiguous = NC_FALSE;
var->deflate = *deflate;
if (*deflate)
@@ -710,6 +712,18 @@ nc_def_var_extra(int ncid, int varid, int *shuffle, int *deflate,
var->contiguous = NC_FALSE;
}
#ifdef USE_PARALLEL
/* If deflate, shuffle, or fletcher32 was turned on with
* parallel I/O writes, then switch to collective access. HDF5
* requires collective access for filter use with parallel
* I/O. */
if (deflate || shuffle || fletcher32)
{
if (h5->parallel && (var->deflate || var->shuffle || var->fletcher32))
var->parallel_access = NC_COLLECTIVE;
}
#endif /* USE_PARALLEL */
/* Handle storage settings. */
if (contiguous)
{


@@ -432,6 +432,13 @@ NC4_var_par_access(int ncid, int varid, int par_access)
if (!var) return NC_ENOTVAR;
assert(var->hdr.id == varid);
/* If zlib, shuffle, or fletcher32 filters are in use, then access
* must be collective. Fail an attempt to set such a variable to
* independent access. */
if ((var->deflate || var->shuffle || var->fletcher32) &&
par_access == NC_INDEPENDENT)
return NC_EINVAL;
if (par_access)
var->parallel_access = NC_COLLECTIVE;
else


@@ -85,6 +85,8 @@ IF(TEST_PARALLEL4)
build_bin_test(tst_parallel3)
build_bin_test(tst_parallel4)
build_bin_test(tst_parallel5)
build_bin_test(tst_parallel_zlib)
build_bin_test(tst_parallel_zlib2)
build_bin_test(tst_nc4perf)
build_bin_test(tst_mode)
build_bin_test(tst_simplerw_coll_r)


@@ -84,7 +84,7 @@ endif # BUILD_UTILITIES
if TEST_PARALLEL4
check_PROGRAMS += tst_mpi_parallel tst_parallel tst_parallel3 \
tst_parallel4 tst_parallel5 tst_nc4perf tst_mode tst_simplerw_coll_r \
tst_mode
tst_mode tst_parallel_zlib tst_parallel_zlib2
TESTS += run_par_test.sh
endif


@@ -42,3 +42,13 @@ echo "Parallel I/O test for Collective I/O, contributed by HDF Group."
@MPIEXEC@ -n 1 ./tst_simplerw_coll_r
@MPIEXEC@ -n 2 ./tst_simplerw_coll_r
@MPIEXEC@ -n 4 ./tst_simplerw_coll_r
echo
echo "Parallel I/O test with zlib."
@MPIEXEC@ -n 1 ./tst_parallel_zlib
@MPIEXEC@ -n 4 ./tst_parallel_zlib
echo
echo "Parallel I/O more tests with zlib."
@MPIEXEC@ -n 1 ./tst_parallel_zlib2
@MPIEXEC@ -n 4 ./tst_parallel_zlib2


@@ -1,16 +1,16 @@
/*! \file
Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014,
2015, 2016, 2017, 2018
University Corporation for Atmospheric Research/Unidata.
Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014,
2015, 2016, 2017, 2018
University Corporation for Atmospheric Research/Unidata.
See \ref copyright file for more info.
See \ref copyright file for more info.
* Test some illegal mode combinations
*
*/
* Test some illegal mode combinations
*
*/
#include "nc_tests.h"
#include "err_macros.h"
@@ -21,37 +21,45 @@ See \ref copyright file for more info.
int
main(int argc, char** argv)
{
int ncid,varid;
int retval;
int ncid,varid;
int retval;
printf("\n*** Testing illegal mode combinations\n");
printf("\n*** Testing illegal mode combinations\n");
MPI_Init(&argc,&argv);
MPI_Init(&argc,&argv);
printf("*** Testing create + MPIO + fletcher32\n");
if ((retval = nc_create_par(FILE_NAME, NC_CLOBBER|NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid))) ERR;
if ((retval = nc_def_var(ncid,"whatever",NC_INT,0,NULL,&varid))) ERR;
retval = nc_def_var_fletcher32(ncid,varid,NC_FLETCHER32);
if(retval != NC_EINVAL) ERR;
if ((retval = nc_abort(ncid)))
{
fprintf(stderr,"XXX: err=%d\n",retval);
fflush(stderr);
ERR;
}
printf("*** Testing create + MPIO + deflation\n");
if ((retval = nc_create_par(FILE_NAME, NC_CLOBBER|NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid))) ERR;
if ((retval = nc_def_var(ncid,"whatever",NC_INT,0,NULL,&varid))) ERR;
retval = nc_def_var_deflate(ncid,varid, NC_NOSHUFFLE, 1, 1);
if(retval != NC_EINVAL) ERR;
if ((retval = nc_abort(ncid))) {
fprintf(stderr,"XXX: nc_abort: %d\n",retval); fflush(stderr);
ERR;
}
MPI_Finalize();
SUMMARIZE_ERR;
FINAL_RESULTS;
printf("*** Testing create + MPIO + fletcher32\n");
if ((retval = nc_create_par(FILE_NAME, NC_CLOBBER|NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid))) ERR;
if ((retval = nc_def_var(ncid,"whatever",NC_INT,0,NULL,&varid))) ERR;
retval = nc_def_var_fletcher32(ncid,varid,NC_FLETCHER32);
#ifdef HDF5_SUPPORTS_PAR_FILTERS
if(retval != NC_NOERR) ERR;
#else
if(retval != NC_EINVAL) ERR;
#endif
if ((retval = nc_abort(ncid)))
{
fprintf(stderr,"XXX: err=%d\n",retval);
fflush(stderr);
ERR;
}
printf("*** Testing create + MPIO + deflation\n");
if ((retval = nc_create_par(FILE_NAME, NC_CLOBBER|NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid))) ERR;
if ((retval = nc_def_var(ncid,"whatever",NC_INT,0,NULL,&varid))) ERR;
retval = nc_def_var_deflate(ncid,varid, NC_NOSHUFFLE, 1, 1);
#ifdef HDF5_SUPPORTS_PAR_FILTERS
if(retval != NC_NOERR) ERR;
#else
if(retval != NC_EINVAL) ERR;
#endif
if ((retval = nc_abort(ncid))) {
fprintf(stderr,"XXX: nc_abort: %d\n",retval); fflush(stderr);
ERR;
}
MPI_Finalize();
SUMMARIZE_ERR;
FINAL_RESULTS;
}


@@ -0,0 +1,174 @@
/*
Copyright 2019, UCAR/Unidata
See COPYRIGHT file for copying and redistribution conditions.
This program tests netcdf-4 parallel I/O using the zlib filter while
writing with parallel I/O. This works for HDF5-1.10.2 and later. In
this case HDF5_SUPPORTS_PAR_FILTERS will be defined during
configure.
Ed Hartnett, 12/19/2019
*/
/* Defining USE_MPE causes the MPE trace library to be used (and you
* must also relink with -llmpe -lmpe). This causes clog2 output to be
* written, which can be converted to slog2 (by the program
* clog2TOslog2) and then used in the analysis program jumpshot. */
/*#define USE_MPE 1*/
#include <nc_tests.h>
#include "err_macros.h"
#include <mpi.h>
#ifdef USE_MPE
#include <mpe.h>
#endif /* USE_MPE */
#define FILE "tst_parallel_zlib.nc"
#define NDIMS 3
#define DIMSIZE 24
#define QTR_DATA (DIMSIZE * DIMSIZE / 4)
#define NUM_PROC 4
#define NUM_SLABS 10
int
main(int argc, char **argv)
{
/* MPI stuff. */
int mpi_namelen;
char mpi_name[MPI_MAX_PROCESSOR_NAME];
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
/* Netcdf-4 stuff. */
int ncid, v1id, dimids[NDIMS];
size_t start[NDIMS], count[NDIMS];
int i, res;
int slab_data[DIMSIZE * DIMSIZE / 4]; /* one slab */
char file_name[NC_MAX_NAME + 1];
#ifdef USE_MPE
int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
#endif /* USE_MPE */
/* Initialize MPI. */
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Get_processor_name(mpi_name, &mpi_namelen);
/*printf("mpi_name: %s size: %d rank: %d\n", mpi_name,
mpi_size, mpi_rank);*/
#ifdef USE_MPE
MPE_Init_log();
s_init = MPE_Log_get_event_number();
e_init = MPE_Log_get_event_number();
s_define = MPE_Log_get_event_number();
e_define = MPE_Log_get_event_number();
s_write = MPE_Log_get_event_number();
e_write = MPE_Log_get_event_number();
s_close = MPE_Log_get_event_number();
e_close = MPE_Log_get_event_number();
MPE_Describe_state(s_init, e_init, "Init", "red");
MPE_Describe_state(s_define, e_define, "Define", "yellow");
MPE_Describe_state(s_write, e_write, "Write", "green");
MPE_Describe_state(s_close, e_close, "Close", "purple");
MPE_Start_log();
MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */
if (mpi_rank == 0)
{
printf("\n*** Testing parallel writes with zlib.\n");
printf("*** testing simple write with zlib...");
}
/* Create phony data. We're going to write a 24x24 array of ints,
in 4 sets of 144. */
for (i = 0; i < DIMSIZE * DIMSIZE / 4; i++)
slab_data[i] = mpi_rank;
#ifdef USE_MPE
MPE_Log_event(e_init, 0, "end init");
MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */
/* Create a parallel netcdf-4 file. */
/*nc_set_log_level(3);*/
/* sprintf(file_name, "%s/%s", TEMP_LARGE, FILE); */
sprintf(file_name, "%s", FILE);
if ((res = nc_create_par(file_name, NC_NETCDF4, comm, info, &ncid))) ERR;
/* Create three dimensions. */
if (nc_def_dim(ncid, "d1", DIMSIZE, dimids)) ERR;
if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;
if (nc_def_dim(ncid, "d3", NUM_SLABS, &dimids[2])) ERR;
/* Create one var. */
if ((res = nc_def_var(ncid, "v1", NC_INT, NDIMS, dimids, &v1id))) ERR;
/* Setting deflate only will work for HDF5-1.10.2 and later
* versions. */
res = nc_def_var_deflate(ncid, 0, 0, 1, 1);
#ifdef HDF5_SUPPORTS_PAR_FILTERS
if (res) ERR;
#else
if (res != NC_EINVAL) ERR;
#endif
/* Write metadata to file. */
if ((res = nc_enddef(ncid))) ERR;
#ifdef USE_MPE
MPE_Log_event(e_define, 0, "end define file");
if (mpi_rank)
sleep(mpi_rank);
#endif /* USE_MPE */
/* Set up slab for this process. */
start[0] = mpi_rank * DIMSIZE/mpi_size;
start[1] = 0;
count[0] = DIMSIZE/mpi_size;
count[1] = DIMSIZE;
count[2] = 1;
/*printf("mpi_rank=%d start[0]=%d start[1]=%d count[0]=%d count[1]=%d\n",
mpi_rank, start[0], start[1], count[0], count[1]);*/
/* Not necessary, but harmless. */
if (nc_var_par_access(ncid, v1id, NC_COLLECTIVE)) ERR;
for (start[2] = 0; start[2] < NUM_SLABS; start[2]++)
{
#ifdef USE_MPE
MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */
/* Write slabs of phony data. */
if (nc_put_vara_int(ncid, v1id, start, count, slab_data)) ERR;
#ifdef USE_MPE
MPE_Log_event(e_write, 0, "end write file");
#endif /* USE_MPE */
}
#ifdef USE_MPE
MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */
/* Close the netcdf file. */
if ((res = nc_close(ncid))) ERR;
#ifdef USE_MPE
MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */
/* Shut down MPI. */
MPI_Finalize();
if (mpi_rank == 0)
{
SUMMARIZE_ERR;
FINAL_RESULTS;
}
return 0;
}


@@ -0,0 +1,159 @@
/*
Copyright 2019, UCAR/Unidata See COPYRIGHT file for copying and
redistribution conditions.
This program tests netcdf-4 parallel I/O using zlib, shuffle, and
fletcher32 filters while writing with parallel I/O. This works for
HDF5-1.10.2 and later. In this case HDF5_SUPPORTS_PAR_FILTERS will
be defined during configure.
Ed Hartnett, 12/19/2019
*/
#include <nc_tests.h>
#include "err_macros.h"
#include <mpi.h>
#define FILE_NAME "tst_parallel_zlib2.nc"
#define NDIMS 3
#define DIMSIZE 24
#define QTR_DATA (DIMSIZE * DIMSIZE / 4)
#define NUM_PROC 4
#define NUM_SLABS 10
#define NUM_SHUFFLE_SETTINGS 2
int
main(int argc, char **argv)
{
/* MPI stuff. */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
/* Netcdf-4 stuff. */
int ncid, v1id, dimids[NDIMS];
size_t start[NDIMS], count[NDIMS];
int i, res;
int *slab_data; /* one slab */
/* Initialize MPI. */
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* Allocate data. */
if (!(slab_data = malloc(sizeof(int) * DIMSIZE * DIMSIZE / mpi_size))) ERR;
/* Create phony data. We're going to write a 24x24 array of ints,
in 4 sets of 144. */
for (i = 0; i < DIMSIZE * DIMSIZE / mpi_size; i++)
slab_data[i] = mpi_rank;
if (!mpi_rank)
{
printf("\n*** Testing parallel writes with zlib some more.\n");
}
{
int s;
for (s = 0; s < NUM_SHUFFLE_SETTINGS; s++)
{
printf("*** testing simple write with zlib shuffle %d...", s);
/* nc_set_log_level(3); */
/* Create a parallel netcdf-4 file. */
if (nc_create_par(FILE_NAME, NC_NETCDF4, comm, info, &ncid)) ERR;
/* Create three dimensions. */
if (nc_def_dim(ncid, "d1", DIMSIZE, dimids)) ERR;
if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;
if (nc_def_dim(ncid, "d3", NUM_SLABS, &dimids[2])) ERR;
/* Create one var. Turn on deflation. */
if ((res = nc_def_var(ncid, "v1", NC_INT, NDIMS, dimids, &v1id))) ERR;
/* Setting deflate only will work for HDF5-1.10.2 and later
* versions. */
res = nc_def_var_deflate(ncid, 0, s, 1, 1);
#ifdef HDF5_SUPPORTS_PAR_FILTERS
if (res) ERR;
#else
if (res != NC_EINVAL) ERR;
#endif
/* Setting fletcher32 only will work for HDF5-1.10.2 and later
* versions. */
res = nc_def_var_fletcher32(ncid, 0, 1);
#ifdef HDF5_SUPPORTS_PAR_FILTERS
if (res) ERR;
#else
if (res != NC_EINVAL) ERR;
#endif
/* Write metadata to file. */
if (nc_enddef(ncid)) ERR;
/* Set up slab for this process. */
start[0] = mpi_rank * DIMSIZE/mpi_size;
start[1] = 0;
count[0] = DIMSIZE/mpi_size;
count[1] = DIMSIZE;
count[2] = 1;
/*printf("mpi_rank=%d start[0]=%d start[1]=%d count[0]=%d count[1]=%d\n",
mpi_rank, start[0], start[1], count[0], count[1]);*/
/* Should not be allowed to change access to independent,
* because filters are in use. */
if (nc_var_par_access(ncid, v1id, NC_INDEPENDENT) != NC_EINVAL) ERR;
/* Write slabs of data. */
for (start[2] = 0; start[2] < NUM_SLABS; start[2]++)
if (nc_put_vara_int(ncid, v1id, start, count, slab_data)) ERR;
/* Close the netcdf file. */
if (nc_close(ncid)) ERR;
/* Check file. */
{
int shuffle_in, deflate_in, deflate_level_in;
int *slab_data_in;
/* Allocate data. */
if (!(slab_data_in = malloc(sizeof(int) * DIMSIZE * DIMSIZE / mpi_size))) ERR;
/* Reopen the file for parallel access. */
if (nc_open_par(FILE_NAME, NC_NOWRITE, comm, info, &ncid)) ERR;
/* Check state of deflate. */
if (nc_inq_var_deflate(ncid, 0, &shuffle_in, &deflate_in, &deflate_level_in)) ERR;
if ((s && !shuffle_in) || (!s && shuffle_in)) ERR;
if (!deflate_in || deflate_level_in != 1) ERR;
/* Use parallel I/O to read the data. */
for (start[2] = 0; start[2] < NUM_SLABS; start[2]++)
{
if (nc_get_vara_int(ncid, 0, start, count, slab_data_in)) ERR;
for (i = 0; i < DIMSIZE * DIMSIZE / mpi_size; i++)
if (slab_data_in[i] != mpi_rank) ERR;
}
/* Close the netcdf file. */
if (nc_close(ncid)) ERR;
free(slab_data_in);
}
if (!mpi_rank)
SUMMARIZE_ERR;
} /* next shuffle filter test */
free(slab_data);
}
/* Shut down MPI. */
MPI_Finalize();
if (!mpi_rank)
FINAL_RESULTS;
return 0;
}