Merge pull request #1788 from Unidata/NOAA-GSD-ejh_par_test

Noaa gsd ejh par test
Ward Fisher 2020-07-09 19:12:10 -06:00 committed by GitHub
commit 57ea8f903d
13 changed files with 980 additions and 17 deletions

View File

@ -9,6 +9,7 @@ This file contains a high-level description of this package's evolution. Release
* [Enhancement] Added new test for using compression with parallel I/O: nc_test4/tst_h_par_compress.c. See [https://github.com/Unidata/netcdf-c/pull/1784].
* [Bug Fix] Don't return error for extra calls to nc_redef() for netCDF/HDF5 files, unless classic model is in use. See [https://github.com/Unidata/netcdf-c/issues/1779].
* [Enhancement] Added new parallel I/O benchmark program to mimic NOAA UFS data writes, built when --enable-benchmarks is in configure. See [https://github.com/Unidata/netcdf-c/pull/1777].
* [Bug Fix] Now allow szip to be used on variables with an unlimited dimension; see the sketch after this list [https://github.com/Unidata/netcdf-c/issues/1774].
* [Enhancement] Add support for cloud storage using a variant of the Zarr storage format. Warning: this feature is highly experimental and is subject to rapid evolution [https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in].
* [Bug Fix] Fix nccopy to properly set default chunking parameters when not otherwise specified. This can significantly improve performance in selected cases. If nccopy seems slow, a work-around is to set the chunking parameters explicitly. [https://github.com/Unidata/netcdf-c/issues/1763].
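
As a companion to the szip item above, here is a minimal sketch of the now-permitted pattern: an szip-compressed variable that uses an unlimited dimension. The file and variable names are invented for illustration, and the sketch assumes a netCDF build whose HDF5 includes szip.

#include <netcdf.h>
#include <stdio.h>

int
main()
{
    int ncid, dimid[2], varid;

    if (nc_create("szip_unlim.nc", NC_NETCDF4, &ncid)) return 1;
    if (nc_def_dim(ncid, "time", NC_UNLIMITED, &dimid[0])) return 1;
    if (nc_def_dim(ncid, "x", 256, &dimid[1])) return 1;
    if (nc_def_var(ncid, "data", NC_FLOAT, 2, dimid, &varid)) return 1;
    /* Before this fix, nc_def_var_szip() returned an error for any
     * variable with an unlimited dimension; now it succeeds. The
     * arguments are the szip options mask (32 = nearest-neighbor)
     * and the pixels-per-block (32). */
    if (nc_def_var_szip(ncid, varid, 32, 32)) return 1;
    if (nc_close(ncid)) return 1;
    printf("szip on a variable with an unlimited dimension: ok\n");
    return 0;
}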

View File

@ -1642,6 +1642,7 @@ AC_CONFIG_FILES(dap4_test/pingurl4.c:ncdap_test/pingurl.c)
AC_CONFIG_FILES([h5_test/run_par_tests.sh], [chmod ugo+x h5_test/run_par_tests.sh])
AC_CONFIG_FILES([nc_test4/run_par_test.sh], [chmod ugo+x nc_test4/run_par_test.sh])
AC_CONFIG_FILES([nc_perf/run_par_bm_test.sh], [chmod ugo+x nc_perf/run_par_bm_test.sh])
AC_CONFIG_FILES([nc_perf/run_gfs_test.sh], [chmod ugo+x nc_perf/run_gfs_test.sh])
AC_CONFIG_FILES([examples/C/run_par_test.sh], [chmod ugo+x examples/C/run_par_test.sh])
AC_CONFIG_FILES([nc-config], [chmod 755 nc-config])
AC_CONFIG_FILES([Makefile

View File

@ -1556,6 +1556,7 @@ NC4_put_vars(int ncid, int varid, const size_t *startp, const size_t *countp,
start[i] = startp[i];
count[i] = countp ? countp[i] : var->dim[i]->len;
stride[i] = stridep ? stridep[i] : 1;
LOG((4, "start[%d] %ld count[%d] %ld stride[%d] %ld", i, start[i], i, count[i], i, stride[i]));
/* Check to see if any counts are zero. */
if (!count[i])
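
For context: NC4_put_vars() supplies defaults when the caller omits counts or strides, and the LOG line added in this hunk records the effective values. A hedged sketch of a call that exercises the stride default (file and names invented; for netCDF-4 files, nc_put_vara_float() reaches NC4_put_vars() with a NULL stridep, which the code above replaces with 1):

#include <netcdf.h>

int
main()
{
    int ncid, dimid, varid;
    size_t start = 0, count = 4;
    float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};

    if (nc_create("vars_defaults.nc", NC_NETCDF4, &ncid)) return 1;
    if (nc_def_dim(ncid, "x", 8, &dimid)) return 1;
    if (nc_def_var(ncid, "v", NC_FLOAT, 1, &dimid, &varid)) return 1;
    if (nc_enddef(ncid)) return 1;
    /* No stride argument here; NC4_put_vars() fills in stride = 1
     * (and logs the effective start/count/stride at log level 4). */
    if (nc_put_vara_float(ncid, varid, &start, &count, data)) return 1;
    return nc_close(ncid);
}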

View File

@ -20,7 +20,7 @@ LDADD = ${top_builddir}/liblib/libnetcdf.la
check_PROGRAMS = tst_create_files bm_file tst_chunks3 tst_ar4 \
tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts \
tst_files2 tst_files3 tst_mem tst_mem1 tst_knmi bm_netcdf4_recs \
tst_wrf_reads tst_attsperf bigmeta openbigmeta tst_bm_rando
tst_wrf_reads tst_attsperf bigmeta openbigmeta tst_bm_rando
bm_file_SOURCES = bm_file.c tst_utils.c
bm_netcdf4_recs_SOURCES = bm_netcdf4_recs.c tst_utils.c
@ -48,21 +48,24 @@ TESTS += run_bm_test1.sh run_bm_test2.sh
run_bm_test1.log: tst_create_files.log
run_bm_test2.log: tst_create_files.log
# This will run a parallel I/O benchmark for parallel builds.
# This will run parallel I/O benchmarks for parallel builds.
if TEST_PARALLEL4
TESTS += run_par_bm_test.sh
run_par_bm_test.log: tst_create_files.log
check_PROGRAMS += tst_gfs_data_1
TESTS += run_par_bm_test.sh run_gfs_test.sh
run_par_bm_test.log: tst_create_files.log run_bm_test1.log
endif # TEST_PARALLEL4
endif # BUILD_UTILITIES
EXTRA_DIST = run_par_bm_test.sh.in run_knmi_bm.sh perftest.sh \
run_bm_test1.sh run_bm_test2.sh run_tst_chunks.sh run_bm_elena.sh \
CMakeLists.txt
# Extra files for the dist. Note that parallel tests end in .in,
# because configure substitutes in the launcher (usually mpiexec).
EXTRA_DIST = run_knmi_bm.sh perftest.sh run_bm_test1.sh \
run_bm_test2.sh run_tst_chunks.sh run_bm_elena.sh CMakeLists.txt \
run_gfs_test.sh.in run_par_bm_test.sh.in gfs_sample.cdl
CLEANFILES = tst_*.nc bigmeta.nc bigvars.nc floats*.nc \
floats*.cdl shorts*.nc shorts*.cdl ints*.nc ints*.cdl tst_*.cdl
CLEANFILES = tst_*.nc bigmeta.nc bigvars.nc floats*.nc floats*.cdl \
shorts*.nc shorts*.cdl ints*.nc ints*.cdl tst_*.cdl
DISTCLEANFILES = run_par_bm_test.sh MSGCPP_CWP_NC*.nc
DISTCLEANFILES = run_par_bm_test.sh MSGCPP_CWP_NC*.nc run_gfs_test.sh
# If valgrind is present, add valgrind targets.
@VALGRIND_CHECK_RULES@

View File

@ -817,7 +817,7 @@ main(int argc, char **argv)
extern int opterr;
extern char *optarg;
char file_in[NC_MAX_NAME + 1], file_out[NC_MAX_NAME + 1] = {""};
char file_out_2[NC_MAX_NAME + 1];
char file_out_2[NC_MAX_NAME + 10 + 1]; /* extra 10 to silence warning */
int out_format, in_format, header = 0, doublecheck = 0;
int convert_unlim = 0;
char *str1, *str2, *token, *subtoken;
@ -1068,7 +1068,7 @@ main(int argc, char **argv)
if (doublecheck)
{
/* We need a string long enough for the copy command. */
char cmd[NC_MAX_NAME * 2 + 5];
char cmd[NC_MAX_NAME * 3 + 5];
#ifdef USE_PARALLEL
MPI_Barrier(MPI_COMM_WORLD);
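
The two buffer-size bumps in this file guard the same string arithmetic: file_out_2 may now hold up to NC_MAX_NAME + 10 characters, so a shell command embedding both file names no longer fits in NC_MAX_NAME * 2 + 5 bytes. A self-contained sketch of the pattern (the exact command text in bm_file.c is assumed, not quoted):

#include <netcdf.h>
#include <stdio.h>
#include <stdlib.h>

int
main()
{
    char file_out[NC_MAX_NAME + 1] = "bm_out.nc";
    char file_out_2[NC_MAX_NAME + 10 + 1] = "bm_out_copy.nc";
    /* "cp " + name + " " + name + NUL: worst case is
     * 3 + NC_MAX_NAME + 1 + (NC_MAX_NAME + 10) + 1 bytes, which
     * NC_MAX_NAME * 3 + 5 comfortably covers. */
    char cmd[NC_MAX_NAME * 3 + 5];

    snprintf(cmd, sizeof(cmd), "cp %s %s", file_out, file_out_2);
    return system(cmd) ? 1 : 0;
}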

nc_perf/gfs_sample.cdl Normal file
View File

@ -0,0 +1,274 @@
netcdf gfs.t00z.atmf024 {
dimensions:
grid_xt = 3072 ;
grid_yt = 1536 ;
pfull = 127 ;
phalf = 128 ;
time = UNLIMITED ; // (1 currently)
variables:
double grid_xt(grid_xt) ;
grid_xt:cartesian_axis = "X" ;
grid_xt:long_name = "T-cell longitude" ;
grid_xt:units = "degrees_E" ;
grid_xt:_Storage = "contiguous" ;
grid_xt:_Endianness = "little" ;
double lon(grid_yt, grid_xt) ;
lon:long_name = "T-cell longitude" ;
lon:units = "degrees_E" ;
lon:_Storage = "contiguous" ;
lon:_Endianness = "little" ;
double grid_yt(grid_yt) ;
grid_yt:cartesian_axis = "Y" ;
grid_yt:long_name = "T-cell latitude" ;
grid_yt:units = "degrees_N" ;
grid_yt:_Storage = "contiguous" ;
grid_yt:_Endianness = "little" ;
double lat(grid_yt, grid_xt) ;
lat:long_name = "T-cell latitude" ;
lat:units = "degrees_N" ;
lat:_Storage = "contiguous" ;
lat:_Endianness = "little" ;
float pfull(pfull) ;
pfull:long_name = "ref full pressure level" ;
pfull:units = "mb" ;
pfull:cartesian_axis = "Z" ;
pfull:positive = "down" ;
pfull:edges = "phalf" ;
pfull:_Storage = "contiguous" ;
pfull:_Endianness = "little" ;
float phalf(phalf) ;
phalf:long_name = "ref half pressure level" ;
phalf:units = "mb" ;
phalf:cartesian_axis = "Z" ;
phalf:positive = "down" ;
phalf:_Storage = "contiguous" ;
phalf:_Endianness = "little" ;
double time(time) ;
time:long_name = "time" ;
time:units = "hours since 2018-01-10 00:00:00" ;
time:cartesian_axis = "T" ;
time:calendar_type = "JULIAN" ;
time:calendar = "JULIAN" ;
time:_Storage = "chunked" ;
time:_ChunkSizes = 1 ;
time:_Endianness = "little" ;
float cld_amt(time, pfull, grid_yt, grid_xt) ;
cld_amt:long_name = "cloud amount" ;
cld_amt:units = "1" ;
cld_amt:missing_value = -1.e+10f ;
cld_amt:_FillValue = -1.e+10f ;
cld_amt:cell_methods = "time: point" ;
cld_amt:output_file = "atm" ;
cld_amt:max_abs_compression_error = 3.057718e-05f ;
cld_amt:nbits = 14 ;
cld_amt:_Storage = "chunked" ;
cld_amt:_ChunkSizes = 1, 22, 308, 615 ;
cld_amt:_DeflateLevel = 1 ;
cld_amt:_Endianness = "little" ;
float clwmr(time, pfull, grid_yt, grid_xt) ;
clwmr:long_name = "cloud water mixing ratio" ;
clwmr:units = "kg/kg" ;
clwmr:missing_value = -1.e+10f ;
clwmr:_FillValue = -1.e+10f ;
clwmr:cell_methods = "time: point" ;
clwmr:output_file = "atm" ;
clwmr:max_abs_compression_error = 4.976755e-08f ;
clwmr:nbits = 14 ;
clwmr:_Storage = "chunked" ;
clwmr:_ChunkSizes = 1, 22, 308, 615 ;
clwmr:_DeflateLevel = 1 ;
clwmr:_Endianness = "little" ;
float delz(time, pfull, grid_yt, grid_xt) ;
delz:long_name = "height thickness" ;
delz:units = "m" ;
delz:missing_value = -1.e+10f ;
delz:_FillValue = -1.e+10f ;
delz:cell_methods = "time: point" ;
delz:output_file = "atm" ;
delz:max_abs_compression_error = 0.1002197f ;
delz:nbits = 14 ;
delz:_Storage = "chunked" ;
delz:_ChunkSizes = 1, 22, 308, 615 ;
delz:_DeflateLevel = 1 ;
delz:_Endianness = "little" ;
float dpres(time, pfull, grid_yt, grid_xt) ;
dpres:long_name = "pressure thickness" ;
dpres:units = "pa" ;
dpres:missing_value = -1.e+10f ;
dpres:_FillValue = -1.e+10f ;
dpres:cell_methods = "time: point" ;
dpres:output_file = "atm" ;
dpres:max_abs_compression_error = 0.05603027f ;
dpres:nbits = 14 ;
dpres:_Storage = "chunked" ;
dpres:_ChunkSizes = 1, 22, 308, 615 ;
dpres:_DeflateLevel = 1 ;
dpres:_Endianness = "little" ;
float dzdt(time, pfull, grid_yt, grid_xt) ;
dzdt:long_name = "vertical wind" ;
dzdt:units = "m/sec" ;
dzdt:missing_value = -1.e+10f ;
dzdt:_FillValue = -1.e+10f ;
dzdt:cell_methods = "time: point" ;
dzdt:output_file = "atm" ;
dzdt:max_abs_compression_error = 0.0003833771f ;
dzdt:nbits = 14 ;
dzdt:_Storage = "chunked" ;
dzdt:_ChunkSizes = 1, 22, 308, 615 ;
dzdt:_DeflateLevel = 1 ;
dzdt:_Endianness = "little" ;
float grle(time, pfull, grid_yt, grid_xt) ;
grle:long_name = "graupel mixing ratio" ;
grle:units = "kg/kg" ;
grle:missing_value = -1.e+10f ;
grle:_FillValue = -1.e+10f ;
grle:cell_methods = "time: point" ;
grle:output_file = "atm" ;
grle:max_abs_compression_error = 3.105961e-07f ;
grle:nbits = 14 ;
grle:_Storage = "chunked" ;
grle:_ChunkSizes = 1, 22, 308, 615 ;
grle:_DeflateLevel = 1 ;
grle:_Endianness = "little" ;
float hgtsfc(time, grid_yt, grid_xt) ;
hgtsfc:long_name = "surface geopotential height" ;
hgtsfc:units = "gpm" ;
hgtsfc:missing_value = -1.e+10f ;
hgtsfc:_FillValue = -1.e+10f ;
hgtsfc:cell_methods = "time: point" ;
hgtsfc:output_file = "atm" ;
hgtsfc:_Storage = "chunked" ;
hgtsfc:_ChunkSizes = 1, 768, 1536 ;
hgtsfc:_DeflateLevel = 1 ;
hgtsfc:_Shuffle = "true" ;
hgtsfc:_Endianness = "little" ;
float icmr(time, pfull, grid_yt, grid_xt) ;
icmr:long_name = "cloud ice mixing ratio" ;
icmr:units = "kg/kg" ;
icmr:missing_value = -1.e+10f ;
icmr:_FillValue = -1.e+10f ;
icmr:cell_methods = "time: point" ;
icmr:output_file = "atm" ;
icmr:max_abs_compression_error = 4.316098e-08f ;
icmr:nbits = 14 ;
icmr:_Storage = "chunked" ;
icmr:_ChunkSizes = 1, 22, 308, 615 ;
icmr:_DeflateLevel = 1 ;
icmr:_Endianness = "little" ;
float o3mr(time, pfull, grid_yt, grid_xt) ;
o3mr:long_name = "ozone mixing ratio" ;
o3mr:units = "kg/kg" ;
o3mr:missing_value = -1.e+10f ;
o3mr:_FillValue = -1.e+10f ;
o3mr:cell_methods = "time: point" ;
o3mr:output_file = "atm" ;
o3mr:max_abs_compression_error = 5.438778e-10f ;
o3mr:nbits = 14 ;
o3mr:_Storage = "chunked" ;
o3mr:_ChunkSizes = 1, 22, 308, 615 ;
o3mr:_DeflateLevel = 1 ;
o3mr:_Endianness = "little" ;
float pressfc(time, grid_yt, grid_xt) ;
pressfc:long_name = "surface pressure" ;
pressfc:units = "pa" ;
pressfc:missing_value = -1.e+10f ;
pressfc:_FillValue = -1.e+10f ;
pressfc:cell_methods = "time: point" ;
pressfc:output_file = "atm" ;
pressfc:_Storage = "chunked" ;
pressfc:_ChunkSizes = 1, 768, 1536 ;
pressfc:_DeflateLevel = 1 ;
pressfc:_Shuffle = "true" ;
pressfc:_Endianness = "little" ;
float rwmr(time, pfull, grid_yt, grid_xt) ;
rwmr:long_name = "rain mixing ratio" ;
rwmr:units = "kg/kg" ;
rwmr:missing_value = -1.e+10f ;
rwmr:_FillValue = -1.e+10f ;
rwmr:cell_methods = "time: point" ;
rwmr:output_file = "atm" ;
rwmr:max_abs_compression_error = 1.406297e-07f ;
rwmr:nbits = 14 ;
rwmr:_Storage = "chunked" ;
rwmr:_ChunkSizes = 1, 22, 308, 615 ;
rwmr:_DeflateLevel = 1 ;
rwmr:_Endianness = "little" ;
float snmr(time, pfull, grid_yt, grid_xt) ;
snmr:long_name = "snow mixing ratio" ;
snmr:units = "kg/kg" ;
snmr:missing_value = -1.e+10f ;
snmr:_FillValue = -1.e+10f ;
snmr:cell_methods = "time: point" ;
snmr:output_file = "atm" ;
snmr:max_abs_compression_error = 6.280607e-08f ;
snmr:nbits = 14 ;
snmr:_Storage = "chunked" ;
snmr:_ChunkSizes = 1, 22, 308, 615 ;
snmr:_DeflateLevel = 1 ;
snmr:_Endianness = "little" ;
float spfh(time, pfull, grid_yt, grid_xt) ;
spfh:long_name = "specific humidity" ;
spfh:units = "kg/kg" ;
spfh:missing_value = -1.e+10f ;
spfh:_FillValue = -1.e+10f ;
spfh:cell_methods = "time: point" ;
spfh:output_file = "atm" ;
spfh:max_abs_compression_error = 7.404014e-07f ;
spfh:nbits = 14 ;
spfh:_Storage = "chunked" ;
spfh:_ChunkSizes = 1, 22, 308, 615 ;
spfh:_DeflateLevel = 1 ;
spfh:_Endianness = "little" ;
float tmp(time, pfull, grid_yt, grid_xt) ;
tmp:long_name = "temperature" ;
tmp:units = "K" ;
tmp:missing_value = -1.e+10f ;
tmp:_FillValue = -1.e+10f ;
tmp:cell_methods = "time: point" ;
tmp:output_file = "atm" ;
tmp:max_abs_compression_error = 0.004516602f ;
tmp:nbits = 14 ;
tmp:_Storage = "chunked" ;
tmp:_ChunkSizes = 1, 22, 308, 615 ;
tmp:_DeflateLevel = 1 ;
tmp:_Endianness = "little" ;
float ugrd(time, pfull, grid_yt, grid_xt) ;
ugrd:long_name = "zonal wind" ;
ugrd:units = "m/sec" ;
ugrd:missing_value = -1.e+10f ;
ugrd:_FillValue = -1.e+10f ;
ugrd:cell_methods = "time: point" ;
ugrd:output_file = "atm" ;
ugrd:max_abs_compression_error = 0.008621216f ;
ugrd:nbits = 14 ;
ugrd:_Storage = "chunked" ;
ugrd:_ChunkSizes = 1, 22, 308, 615 ;
ugrd:_DeflateLevel = 1 ;
ugrd:_Endianness = "little" ;
float vgrd(time, pfull, grid_yt, grid_xt) ;
vgrd:long_name = "meridional wind" ;
vgrd:units = "m/sec" ;
vgrd:missing_value = -1.e+10f ;
vgrd:_FillValue = -1.e+10f ;
vgrd:cell_methods = "time: point" ;
vgrd:output_file = "atm" ;
vgrd:max_abs_compression_error = 0.00667572f ;
vgrd:nbits = 14 ;
vgrd:_Storage = "chunked" ;
vgrd:_ChunkSizes = 1, 22, 308, 615 ;
vgrd:_DeflateLevel = 1 ;
vgrd:_Endianness = "little" ;
// global attributes:
:hydrostatic = "non-hydrostatic" ;
:ncnsto = 9 ;
:ak = 0.999f, 1.605f, 2.532f, 3.924f, 5.976f, 8.947f, 13.177f, 19.096f, 27.243f, 38.276f, 52.984f, 72.293f, 97.269f, 129.11f, 169.135f, 218.767f, 279.506f, 352.894f, 440.481f, 543.782f, 664.236f, 803.164f, 961.734f, 1140.931f, 1341.538f, 1564.119f, 1809.028f, 2076.415f, 2366.252f, 2678.372f, 3012.51f, 3368.363f, 3745.646f, 4144.164f, 4563.881f, 5004.995f, 5468.017f, 5953.848f, 6463.864f, 7000.f, 7563.494f, 8150.661f, 8756.529f, 9376.141f, 10004.55f, 10636.85f, 11268.16f, 11893.64f, 12508.52f, 13108.09f, 13687.73f, 14242.89f, 14769.15f, 15262.2f, 15717.86f, 16132.09f, 16501.02f, 16820.94f, 17088.32f, 17299.85f, 17453.08f, 17548.35f, 17586.77f, 17569.7f, 17498.7f, 17375.56f, 17202.3f, 16981.14f, 16714.5f, 16405.02f, 16055.49f, 15668.86f, 15248.25f, 14796.87f, 14318.04f, 13815.15f, 13291.63f, 12750.92f, 12196.47f, 11631.66f, 11059.83f, 10484.21f, 9907.927f, 9333.967f, 8765.155f, 8204.142f, 7653.387f, 7115.147f, 6591.468f, 6084.176f, 5594.876f, 5124.949f, 4675.554f, 4247.633f, 3841.918f, 3458.933f, 3099.01f, 2762.297f, 2448.768f, 2158.238f, 1890.375f, 1644.712f, 1420.661f, 1217.528f, 1034.524f, 870.778f, 725.348f, 597.235f, 485.392f, 388.734f, 306.149f, 236.502f, 178.651f, 131.447f, 93.74f, 64.392f, 42.274f, 26.274f, 15.302f, 8.287f, 4.19f, 1.994f, 0.81f, 0.232f, 0.029f, 0.f, 0.f, 0.f ;
:bk = 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 1.018e-05f, 8.141e-05f, 0.00027469f, 0.00065078f, 0.00127009f, 0.00219248f, 0.00347713f, 0.00518228f, 0.00736504f, 0.0100812f, 0.01338492f, 0.01732857f, 0.02196239f, 0.02733428f, 0.03348954f, 0.04047056f, 0.04831661f, 0.05706358f, 0.06674372f, 0.07738548f, 0.08900629f, 0.101594f, 0.1151262f, 0.1295762f, 0.1449129f, 0.1611008f, 0.1780999f, 0.195866f, 0.2143511f, 0.2335031f, 0.2532663f, 0.2735822f, 0.294389f, 0.3156229f, 0.337218f, 0.3591072f, 0.3812224f, 0.4034951f, 0.4258572f, 0.4482413f, 0.4705813f, 0.492813f, 0.5148743f, 0.5367062f, 0.5582525f, 0.5794605f, 0.6002815f, 0.6206707f, 0.6405875f, 0.6599957f, 0.6788633f, 0.6971631f, 0.714872f, 0.7319713f, 0.7484465f, 0.7642871f, 0.7794867f, 0.7940422f, 0.8079541f, 0.8212263f, 0.8338652f, 0.8458801f, 0.8572826f, 0.8680866f, 0.8783077f, 0.8879632f, 0.8970718f, 0.9056532f, 0.9137284f, 0.9213187f, 0.9284464f, 0.9351338f, 0.9414037f, 0.9472789f, 0.9527821f, 0.957936f, 0.962763f, 0.9672851f, 0.971524f, 0.9755009f, 0.9792364f, 0.9827508f, 0.9860625f, 0.9891851f, 0.9921299f, 0.9949077f, 0.9975282f, 1.f ;
:source = "FV3GFS" ;
:grid = "gaussian" ;
:im = 3072 ;
:jm = 1536 ;
:_SuperblockVersion = 2 ;
:_IsNetcdf4 = 0 ;
:_Format = "netCDF-4 classic model" ;
}
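
The _Storage, _ChunkSizes, _DeflateLevel, and _Shuffle entries above are the "special" attributes that ncdump -s prints; they are set through API calls rather than nc_put_att(). A hedged sketch of how a variable like cld_amt would get this layout (file name invented; error handling abbreviated):

#include <netcdf.h>

int
main()
{
    int ncid, dimid[4], varid;
    /* Matches cld_amt:_ChunkSizes above. */
    size_t chunks[4] = {1, 22, 308, 615};

    /* _Format "netCDF-4 classic model" corresponds to this flag pair. */
    if (nc_create("gfs_like.nc", NC_NETCDF4 | NC_CLASSIC_MODEL, &ncid)) return 1;
    if (nc_def_dim(ncid, "time", NC_UNLIMITED, &dimid[0])) return 1;
    if (nc_def_dim(ncid, "pfull", 127, &dimid[1])) return 1;
    if (nc_def_dim(ncid, "grid_yt", 1536, &dimid[2])) return 1;
    if (nc_def_dim(ncid, "grid_xt", 3072, &dimid[3])) return 1;
    if (nc_def_var(ncid, "cld_amt", NC_FLOAT, 4, dimid, &varid)) return 1;
    /* _Storage = "chunked" with the _ChunkSizes above. */
    if (nc_def_var_chunking(ncid, varid, NC_CHUNKED, chunks)) return 1;
    /* _DeflateLevel = 1, no shuffle (args: shuffle, deflate, level). */
    if (nc_def_var_deflate(ncid, varid, 0, 1, 1)) return 1;
    return nc_close(ncid);
}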

View File

@ -0,0 +1,12 @@
#!/bin/sh
# This shell file runs the GFS parallel I/O test.
# Ed Hartnett, 6/30/20
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
echo "*** Running tst_gfs_1..."
@MPIEXEC@ -n 4 ./tst_gfs_data_1
exit 0

View File

@ -1,6 +1,6 @@
#!/bin/sh
# This shell file tests the bm_ile program for parallel I/O.
# This shell file tests the bm_file program for parallel I/O.
# Ed Hartnett
if test "x$srcdir" = x ; then srcdir=`pwd`; fi

View File

@ -18,6 +18,7 @@
/* We will create this file. */
#define FILE_NAME "tst_floats_1D.nc"
#define MAX_TYPE_NAME 6
int
main(int argc, char **argv)
@ -142,7 +143,7 @@ main(int argc, char **argv)
int ncid, dimids[MAX_DIMS], varid;
char dim_name[NC_MAX_NAME + 1], file_name[NC_MAX_NAME + 1];
char type_name[MAX_TYPES][NC_MAX_NAME + 1] = {"floats", "ints", "shorts"};
char type_name[MAX_TYPES][MAX_TYPE_NAME + 1] = {"floats", "ints", "shorts"};
int typeid[MAX_TYPES] = {NC_FLOAT, NC_INT, NC_SHORT};
size_t len;
float fdata[TOTAL_SIZE];
@ -254,7 +255,7 @@ main(int argc, char **argv)
int ncid, dimids[MAX_DIMS], varid;
char dim_name[NC_MAX_NAME + 1], file_name[NC_MAX_NAME + 1];
char type_name[MAX_TYPES][NC_MAX_NAME + 1] = {"floats", "ints", "shorts"};
char type_name[MAX_TYPES][MAX_TYPE_NAME + 1] = {"floats", "ints", "shorts"};
int typeid[MAX_TYPES] = {NC_FLOAT, NC_INT, NC_SHORT};
size_t len;
float fdata[TOTAL_SIZE];

nc_perf/tst_gfs_data_1.c Normal file
View File

@ -0,0 +1,668 @@
/*
Copyright 2020, UCAR/Unidata See COPYRIGHT file for copying and
redistribution conditions.
This program tests and benchmarks netcdf-4 parallel I/O using the
same access pattern as is used by NOAA's GFS when writing and
reading model data. See:
https://github.com/Unidata/netcdf-fortran/issues/264.
Also see the file gfs_sample.cdl to see what is being produced by
this program.
Ed Hartnett, 6/28/20
*/
#include <nc_tests.h>
#include <time.h>
#include <sys/time.h> /* Extra high precision time info. */
#include "err_macros.h"
#include <mpi.h>
#define FILE_NAME "tst_gfs_data_1.nc"
#define NUM_META_VARS 7
#define NUM_META_TRIES 2
#define NDIM2 2
#define NDIM4 4
#define NDIM5 5
#define NUM_PROC 4
#define NUM_SHUFFLE_SETTINGS 2
#ifdef HAVE_H5Z_SZIP
#define NUM_COMPRESSION_FILTERS 2
#else
#define NUM_COMPRESSION_FILTERS 1
#endif
#define NUM_DEFLATE_LEVELS 3
#define NUM_UNLIM_TRIES 1
#define THOUSAND 1000
#define NUM_DATA_VARS 10
#define ERR_AWFUL 1
#define GRID_XT_LEN 3072
#define GRID_YT_LEN 1536
#define PFULL_LEN 127
#define PHALF_LEN 128
#define TIME_LEN 1
char dim_name[NDIM5][NC_MAX_NAME + 1] = {"grid_xt", "grid_yt", "pfull",
"phalf", "time"};
char var_name[NUM_META_VARS][NC_MAX_NAME + 1] = {"grid_xt", "lon", "grid_yt",
"lat", "pfull", "phalf", "time"};
int var_type[NUM_META_VARS] = {NC_DOUBLE, NC_DOUBLE, NC_DOUBLE, NC_DOUBLE,
NC_FLOAT, NC_FLOAT, NC_DOUBLE};
int dim_len[NDIM5] = {GRID_XT_LEN, GRID_YT_LEN, PFULL_LEN, PHALF_LEN,
TIME_LEN};
/* Get the size of a file in bytes. */
int
get_file_size(char *filename, size_t *file_size)
{
FILE *fp;
assert(filename && file_size);
fp = fopen(filename, "r");
if (fp)
{
fseek(fp, 0 , SEEK_END);
*file_size = ftell(fp);
fclose(fp);
}
return 0;
}
/* Check all the metadata, including coordinate variable data. */
int
check_meta(int ncid, int *data_varid, int s, int f, int deflate, int u,
size_t phalf_size, size_t phalf_start, float *phalf, size_t *data_start,
size_t *data_count, size_t pfull_start, size_t pfull_size, float *pfull,
size_t grid_xt_start, size_t grid_xt_size, double *grid_xt, size_t grid_yt_start,
size_t grid_yt_size, double *grid_yt, size_t *latlon_start,
size_t *latlon_count, double *lat, double *lon, int my_rank)
{
int ndims, nvars, natts, unlimdimid;
char name_in[NC_MAX_NAME + 1];
int xtype_in;
int ndims_in;
int dimids_in[NDIM4];
size_t len_in;
double *grid_xt_in, *grid_yt_in;
double *lat_in, *lon_in;
float *phalf_in, *pfull_in;
int d, v, i;
/* Check number of dims, vars, atts. */
if (nc_inq(ncid, &ndims, &nvars, &natts, &unlimdimid)) ERR;
if (ndims != NDIM5 || nvars != NUM_META_VARS + NUM_DATA_VARS ||
natts != 0) ERR;
if (unlimdimid != (u ? 4 : -1)) ERR;
/* Check the dimensions. */
for (d = 0; d < NDIM5; d++)
{
if (nc_inq_dim(ncid, d, name_in, &len_in)) ERR;
if (strcmp(name_in, dim_name[d]) || len_in != dim_len[d]) ERR;
}
/* Check metadata vars. */
for (v = 0; v < NUM_META_VARS; v++)
{
if (nc_inq_var(ncid, v, name_in, &xtype_in, &ndims_in, dimids_in,
&natts)) ERR;
if (strcmp(name_in, var_name[v]) || xtype_in != var_type[v]) ERR;
}
/* Check the values for grid_xt. */
if (!(grid_xt_in = malloc(grid_xt_size * sizeof(double)))) ERR;
if (nc_get_vara_double(ncid, 0, &grid_xt_start, &grid_xt_size, grid_xt_in)) ERR;
for (i = 0; i < grid_xt_size; i++)
if (grid_xt_in[i] != grid_xt[i]) ERR;
free(grid_xt_in);
/* Check the values for lon. */
if (!(lon_in = malloc(latlon_count[0] * latlon_count[1] * sizeof(double)))) ERR;
if (nc_get_vara_double(ncid, 1, latlon_start, latlon_count, lon_in)) ERR;
for (i = 0; i < latlon_count[0] * latlon_count[1]; i++)
if (lon_in[i] != lon[i]) ERR;
free(lon_in);
/* Check the values for grid_yt. */
if (!(grid_yt_in = malloc(grid_yt_size * sizeof(double)))) ERR;
if (nc_get_vara_double(ncid, 2, &grid_yt_start, &grid_yt_size, grid_yt_in)) ERR;
for (i = 0; i < grid_yt_size; i++)
if (grid_yt_in[i] != grid_yt[i]) ERR;
free(grid_yt_in);
/* Check the values for lat. */
if (!(lat_in = malloc(latlon_count[0] * latlon_count[1] * sizeof(double)))) ERR;
if (nc_get_vara_double(ncid, 3, latlon_start, latlon_count, lat_in)) ERR;
for (i = 0; i < latlon_count[0] * latlon_count[1]; i++)
if (lat_in[i] != lat[i]) ERR;
free(lat_in);
/* Check the values for pfull. */
if (!(pfull_in = malloc(pfull_size * sizeof(float)))) ERR;
if (nc_get_vara_float(ncid, 4, &pfull_start, &pfull_size, pfull_in)) ERR;
for (i = 0; i < pfull_size; i++)
if (pfull_in[i] != pfull[i]) ERR;
free(pfull_in);
/* Check the values for phalf. */
if (!(phalf_in = malloc(phalf_size * sizeof(float)))) ERR;
if (nc_get_vara_float(ncid, 5, &phalf_start, &phalf_size, phalf_in)) ERR;
for (i = 0; i < phalf_size; i++)
if (phalf_in[i] != phalf[i]) ERR;
free(phalf_in);
return 0;
}
/* Write all the metadata, including coordinate variable data. */
int
write_meta(int ncid, int *data_varid, int s, int f, int deflate, int u,
size_t phalf_size, size_t phalf_start, float *phalf, size_t *data_start,
size_t *data_count, size_t pfull_start, size_t pfull_size, float *pfull,
size_t grid_xt_start, size_t grid_xt_size, double *grid_xt, size_t grid_yt_start,
size_t grid_yt_size, double *grid_yt, size_t *latlon_start,
size_t *latlon_count, double *lat, double *lon, int my_rank)
{
int dimid[NDIM5];
int dimid_data[NDIM4];
int varid[NUM_META_VARS];
double value_time = 2.0;
int dv;
int res;
/* Turn off fill mode. */
if (nc_set_fill(ncid, NC_NOFILL, NULL)) ERR;
/* Define dimension grid_xt. */
if (nc_def_dim(ncid, dim_name[0], dim_len[0], &dimid[0])) ERR;
/* Define dimension grid_yt. */
if (nc_def_dim(ncid, dim_name[1], dim_len[1], &dimid[1])) ERR;
/* Define variable grid_xt. */
if (nc_def_var(ncid, var_name[0], var_type[0], 1, &dimid[0], &varid[0])) ERR;
if (nc_var_par_access(ncid, varid[0], NC_INDEPENDENT)) ERR;
/* Define variable lon. */
if (nc_def_var(ncid, var_name[1], var_type[1], 2, dimid, &varid[1])) ERR;
if (nc_var_par_access(ncid, varid[1], NC_INDEPENDENT)) ERR;
if (nc_put_att_text(ncid, varid[1], "long_name", strlen("T-cell longitude"), "T-cell longitude")) ERR;
if (nc_put_att_text(ncid, varid[1], "units", strlen("degrees_E"), "degrees_E")) ERR;
if (nc_put_att_text(ncid, varid[0], "cartesian_axis", strlen("X"), "X")) ERR;
/* Define variable grid_yt. */
if (nc_def_var(ncid, var_name[2], var_type[2], 1, &dimid[1], &varid[2])) ERR;
if (nc_var_par_access(ncid, varid[2], NC_INDEPENDENT)) ERR;
/* Define variable lat. */
if (nc_def_var(ncid, var_name[3], var_type[3], 2, dimid, &varid[3])) ERR;
if (nc_var_par_access(ncid, varid[3], NC_INDEPENDENT)) ERR;
if (nc_put_att_text(ncid, varid[3], "long_name", strlen("T-cell latitude"), "T-cell latitude")) ERR;
if (nc_put_att_text(ncid, varid[3], "units", strlen("degrees_N"), "degrees_N")) ERR;
if (nc_put_att_text(ncid, varid[2], "cartesian_axis", strlen("Y"), "Y")) ERR;
/* Define dimension pfull. */
if (nc_def_dim(ncid, dim_name[2], dim_len[2], &dimid[2])) ERR;
/* Define variable pfull and write data. */
if (nc_def_var(ncid, var_name[4], var_type[4], 1, &dimid[2], &varid[4])) ERR;
if (nc_var_par_access(ncid, varid[4], NC_INDEPENDENT)) ERR;
if (nc_enddef(ncid)) ERR;
if (nc_put_vara_float(ncid, varid[4], &pfull_start, &pfull_size, pfull)) ERR;
if (nc_redef(ncid)) ERR;
/* Define dimension phalf. This dim is only used by the phalf coord var. */
if (nc_def_dim(ncid, dim_name[3], dim_len[3], &dimid[3])) ERR;
/* Define coord variable phalf and write data. */
if (nc_def_var(ncid, var_name[5], var_type[5], 1, &dimid[3], &varid[5])) ERR;
if (nc_var_par_access(ncid, varid[5], NC_INDEPENDENT)) ERR;
if (nc_enddef(ncid)) ERR;
if (nc_put_vara_float(ncid, varid[5], &phalf_start, &phalf_size, phalf)) ERR;
if (nc_redef(ncid)) ERR;
/* Define dimension time, sometimes the unlimited dimension,
* sometimes a fixed dim of 1. */
if (nc_def_dim(ncid, dim_name[4], (u ? NC_UNLIMITED : 1), &dimid[4])) ERR;
/* Define variable time and write data. */
if (nc_def_var(ncid, var_name[6], var_type[6], 1, &dimid[4], &varid[6])) ERR;
if (nc_var_par_access(ncid, varid[6], NC_INDEPENDENT)) ERR;
if (nc_enddef(ncid)) ERR;
/* In NOAA code, do all processors write the single time value? */
if (my_rank == 0)
if (nc_put_var_double(ncid, varid[6], &value_time)) ERR;
if (nc_redef(ncid)) ERR;
/* Write variable grid_xt data. */
if (nc_enddef(ncid)) ERR;
if (nc_put_vara_double(ncid, varid[0], &grid_xt_start, &grid_xt_size, grid_xt)) ERR;
if (nc_redef(ncid)) ERR;
/* Write lon data. */
if (nc_enddef(ncid)) ERR;
if (nc_put_vara_double(ncid, varid[1], latlon_start, latlon_count, lon)) ERR;
if (nc_redef(ncid)) ERR;
/* Write grid_yt data. */
if (nc_enddef(ncid)) ERR;
if (nc_put_vara_double(ncid, varid[2], &grid_yt_start, &grid_yt_size, grid_yt)) ERR;
if (nc_redef(ncid)) ERR;
/* Write lat data. */
if (nc_enddef(ncid)) ERR;
if (nc_put_vara_double(ncid, varid[3], latlon_start, latlon_count, lat)) ERR;
/* Specify dimensions for our data vars. */
dimid_data[0] = dimid[4];
dimid_data[1] = dimid[2];
dimid_data[2] = dimid[1];
dimid_data[3] = dimid[0];
/* Define data variables. */
for (dv = 0; dv < NUM_DATA_VARS; dv++)
{
char data_var_name[NC_MAX_NAME + 1];
sprintf(data_var_name, "var_%d", dv);
if (nc_redef(ncid)) ERR;
if (nc_def_var(ncid, data_var_name, NC_FLOAT, NDIM4, dimid_data, &data_varid[dv])) ERR;
/* Setting any filter will only work with HDF5-1.10.3 and later
* versions. */
if (!f)
res = nc_def_var_deflate(ncid, data_varid[dv], s, 1, deflate);
else
{
res = nc_def_var_deflate(ncid, data_varid[dv], s, 0, 0);
if (!res)
res = nc_def_var_szip(ncid, data_varid[dv], 32, 32);
}
#ifdef HDF5_SUPPORTS_PAR_FILTERS
if (res) ERR;
#else
if (res != NC_EINVAL) ERR;
#endif
if (nc_var_par_access(ncid, data_varid[dv], NC_COLLECTIVE)) ERR;
if (nc_enddef(ncid)) ERR;
}
if (nc_redef(ncid)) ERR;
if (nc_put_att_text(ncid, varid[0], "long_name", strlen("T-cell longitude"), "T-cell longitude")) ERR;
if (nc_put_att_text(ncid, varid[0], "units", strlen("degrees_E"), "degrees_E")) ERR;
if (nc_put_att_text(ncid, varid[2], "long_name", strlen("T-cell latitude"), "T-cell latitude")) ERR;
if (nc_put_att_text(ncid, varid[2], "units", strlen("degrees_N"), "degrees_N")) ERR;
if (nc_enddef(ncid)) ERR;
if (nc_redef(ncid)) ERR;
for (dv = 0; dv < NUM_DATA_VARS; dv++)
{
float compress_err = 42.22;
int nbits = 5;
if (nc_put_att_float(ncid, data_varid[dv], "max_abs_compression_error", NC_FLOAT, 1, &compress_err)) ERR;
if (nc_put_att_int(ncid, data_varid[dv], "nbits", NC_INT, 1, &nbits)) ERR;
}
if (nc_enddef(ncid)) ERR;
return 0;
}
/* Based on the MPI rank and number of tasks, calculate the
* decomposition of the 2D lat/lon coordinate variables. */
int
decomp_latlon(int my_rank, int mpi_size, int *dim_len, size_t *latlon_start,
size_t *latlon_count, double **lat, double **lon)
{
int i, j;
assert(dim_len && latlon_start && latlon_count && lat && lon && !*lat &&
!*lon);
/* Size of the local (i.e. for this pe) lon and lat data arrays. */
if (mpi_size == 1)
{
latlon_start[0] = 0;
latlon_start[1] = 0;
latlon_count[0] = dim_len[0];
latlon_count[1] = dim_len[1];
}
else if (mpi_size == 4)
{
latlon_count[0] = dim_len[0]/2;
if (my_rank == 0 || my_rank == 2)
{
latlon_start[0] = 0;
}
else
{
latlon_start[0] = dim_len[0]/2;
}
latlon_count[1] = dim_len[1]/2;
if (my_rank == 0 || my_rank == 1)
{
latlon_start[1] = 0;
}
else
{
latlon_start[1] = dim_len[1]/2;
}
}
else
{
/* Lat/lon decompositions for other task counts (e.g. 36) are not
* yet implemented. */
return ERR_AWFUL;
}
/* Allocate storage. */
if (!(*lon = malloc(latlon_count[0] * latlon_count[1] * sizeof(double)))) ERR;
if (!(*lat = malloc(latlon_count[0] * latlon_count[1] * sizeof(double)))) ERR;
/* Now calculate some latlon values to write. */
for (i = 0; i < latlon_count[0]; i++)
{
for (j = 0; j < latlon_count[1]; j++)
{
(*lon)[j * latlon_count[0] + i] = my_rank * 100 + i + j;
(*lat)[j * latlon_count[0] + i] = my_rank * 100 + i + j;
}
}
printf("%d: latlon_start %ld %ld latlon_count %ld %ld\n", my_rank, latlon_start[0],
latlon_start[1], latlon_count[0], latlon_count[1]);
return 0;
}
/* Based on the MPI rank and number of tasks, calculate the
* decomposition of the 4D data. */
int
decomp_4D(int my_rank, int mpi_size, int *dim_len, size_t *start, size_t *count)
{
start[0] = 0;
count[0] = 1;
count[1] = dim_len[2];
start[1] = 0;
if (mpi_size == 1)
{
start[2] = 0;
start[3] = 0;
count[2] = dim_len[2];
count[3] = dim_len[3];
}
else if (mpi_size == 4)
{
/* 2 x 2 decomposition, so the four writes tile the full
* grid_yt x grid_xt domain: ranks 0 and 1 get the first half of
* grid_yt, ranks 2 and 3 the second; even ranks get the first
* half of grid_xt, odd ranks the second. */
if (my_rank == 0 || my_rank == 1)
start[2] = 0;
else
start[2] = 768;
if (my_rank == 0 || my_rank == 2)
start[3] = 0;
else
start[3] = 1536;
count[2] = 768;
count[3] = 1536;
}
else if (mpi_size == 36)
{
/* 6 x 6 decomposition of the 1536 x 3072 horizontal domain. */
start[2] = (my_rank / 6) * 256;
start[3] = (my_rank % 6) * 512;
count[2] = 256;
count[3] = 512;
}
else
return ERR_AWFUL;
printf("%d: start %ld %ld %ld %ld count %ld %ld %ld %ld\n", my_rank, start[0],
start[1], start[2], start[3], count[0], count[1], count[2], count[3]);
return 0;
}
/* Decompose the grid_xt and grid_yt coordinate vars, and also come up
* with some data. */
int
decomp_grid(int my_rank, int mpi_size, int *dim_len, size_t *grid_xt_start, size_t *grid_xt_size,
size_t *grid_yt_start, size_t *grid_yt_size, double **grid_xt, double **grid_yt)
{
int i;
/* Size of local (i.e. for this pe) grid_xt data. */
*grid_xt_size = dim_len[0]/mpi_size;
*grid_xt_start = my_rank * *grid_xt_size;
if (my_rank == mpi_size - 1)
*grid_xt_size = *grid_xt_size + dim_len[0] % mpi_size;
/* Size of local (i.e. for this pe) grid_yt data. */
*grid_yt_size = dim_len[1]/mpi_size;
*grid_yt_start = my_rank * *grid_yt_size;
if (my_rank == mpi_size - 1)
*grid_yt_size = *grid_yt_size + dim_len[1] % mpi_size;
/* Allocate storage for the grid_xt and grid_yt coordinate
* variable data. */
if (!(*grid_xt = malloc(*grid_xt_size * sizeof(double)))) ERR;
if (!(*grid_yt = malloc(*grid_yt_size * sizeof(double)))) ERR;
/* Fill the grid_xt and grid_yt coordinate data arrays. */
for (i = 0; i < *grid_xt_size; i++)
(*grid_xt)[i] = my_rank * 100 + i;
for (i = 0; i < *grid_yt_size; i++)
(*grid_yt)[i] = my_rank * 100 + i;
return 0;
}
/* Decompose the pfull and phalf coordinate vars. */
int
decomp_p(int my_rank, int mpi_size, size_t *data_count, int *dim_len,
size_t *phalf_start, size_t *phalf_size, float **phalf,
size_t *pfull_start, size_t *pfull_size, float **pfull)
{
int i;
/* Size of local (i.e. for this pe) phalf data. */
*phalf_size = dim_len[3]/mpi_size;
*phalf_start = my_rank * *phalf_size;
if (my_rank == mpi_size - 1)
*phalf_size = *phalf_size + dim_len[3] % mpi_size;
*pfull_size = dim_len[2]/mpi_size;
*pfull_start = my_rank * *pfull_size;
if (my_rank == mpi_size - 1)
*pfull_size = *pfull_size + dim_len[2] % mpi_size;
/* Allocate space on this pe to hold the coordinate var data for this pe. */
if (!(*pfull = malloc(data_count[1] * sizeof(float)))) ERR;
if (!(*phalf = malloc(*phalf_size * sizeof(float)))) ERR;
/* Some fake data for this pe to write. */
for (i = 0; i < data_count[1]; i++)
(*pfull)[i] = my_rank * 100 + i;
for (i = 0; i < *phalf_size; i++)
(*phalf)[i] = my_rank * 100 + i;
return 0;
}
int
main(int argc, char **argv)
{
/* MPI stuff. */
int mpi_size, my_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
/* For timing. */
double meta_start_time, meta_stop_time;
double data_start_time, data_stop_time;
int ncid;
size_t latlon_start[NDIM2], latlon_count[NDIM2];
size_t data_start[NDIM4], data_count[NDIM4];
/* Variables. */
int data_varid[NUM_DATA_VARS];
size_t pfull_size, pfull_start;
float *pfull = NULL;
size_t phalf_size, phalf_start;
float *phalf = NULL;
size_t grid_xt_size, grid_xt_start;
double *grid_xt = NULL;
size_t grid_yt_size, grid_yt_start;
double *grid_yt = NULL;
double *lon = NULL;
double *lat = NULL;
float *value_data;
int deflate_level[NUM_DEFLATE_LEVELS] = {1, 4, 9};
int f, s, u;
int i, j, k, dv, dl;
/* Initialize MPI. */
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
/* Determine 4D data decomposition to write data vars. */
if (decomp_4D(my_rank, mpi_size, dim_len, data_start, data_count)) ERR;
/* Determine 2D data decomposition to write lat/lon coordinate vars. */
if (decomp_latlon(my_rank, mpi_size, dim_len, latlon_start, latlon_count,
&lat, &lon)) ERR;
/* Decompose grid_xt and grid_yt coordinate vars. */
if (decomp_grid(my_rank, mpi_size, dim_len, &grid_xt_start, &grid_xt_size,
&grid_yt_start, &grid_yt_size, &grid_xt, &grid_yt)) ERR;
/* Decompose phalf and pfull. */
if (decomp_p(my_rank, mpi_size, data_count, dim_len, &phalf_start,
&phalf_size, &phalf, &pfull_start, &pfull_size, &pfull)) ERR;
/* Allocate space to hold the data. */
if (!(value_data = malloc(data_count[3] * data_count[2] * data_count[1] *
sizeof(float)))) ERR;
/* Create some data. */
for (k = 0; k < data_count[1]; k++)
for (j = 0; j < data_count[2]; j++)
for (i = 0; i < data_count[3]; i++)
value_data[k * data_count[2] * data_count[3] + j * data_count[3] + i] = my_rank * 100 + i + j + k;
if (my_rank == 0)
{
printf("Benchmarking creation of UFS file.\n");
printf("unlim, comp, level, shuffle, meta wr time (s), data wr rate (MB/s), "
"file size (MB)\n");
}
for (u = 0; u < NUM_UNLIM_TRIES; u++)
{
for (f = 0; f < NUM_COMPRESSION_FILTERS; f++)
{
for (s = 0; s < NUM_SHUFFLE_SETTINGS; s++)
{
for (dl = 0; dl < NUM_DEFLATE_LEVELS; dl++)
{
size_t file_size;
/* No deflate levels for szip. */
if (f && dl) continue;
/* nc_set_log_level(3); */
/* Create a parallel netcdf-4 file. */
meta_start_time = MPI_Wtime();
if (nc_create_par(FILE_NAME, NC_NETCDF4, comm, info,
&ncid)) ERR;
if (write_meta(ncid, data_varid, s, f, deflate_level[dl], u,
phalf_size, phalf_start, phalf,
data_start, data_count, pfull_start, pfull_size, pfull, grid_xt_start,
grid_xt_size, grid_xt, grid_yt_start,
grid_yt_size, grid_yt, latlon_start,
latlon_count, lat, lon, my_rank)) ERR;
/* Stop the timer for metadata writes. */
MPI_Barrier(MPI_COMM_WORLD);
meta_stop_time = MPI_Wtime();
data_start_time = MPI_Wtime();
/* Write one record each of the data variables. */
for (dv = 0; dv < NUM_DATA_VARS; dv++)
{
if (nc_put_vara_float(ncid, data_varid[dv], data_start,
data_count, value_data)) ERR;
if (nc_redef(ncid)) ERR;
}
/* Close the file. */
if (nc_close(ncid)) ERR;
/* Stop the data timer. */
MPI_Barrier(MPI_COMM_WORLD);
data_stop_time = MPI_Wtime();
/* Get the file size. */
if (get_file_size(FILE_NAME, &file_size)) ERR;
/* Check the file metadata for correctness. */
if (nc_open_par(FILE_NAME, NC_NOWRITE, comm, info, &ncid)) ERR;
if (check_meta(ncid, data_varid, s, f, deflate_level[dl], u,
phalf_size, phalf_start, phalf,
data_start, data_count, pfull_start, pfull_size,
pfull, grid_xt_start, grid_xt_size, grid_xt,
grid_yt_start, grid_yt_size, grid_yt, latlon_start,
latlon_count, lat, lon, my_rank)) ERR;
if (nc_close(ncid)) ERR;
/* Print out results. */
if (my_rank == 0)
{
float data_size, data_rate;
/* Data vars use pfull (dim_len[2]); the cast avoids int overflow. */
data_size = (float)NUM_DATA_VARS * dim_len[0] * dim_len[1] *
dim_len[2] * sizeof(float) / 1000000;
data_rate = data_size / (data_stop_time - data_start_time);
printf("%d %s, %d, %d, %g, %g, %g\n", u, (f ? "szip" : "zlib"),
deflate_level[dl], s, meta_stop_time - meta_start_time,
data_rate, (float)file_size/1000000);
}
MPI_Barrier(MPI_COMM_WORLD);
} /* next deflate level */
} /* next shuffle filter test */
} /* next compression filter (zlib and szip) */
} /* next unlim_try */
/* Free resources. */
if (grid_xt)
free(grid_xt);
if (grid_yt)
free(grid_yt);
if (pfull)
free(pfull);
if (phalf)
free(phalf);
if (lon)
free(lon);
if (lat)
free(lat);
free(value_data);
if (!my_rank)
SUMMARIZE_ERR;
/* Shut down MPI. */
MPI_Finalize();
if (!my_rank)
FINAL_RESULTS;
return 0;
}

View File

@ -2,8 +2,8 @@
Copyright 2018 University Corporation for Atmospheric Research/Unidata
See COPYRIGHT file for conditions of use.
Test internal netcdf-4 file code.
$Id$
Test memory use of a netCDF-4 file with unlimited dimensions.
Ed Hartnett
*/
#include <config.h>

View File

@ -92,6 +92,7 @@ IF(TEST_PARALLEL4)
build_bin_test(tst_parallel5)
build_bin_test(tst_parallel_zlib)
build_bin_test(tst_parallel_compress)
build_bin_test(tst_gfs_data_1)
build_bin_test(tst_nc4perf)
build_bin_test(tst_mode)
build_bin_test(tst_simplerw_coll_r)

View File

@ -59,4 +59,5 @@ if test "@HAS_PAR_FILTERS@" = "yes"; then
echo "Parallel I/O more tests with zlib and szip (if present in HDF5)."
@MPIEXEC@ -n 1 ./tst_parallel_compress
@MPIEXEC@ -n 4 ./tst_parallel_compress
fi