Merge branch 'pnetcdf_driver' of github.com:wkliao/netcdf-c into pnetcdf_driver

This commit is contained in:
Wei-keng Liao 2018-09-20 11:50:27 -05:00
commit 0853b3c9ee
37 changed files with 393 additions and 1222 deletions

View File

@ -1535,6 +1535,7 @@ AC_CONFIG_FILES(dap4_test/findtestserver4.c:ncdap_test/findtestserver.c.in)
AC_MSG_NOTICE([generating header files and makefiles]) AC_MSG_NOTICE([generating header files and makefiles])
AC_CONFIG_FILES([nc_test4/run_par_test.sh], [chmod ugo+x nc_test4/run_par_test.sh]) AC_CONFIG_FILES([nc_test4/run_par_test.sh], [chmod ugo+x nc_test4/run_par_test.sh])
AC_CONFIG_FILES([nc_test4/run_par_bm_test.sh], [chmod ugo+x nc_test4/run_par_bm_test.sh])
AC_CONFIG_FILES([nc-config], [chmod 755 nc-config]) AC_CONFIG_FILES([nc-config], [chmod 755 nc-config])
AC_CONFIG_FILES([Makefile AC_CONFIG_FILES([Makefile
netcdf.pc netcdf.pc

View File

@ -80,4 +80,15 @@ int ERR_report(int stat, const char* file, int line)
return 0; \ return 0; \
} while (0) } while (0)
/* This macro does the same as FINAL_RESULTS, but without the success
* message. */
#define FINAL_RESULTS_QUIET do { \
if (total_err) \
{ \
printf("%d errors detected! Sorry!\n", total_err); \
return 2; \
} \
return 0; \
} while (0)
#endif /* _ERR_MACROS_H */ #endif /* _ERR_MACROS_H */

View File

@ -22,7 +22,6 @@
#include "netcdf_par.h" #include "netcdf_par.h"
#endif #endif
#include "netcdf.h" #include "netcdf.h"
//#include "err_macros.h"
/** NC_MAX_DIMS for tests. Allows different NC_MAX_DIMS values /** NC_MAX_DIMS for tests. Allows different NC_MAX_DIMS values
* without breaking this test with a heap or stack overflow. */ * without breaking this test with a heap or stack overflow. */

View File

@ -61,9 +61,10 @@ nc_put_att_string(int ncid, int varid, const char *name,
\ingroup attributes \ingroup attributes
Write a text attribute. Write a text attribute.
Add or change a text attribute. If this attribute is new, Add or change a text attribute. If this attribute is new, or if the
or if the space required to store the attribute is greater than space required to store the attribute is greater than before, the
before, the netCDF dataset must be in define mode. netCDF dataset must be in define mode for classic formats (or
netCDF-4/HDF5 with NC_CLASSIC_MODEL).
Although it's possible to create attributes of all types, text and Although it's possible to create attributes of all types, text and
double attributes are adequate for most purposes. double attributes are adequate for most purposes.
@ -153,7 +154,8 @@ Write an attribute.
The function nc_put_att_ type adds or changes a variable attribute or The function nc_put_att_ type adds or changes a variable attribute or
global attribute of an open netCDF dataset. If this attribute is new, global attribute of an open netCDF dataset. If this attribute is new,
or if the space required to store the attribute is greater than or if the space required to store the attribute is greater than
before, the netCDF dataset must be in define mode. before, the netCDF dataset must be in define mode for classic formats
(or netCDF-4/HDF5 with NC_CLASSIC_MODEL).
With netCDF-4 files, nc_put_att will notice if you are writing a With netCDF-4 files, nc_put_att will notice if you are writing a
_FillValue attribute, and will tell the HDF5 layer to use the _FillValue attribute, and will tell the HDF5 layer to use the

View File

@ -14,7 +14,7 @@
#include "config.h" #include "config.h"
#include "hdf5internal.h" #include "hdf5internal.h"
extern int nc4_vararray_add(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var); static void dumpopenobjects(NC_FILE_INFO_T* h5);
/** @internal When we have open objects at file close, should /** @internal When we have open objects at file close, should
we log them or print to stdout. Default is to log. */ we log them or print to stdout. Default is to log. */

View File

@ -818,7 +818,7 @@ int
nc_def_var_chunking_ints(int ncid, int varid, int contiguous, int *chunksizesp) nc_def_var_chunking_ints(int ncid, int varid, int contiguous, int *chunksizesp)
{ {
NC_VAR_INFO_T *var; NC_VAR_INFO_T *var;
size_t *cs = NULL; size_t *cs;
int i, retval; int i, retval;
/* Get pointer to the var. */ /* Get pointer to the var. */

View File

@ -25,18 +25,13 @@
#define NC_HDF5_MAX_NAME 1024 /**< @internal Max size of HDF5 name. */ #define NC_HDF5_MAX_NAME 1024 /**< @internal Max size of HDF5 name. */
#define MAXNAME 1024 /**< Max HDF5 name. */
/** @internal HDF5 object types. */
static unsigned int OTYPES[5] = {H5F_OBJ_FILE, H5F_OBJ_DATASET, H5F_OBJ_GROUP,
H5F_OBJ_DATATYPE, H5F_OBJ_ATTR};
/** /**
* @internal Flag attributes in a linked list as dirty. * @internal Flag attributes in a linked list as dirty.
* *
* @param attlist List of attributes, may be NULL. * @param attlist List of attributes, may be NULL.
* *
* @return NC_NOERR No error. * @return NC_NOERR No error.
* @author Dennis Heimbigner
*/ */
static int static int
flag_atts_dirty(NCindex *attlist) { flag_atts_dirty(NCindex *attlist) {
@ -55,7 +50,6 @@ flag_atts_dirty(NCindex *attlist) {
} }
return NC_NOERR; return NC_NOERR;
} }
/** /**
@ -3218,23 +3212,25 @@ exit:
} }
/** /**
* @internal * @internal Report information about an open HDF5 object. This is
* called on any still-open objects when a HDF5 file close is
* attempted.
* *
* @param uselog * @param uselog If true, send output to LOG not stderr.
* @param id HDF5 ID. * @param id HDF5 ID of open object.
* @param type * @param type Type of HDF5 object, file, dataset, etc.
* *
* @return NC_NOERR No error. * @author Dennis Heimbigner
*/ */
void void
reportobject(int uselog, hid_t id, unsigned int type) reportobject(int uselog, hid_t id, unsigned int type)
{ {
char name[MAXNAME]; char name[NC_HDF5_MAX_NAME];
ssize_t len; ssize_t len;
const char* typename = NULL; const char* typename = NULL;
long long printid = (long long)id; long long printid = (long long)id;
len = H5Iget_name(id, name, MAXNAME); len = H5Iget_name(id, name, NC_HDF5_MAX_NAME);
if(len < 0) return; if(len < 0) return;
name[len] = '\0'; name[len] = '\0';
@ -3245,7 +3241,7 @@ reportobject(int uselog, hid_t id, unsigned int type)
case H5F_OBJ_DATATYPE: typename = "Datatype"; break; case H5F_OBJ_DATATYPE: typename = "Datatype"; break;
case H5F_OBJ_ATTR: case H5F_OBJ_ATTR:
typename = "Attribute"; typename = "Attribute";
len = H5Aget_name(id, MAXNAME, name); len = H5Aget_name(id, NC_HDF5_MAX_NAME, name);
if(len < 0) len = 0; if(len < 0) len = 0;
name[len] = '\0'; name[len] = '\0';
break; break;
@ -3270,7 +3266,7 @@ reportobject(int uselog, hid_t id, unsigned int type)
* @param ntypes Number of types. * @param ntypes Number of types.
* @param otypes Pointer that gets number of open types. * @param otypes Pointer that gets number of open types.
* *
* @return ::NC_NOERR No error. * @author Dennis Heimbigner
*/ */
static void static void
reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes) reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes)
@ -3307,11 +3303,14 @@ reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes)
* @param uselog * @param uselog
* @param fid HDF5 file ID. * @param fid HDF5 file ID.
* *
* @return NC_NOERR No error. * @author Dennit Heimbigner
*/ */
void void
reportopenobjects(int uselog, hid_t fid) reportopenobjects(int uselog, hid_t fid)
{ {
unsigned int OTYPES[5] = {H5F_OBJ_FILE, H5F_OBJ_DATASET, H5F_OBJ_GROUP,
H5F_OBJ_DATATYPE, H5F_OBJ_ATTR};
reportopenobjectsT(uselog, fid ,5, OTYPES); reportopenobjectsT(uselog, fid ,5, OTYPES);
} }
@ -3320,6 +3319,7 @@ reportopenobjects(int uselog, hid_t fid)
* *
* @param h5 file object * @param h5 file object
* *
* @author Dennis Heimbigner
*/ */
void void
showopenobjects5(NC_FILE_INFO_T* h5) showopenobjects5(NC_FILE_INFO_T* h5)
@ -3341,6 +3341,7 @@ showopenobjects5(NC_FILE_INFO_T* h5)
* *
* @param ncid file id * @param ncid file id
* *
* @author Dennis Heimbigner
*/ */
void void
showopenobjects(int ncid) showopenobjects(int ncid)

View File

@ -61,10 +61,9 @@ IF(BUILD_BENCHMARKS)
add_sh_test(nc_test4 run_bm_elena) add_sh_test(nc_test4 run_bm_elena)
add_sh_test(nc_test4 run_bm_test2) add_sh_test(nc_test4 run_bm_test2)
add_sh_test(nc_test4 run_tst_chunks) add_sh_test(nc_test4 run_tst_chunks)
add_sh_test(nc_test4 run_bm_ar4) add_sh_test(nc_test4 run_knmi_bm)
add_sh_test(nc_test4 run_get_knmi_files)
SET(NC4_TESTS ${NC4_TESTS} tst_create_files bm_file tst_chunks3 tst_ar4 tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts tst_files2 tst_files3 tst_ar5 tst_h_files3 tst_mem tst_knmi bm_netcdf4_recs) SET(NC4_TESTS ${NC4_TESTS} tst_create_files bm_file tst_chunks3 tst_ar4 tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts tst_files2 tst_files3 tst_h_files3 tst_mem tst_knmi bm_netcdf4_recs)
IF(TEST_PARALLEL) IF(TEST_PARALLEL)
add_sh_test(nc_test4 run_par_bm_test) add_sh_test(nc_test4 run_par_bm_test)
ENDIF() ENDIF()

View File

@ -1,8 +1,10 @@
# This is part of the netCDF package. # This is part of the netCDF package. Copyright 2005-2018 University
# Copyright 2005 University Corporation for Atmospheric Research/Unidata # Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
# See COPYRIGHT file for conditions of use. # conditions of use.
#
# This directory holds tests for netCDF-4. It is skipped if netCDF-4
# is not enabled.
# #
# This entire directory will be skipped if netCDF-4 is not enabled.
# Ed Hartnett, Ward Fisher # Ed Hartnett, Ward Fisher
# Put together AM_CPPFLAGS and AM_LDFLAGS. # Put together AM_CPPFLAGS and AM_LDFLAGS.
@ -15,6 +17,7 @@ include $(top_srcdir)/lib_flags.am
# Note which tests depend on other tests. necessary for make -j check # Note which tests depend on other tests. necessary for make -j check
TEST_EXTENSIONS = .sh TEST_EXTENSIONS = .sh
extradir = extradir =
# Link to our assembled library. # Link to our assembled library.
AM_LDFLAGS += ${top_builddir}/liblib/libnetcdf.la AM_LDFLAGS += ${top_builddir}/liblib/libnetcdf.la
LDADD = ${top_builddir}/liblib/libnetcdf.la LDADD = ${top_builddir}/liblib/libnetcdf.la
@ -64,7 +67,7 @@ endif # BUILD_V2
if BUILD_BENCHMARKS if BUILD_BENCHMARKS
check_PROGRAMS += tst_create_files bm_file tst_chunks3 tst_ar4 \ check_PROGRAMS += tst_create_files bm_file tst_chunks3 tst_ar4 \
tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts \ tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts \
tst_files2 tst_files3 tst_ar5 tst_mem tst_knmi bm_netcdf4_recs tst_files2 tst_files3 tst_mem tst_knmi bm_netcdf4_recs
bm_netcdf4_recs_SOURCES = bm_netcdf4_recs.c tst_utils.c bm_netcdf4_recs_SOURCES = bm_netcdf4_recs.c tst_utils.c
bm_many_atts_SOURCES = bm_many_atts.c tst_utils.c bm_many_atts_SOURCES = bm_many_atts.c tst_utils.c
@ -77,23 +80,21 @@ tst_h_many_atts_SOURCES = tst_h_many_atts.c tst_utils.c
bm_file_SOURCES = bm_file.c tst_utils.c bm_file_SOURCES = bm_file.c tst_utils.c
tst_knmi_SOURCES = tst_knmi.c tst_utils.c tst_knmi_SOURCES = tst_knmi.c tst_utils.c
#WARNING: test_knmi depends on run_get_knmi_files.sh,
# so they must appear in the appropriate order.
TESTS += tst_ar4_3d tst_create_files run_bm_test1.sh run_bm_elena.sh \ TESTS += tst_ar4_3d tst_create_files run_bm_test1.sh run_bm_elena.sh \
run_bm_test2.sh run_tst_chunks.sh tst_files2 tst_files3 tst_ar5 \ run_bm_test2.sh run_tst_chunks.sh tst_files2 tst_files3 tst_mem \
tst_mem run_get_knmi_files.sh tst_knmi run_knmi_bm.sh
# tst_create_files creates files for other tests.
run_bm_test1.log: tst_create_files.log
run_bm_test2.log: tst_create_files.log
run_bm_elena.log: tst_create_files.log
# This will run a parallel I/O benchmark for parallel builds. # This will run a parallel I/O benchmark for parallel builds.
if TEST_PARALLEL4 if TEST_PARALLEL4
TESTS += run_par_bm_test.sh TESTS += run_par_bm_test.sh
# This benchmark depends on tst_create_files being run.
run_par_bm_test.log: tst_create_files.log
endif # TEST_PARALLEL4 endif # TEST_PARALLEL4
benchmarks: check
./run_bm_radar_2D.sh
./run_bm_radar_2D_compression1.sh
./run_bm.sh
./run_tst_chunks.sh
./run_bm_ar4.sh
endif # BUILD_BENCHMARKS endif # BUILD_BENCHMARKS
# Szip Tests (requires ncdump) # Szip Tests (requires ncdump)
@ -126,13 +127,10 @@ check_PROGRAMS += bigmeta openbigmeta tst_attsperf
TESTS += tst_attsperf perftest.sh TESTS += tst_attsperf perftest.sh
endif endif
EXTRA_DIST = run_par_test.sh.in run_bm.sh run_bm_test1.sh \ EXTRA_DIST = run_par_test.sh.in run_par_bm_test.sh.in run_bm_test1.sh \
run_bm_test2.sh run_bm_radar_2D.sh run_bm_radar_2D_compression1.sh \ run_bm_test2.sh run_bm_elena.sh run_tst_chunks.sh \
run_par_bm_test.sh run_bm_elena.sh run_par_bm_radar_2D.sh \ ref_tst_compounds.nc ref_tst_xplatform2_1.nc ref_tst_xplatform2_2.nc \
run_bm_radar_2D_endianness1.sh run_tst_chunks.sh ref_chunks1.cdl \ ref_tst_dims.nc ref_tst_interops4.nc run_knmi_bm.sh CMakeLists.txt \
ref_chunks2.cdl run_bm_ar4.sh ref_tst_compounds.nc \
ref_tst_xplatform2_1.nc ref_tst_xplatform2_2.nc ref_tst_dims.nc \
ref_tst_interops4.nc run_get_knmi_files.sh CMakeLists.txt \
run_grp_rename.sh tst_h5_endians.c tst_atts_string_rewrite.c \ run_grp_rename.sh tst_h5_endians.c tst_atts_string_rewrite.c \
tst_put_vars_two_unlim_dim.c tst_empty_vlen_unlim.c \ tst_put_vars_two_unlim_dim.c tst_empty_vlen_unlim.c \
run_empty_vlen_test.sh ref_hdf5_compat1.nc ref_hdf5_compat2.nc \ run_empty_vlen_test.sh ref_hdf5_compat1.nc ref_hdf5_compat2.nc \
@ -141,13 +139,13 @@ ref_szip.cdl tst_filter.sh bzip2.cdl filtered.cdl unfiltered.cdl \
ref_bzip2.c findplugin.in perftest.sh ref_bzip2.c findplugin.in perftest.sh
CLEANFILES = tst_mpi_parallel.bin cdm_sea_soundings.nc bm_chunking.nc \ CLEANFILES = tst_mpi_parallel.bin cdm_sea_soundings.nc bm_chunking.nc \
bm_radar.nc bm_radar1.nc radar_*.txt tst_floats_1D.cdl floats_1D_3.nc \ tst_floats_1D.cdl floats_1D_3.nc floats_1D.cdl tst_*.nc \
floats_1D.cdl tst_*.nc tst_floats2_*.cdl tst_ints2_*.cdl \ tst_floats2_*.cdl tst_ints2_*.cdl tst_shorts2_*.cdl tst_elena_*.cdl \
tst_shorts2_*.cdl tst_elena_*.cdl tst_simple*.cdl tst_chunks.cdl \ tst_simple*.cdl tst_chunks.cdl pr_A1.* tauu_A1.* usi_01.* thetau_01.* \
pr_A1.* tauu_A1.* usi_01.* thetau_01.* tst_*.h5 tst_grp_rename.cdl \ tst_*.h5 tst_grp_rename.cdl tst_grp_rename.dmp ref_grp_rename.cdl \
tst_grp_rename.dmp ref_grp_rename.cdl foo1.nc tst_*.h4 test.nc \ foo1.nc tst_*.h4 test.nc testszip.nc test.h5 szip_dump.cdl \
testszip.nc test.h5 szip_dump.cdl perftest.txt bigmeta.nc bigvars.nc \ perftest.txt bigmeta.nc bigvars.nc run_par_test.sh *.gz MSGCPP_*.nc \
run_par_test.sh floats*.nc floats*.cdl shorts*.nc shorts*.cdl ints*.nc ints*.cdl
DISTCLEANFILES = findplugin.sh DISTCLEANFILES = findplugin.sh

View File

@ -10,17 +10,11 @@
handled by this program. (Input files may be in netCDF-4 format, but handled by this program. (Input files may be in netCDF-4 format, but
they must conform to the classic model for this program to work.) they must conform to the classic model for this program to work.)
For the 3.7 and 4.0 netCDF releases, this program is not expected Ed Hartnett
for general use. It may be made safer and more general in future
releases, but for now, users should use this code with caution.
$Id: bm_file.c,v 1.64 2010/01/11 19:27:11 ed Exp $
*/ */
#include <nc_tests.h> /* The ERR macro is here... */ #include <nc_tests.h> /* The ERR macro is here... */
#include <err_macros.h> #include <err_macros.h>
#include <stdio.h>
#include <string.h>
#include <time.h> #include <time.h>
#include <sys/time.h> /* Extra high precision time info. */ #include <sys/time.h> /* Extra high precision time info. */
#include <math.h> #include <math.h>
@ -30,7 +24,6 @@
#ifdef USE_PARALLEL #ifdef USE_PARALLEL
#include <mpi.h> #include <mpi.h>
#endif #endif
#include <netcdf.h>
#define MILLION 1000000 #define MILLION 1000000
#define BAD -99 #define BAD -99
@ -69,6 +62,10 @@ return 2; \
} while (0) } while (0)
#endif #endif
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
/* This function will fill the start and count arrays for the reads /* This function will fill the start and count arrays for the reads
* and writes. */ * and writes. */
static int static int
@ -280,11 +277,11 @@ cmp_file(char *file1, char *file2, int *meta_read_us, int *data_read_us,
if (use_par) if (use_par)
{ {
#ifdef USE_PARALLEL #ifdef USE_PARALLEL
if ((ret = nc_open_par(file1, 0, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid1))) if ((ret = nc_open_par(file1, NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid1)))
ERR1(ret); ERR1(ret);
MPI_Barrier(MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD);
ftime = MPI_Wtime(); ftime = MPI_Wtime();
if ((ret = nc_open_par(file2, 0, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid2))) if ((ret = nc_open_par(file2, NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid2)))
ERR1(ret); ERR1(ret);
*meta_read_us += (MPI_Wtime() - ftime) * MILLION; *meta_read_us += (MPI_Wtime() - ftime) * MILLION;
#else #else
@ -482,7 +479,7 @@ int copy_file(char *file_name_in, char *file_name_out, int cmode_out,
{ {
#ifdef USE_PARALLEL #ifdef USE_PARALLEL
ftime = MPI_Wtime(); ftime = MPI_Wtime();
if ((ret = nc_open_par(file_name_in, 0, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid_in))) if ((ret = nc_open_par(file_name_in, NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid_in)))
ERR1(ret); ERR1(ret);
*meta_read_us += (MPI_Wtime() - ftime) * MILLION; *meta_read_us += (MPI_Wtime() - ftime) * MILLION;
#else #else
@ -512,7 +509,7 @@ int copy_file(char *file_name_in, char *file_name_out, int cmode_out,
if (use_par) if (use_par)
{ {
#ifdef USE_PARALLEL #ifdef USE_PARALLEL
if ((ret = nc_create_par(file_name_out, cmode_out, MPI_COMM_WORLD, if ((ret = nc_create_par(file_name_out, cmode_out|NC_MPIIO, MPI_COMM_WORLD,
MPI_INFO_NULL, &ncid_out))) MPI_INFO_NULL, &ncid_out)))
ERR1(ret); ERR1(ret);
#else #else
@ -1151,5 +1148,5 @@ main(int argc, char **argv)
MPI_Finalize(); MPI_Finalize();
#endif #endif
return 0; FINAL_RESULTS_QUIET;
} }

View File

@ -1,10 +1,10 @@
/* /* This is part of the netCDF package. Copyright 2005-2918 University
Copyright 2010, UCAR/Unidata Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
See COPYRIGHT file for copying and redistribution conditions. conditions of use.
This program benchmarks creating a netCDF file with many objects. This program benchmarks creating a netCDF file with many objects.
$Id $ Ed Hartnett
*/ */
#include <config.h> #include <config.h>
@ -19,6 +19,10 @@ $Id $
/* We will create this file. */ /* We will create this file. */
#define FILE_NAME "bm_many_atts.nc" #define FILE_NAME "bm_many_atts.nc"
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
struct timeval start_time, end_time, diff_time; struct timeval start_time, end_time, diff_time;
@ -71,5 +75,5 @@ int main(int argc, char **argv)
} }
} }
nc_close(ncid); nc_close(ncid);
return(0); FINAL_RESULTS;
} }

View File

@ -1,8 +1,10 @@
/** \file /* This is part of the netCDF package. Copyright 2018 University
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
conditions of use. See www.unidata.ucar.edu for more info.
This program benchmarks creating a netCDF file with many objects. This program benchmarks creating a netCDF file with many objects.
Copyright 2010, UCAR/Unidata See COPYRIGHT file for copying and Ed Hartnett
redistribution conditions.
*/ */
#include <config.h> #include <config.h>
@ -17,6 +19,10 @@ redistribution conditions.
/* We will create this file. */ /* We will create this file. */
#define FILE_NAME "bm_many_objs.nc" #define FILE_NAME "bm_many_objs.nc"
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
struct timeval start_time, end_time, diff_time; struct timeval start_time, end_time, diff_time;
@ -93,5 +99,5 @@ int main(int argc, char **argv)
} }
} }
nc_close(ncid); nc_close(ncid);
return(0); FINAL_RESULTS;
} }

View File

@ -1,17 +1,15 @@
/** \file /* This is part of the netCDF package. Copyright 2005 University
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
conditions of use. See www.unidata.ucar.edu for more info.
This program benchmarks creating a netCDF file and reading records. This program benchmarks creating a netCDF file and reading records.
Copyright 2011, UCAR/Unidata See COPYRIGHT file for copying and Ed Hartnett
redistribution conditions.
*/ */
#include <config.h> #include <config.h>
#include <nc_tests.h> #include <nc_tests.h>
#include "err_macros.h" #include "err_macros.h"
#include <netcdf.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h> #include <time.h>
#include <sys/time.h> /* Extra high precision time info. */ #include <sys/time.h> /* Extra high precision time info. */
@ -20,17 +18,9 @@ redistribution conditions.
int main(int argc, char **argv) int main(int argc, char **argv)
{ {
struct timeval start_time, end_time, diff_time; printf("Running benchmark...");
double sec; {
int nitem = 10000; /* default number of objects of each type */
int i;
int ncid; int ncid;
int data[] = {42};
int g, grp, numgrp;
char gname[16];
int v, var, numvar, vn, vleft, nvars;
int stat; /* return status */
/* dimension ids */ /* dimension ids */
int basetime_dim; int basetime_dim;
@ -88,8 +78,7 @@ int main(int argc, char **argv)
if (nc_put_att_text(ncid, temperature_2m_id, "cell_methods", 10, "area: mean")) ERR; if (nc_put_att_text(ncid, temperature_2m_id, "cell_methods", 10, "area: mean")) ERR;
if (nc_put_att_text(ncid, temperature_2m_id, "coordinates", 5, "level")) ERR; if (nc_put_att_text(ncid, temperature_2m_id, "coordinates", 5, "level")) ERR;
if (nc_close(ncid)) ERR; if (nc_close(ncid)) ERR;
}
if (gettimeofday(&start_time, NULL)) ERR; SUMMARIZE_ERR;
FINAL_RESULTS;
return(0);
} }

View File

@ -1,248 +0,0 @@
netcdf tst_chunks {
dimensions:
dim1 = 6 ;
dim2 = 12 ;
dim3 = 4 ;
variables:
float var_contiguous(dim1, dim2, dim3) ;
var_contiguous:_Storage = "contiguous" ;
var_contiguous:_Endianness = "little" ;
float var_chunked(dim1, dim2, dim3) ;
var_chunked:_Storage = "chunked" ;
var_chunked:_ChunkSizes = 2, 3, 1 ;
var_chunked:_Endianness = "little" ;
float var_compressed(dim1, dim2, dim3) ;
var_compressed:_Storage = "chunked" ;
var_compressed:_ChunkSizes = 2, 3, 1 ;
var_compressed:_DeflateLevel = 1 ;
var_compressed:_Endianness = "little" ;
// global attributes:
:_NCProperties = "version=1|netcdflibversion=4.4.2-development|hdf5libversion=1.8.17" ;
:_SuperblockVersion = 0 ;
:_IsNetcdf4 = 1 ;
:_Format = "netCDF-4 classic model" ;
data:
var_contiguous =
0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4,
5, 5, 5, 5,
6, 6, 6, 6,
7, 7, 7, 7,
8, 8, 8, 8,
9, 9, 9, 9,
10, 10, 10, 10,
11, 11, 11, 11,
12, 12, 12, 12,
13, 13, 13, 13,
14, 14, 14, 14,
15, 15, 15, 15,
16, 16, 16, 16,
17, 17, 17, 17,
18, 18, 18, 18,
19, 19, 19, 19,
20, 20, 20, 20,
21, 21, 21, 21,
22, 22, 22, 22,
23, 23, 23, 23,
24, 24, 24, 24,
25, 25, 25, 25,
26, 26, 26, 26,
27, 27, 27, 27,
28, 28, 28, 28,
29, 29, 29, 29,
30, 30, 30, 30,
31, 31, 31, 31,
32, 32, 32, 32,
33, 33, 33, 33,
34, 34, 34, 34,
35, 35, 35, 35,
36, 36, 36, 36,
37, 37, 37, 37,
38, 38, 38, 38,
39, 39, 39, 39,
40, 40, 40, 40,
41, 41, 41, 41,
42, 42, 42, 42,
43, 43, 43, 43,
44, 44, 44, 44,
45, 45, 45, 45,
46, 46, 46, 46,
47, 47, 47, 47,
48, 48, 48, 48,
49, 49, 49, 49,
50, 50, 50, 50,
51, 51, 51, 51,
52, 52, 52, 52,
53, 53, 53, 53,
54, 54, 54, 54,
55, 55, 55, 55,
56, 56, 56, 56,
57, 57, 57, 57,
58, 58, 58, 58,
59, 59, 59, 59,
60, 60, 60, 60,
61, 61, 61, 61,
62, 62, 62, 62,
63, 63, 63, 63,
64, 64, 64, 64,
65, 65, 65, 65,
66, 66, 66, 66,
67, 67, 67, 67,
68, 68, 68, 68,
69, 69, 69, 69,
70, 70, 70, 70,
71, 71, 71, 71 ;
var_chunked =
0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4,
5, 5, 5, 5,
6, 6, 6, 6,
7, 7, 7, 7,
8, 8, 8, 8,
9, 9, 9, 9,
10, 10, 10, 10,
11, 11, 11, 11,
12, 12, 12, 12,
13, 13, 13, 13,
14, 14, 14, 14,
15, 15, 15, 15,
16, 16, 16, 16,
17, 17, 17, 17,
18, 18, 18, 18,
19, 19, 19, 19,
20, 20, 20, 20,
21, 21, 21, 21,
22, 22, 22, 22,
23, 23, 23, 23,
24, 24, 24, 24,
25, 25, 25, 25,
26, 26, 26, 26,
27, 27, 27, 27,
28, 28, 28, 28,
29, 29, 29, 29,
30, 30, 30, 30,
31, 31, 31, 31,
32, 32, 32, 32,
33, 33, 33, 33,
34, 34, 34, 34,
35, 35, 35, 35,
36, 36, 36, 36,
37, 37, 37, 37,
38, 38, 38, 38,
39, 39, 39, 39,
40, 40, 40, 40,
41, 41, 41, 41,
42, 42, 42, 42,
43, 43, 43, 43,
44, 44, 44, 44,
45, 45, 45, 45,
46, 46, 46, 46,
47, 47, 47, 47,
48, 48, 48, 48,
49, 49, 49, 49,
50, 50, 50, 50,
51, 51, 51, 51,
52, 52, 52, 52,
53, 53, 53, 53,
54, 54, 54, 54,
55, 55, 55, 55,
56, 56, 56, 56,
57, 57, 57, 57,
58, 58, 58, 58,
59, 59, 59, 59,
60, 60, 60, 60,
61, 61, 61, 61,
62, 62, 62, 62,
63, 63, 63, 63,
64, 64, 64, 64,
65, 65, 65, 65,
66, 66, 66, 66,
67, 67, 67, 67,
68, 68, 68, 68,
69, 69, 69, 69,
70, 70, 70, 70,
71, 71, 71, 71 ;
var_compressed =
0, 0, 0, 0,
1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3,
4, 4, 4, 4,
5, 5, 5, 5,
6, 6, 6, 6,
7, 7, 7, 7,
8, 8, 8, 8,
9, 9, 9, 9,
10, 10, 10, 10,
11, 11, 11, 11,
12, 12, 12, 12,
13, 13, 13, 13,
14, 14, 14, 14,
15, 15, 15, 15,
16, 16, 16, 16,
17, 17, 17, 17,
18, 18, 18, 18,
19, 19, 19, 19,
20, 20, 20, 20,
21, 21, 21, 21,
22, 22, 22, 22,
23, 23, 23, 23,
24, 24, 24, 24,
25, 25, 25, 25,
26, 26, 26, 26,
27, 27, 27, 27,
28, 28, 28, 28,
29, 29, 29, 29,
30, 30, 30, 30,
31, 31, 31, 31,
32, 32, 32, 32,
33, 33, 33, 33,
34, 34, 34, 34,
35, 35, 35, 35,
36, 36, 36, 36,
37, 37, 37, 37,
38, 38, 38, 38,
39, 39, 39, 39,
40, 40, 40, 40,
41, 41, 41, 41,
42, 42, 42, 42,
43, 43, 43, 43,
44, 44, 44, 44,
45, 45, 45, 45,
46, 46, 46, 46,
47, 47, 47, 47,
48, 48, 48, 48,
49, 49, 49, 49,
50, 50, 50, 50,
51, 51, 51, 51,
52, 52, 52, 52,
53, 53, 53, 53,
54, 54, 54, 54,
55, 55, 55, 55,
56, 56, 56, 56,
57, 57, 57, 57,
58, 58, 58, 58,
59, 59, 59, 59,
60, 60, 60, 60,
61, 61, 61, 61,
62, 62, 62, 62,
63, 63, 63, 63,
64, 64, 64, 64,
65, 65, 65, 65,
66, 66, 66, 66,
67, 67, 67, 67,
68, 68, 68, 68,
69, 69, 69, 69,
70, 70, 70, 70,
71, 71, 71, 71 ;
}

View File

@ -1,25 +0,0 @@
netcdf tst_chunks {
dimensions:
dim1 = 32 ;
dim2 = 90 ;
dim3 = 91 ;
variables:
float var_contiguous(dim1, dim2, dim3) ;
var_contiguous:_Storage = "contiguous" ;
var_contiguous:_Endianness = "little" ;
float var_chunked(dim1, dim2, dim3) ;
var_chunked:_Storage = "chunked" ;
var_chunked:_ChunkSizes = 8, 10, 13 ;
var_chunked:_Endianness = "little" ;
float var_compressed(dim1, dim2, dim3) ;
var_compressed:_Storage = "chunked" ;
var_compressed:_ChunkSizes = 8, 10, 13 ;
var_compressed:_DeflateLevel = 1 ;
var_compressed:_Endianness = "little" ;
// global attributes:
:_NCProperties = "version=1|netcdflibversion=4.4.2-development|hdf5libversion=1.8.17" ;
:_SuperblockVersion = 0 ;
:_IsNetcdf4 = 1 ;
:_Format = "netCDF-4 classic model" ;
}

View File

@ -1,93 +0,0 @@
#!/bin/sh
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# This shell runs a bunch of benchmarks on some specific files
# available at Unidata.
# $Id: run_bm.sh,v 1.8 2007/11/30 16:45:33 ed Exp $
set -e
# Radar 2D file. Make sure we have a local disk copy. Not much point
# in benchmarking read and write times over NFS!
TMP=/shecky/data
d1=20070803-2300
echo "howdy!"
ls $TMP/${d1}-2d.nc3
if ! test -f $TMP/${d1}-2d.nc3; then
cp /upc/share/testdata/nssl/mosaic2d_nc/tile1/$d1.netcdf $TMP/$d1-2d.nc3
fi
# Copy the 2D rarar file into a netCDF-4 version, with various
# compression settings.
out1=radar_2d_compression.txt
c0=100
c1=200
h=-h
for ((s=0; s < 2 ; s++))
do
for ((d=0; d <= 9 ; d=d+2))
do
cmd="${execdir}/bm_file $h -f 3 -o $TMP/$d1-2d.nc4 -c 0:${d}:${s}:${c0}:${c1}"
for ((v=1; v < 12; v++))
do
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
done
cmd="$cmd $TMP/$d1-2d.nc3"
echo "cmd=$cmd"
if ! ($cmd >> $out1); then
exit 1;
fi
h=
done
done
exit 0
# Get local copy of the radar 3D file.
d1=20070803-2300
if ! test -f $TMP/${d1}-3d.nc3; then
cp /upc/share/testdata/nssl/mosaic3d_nc/tile1/20070803-2300.netcdf $TMP/${d1}-3d.nc3
fi
# Test different compressions, with and without shuffle.
out1=radar_3d_compression.txt
c0=3
c1=100
c2=200
h=-h
for ((s=0; s < 2 ; s++))
do
for ((d=0; d <= 9 ; d++))
do
cmd="${execdir}/bm_file $h -f 3 -o $TMP/$d1.nc4 -c 0:${d}:${s}:${c0}:${c1}:${c2} $TMP/$d1.nc3"
echo "cmd=$cmd"
if ! ($cmd >> $out1); then
exit 1;
fi
h=
done
done
# Try different chunk sizes with the same compession.
out1=radar_3d_chunking.txt
s=1
d=3
h=-h
for c0 in 1 2 5
do
for c1 in 10 100 200 500
do
for c3 in 10 100 200 500
do
cmd="${execdir}/bm_file $h -f 3 -o $TMP/$d1.nc4 -c 0:${d}:${s}:${c0}:${c1}:${c2} $TMP/$d1.nc3"
echo "cmd=$cmd"
if ! ($cmd >> $out1); then
exit 1;
fi
h=
done
done
done

View File

@ -1,94 +0,0 @@
#!/bin/sh
# This shell runs a bunch of benchmarks on some specific files
# available at Unidata. If you want to run this shell, you need these
# data files.
# This script gets and benchmarks against some AR4 data.
# $Id: run_bm_ar4.sh,v 1.4 2010/01/11 19:27:11 ed Exp $
set -e
echo ""
#file_list="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc tauu_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12.nc usi_O1.20C3M_8.CCSM.icem.1870-01_cat_1999-12.nc"
#file_list="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc tauu_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc usi_O1.20C3M_8.CCSM.icem.1870-01_cat_1999-12.nc"
file_list="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc"
echo " *** Getting sample AR4 files $file_list"
# Get the files.
for f1 in $file_list
do
if ! test -f $f1; then
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/ar4/$f1.gz
gunzip $f1.gz
fi
done
echo "SUCCESS!!!"
out='run_bm_ar4_pr_out.csv'
rm -rf $out
echo " *** Benchmarking pr_A1 file with various chunksizes (output to ${out})..."
# Create netCDF-4 versions of the file, with different chunksizes.
h=-h
s=0
pr_ar4_sample="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc"
file_num=0
for d in -1
do
for c0 in 4 8 16 32 64
do
for c1 in 64
do
for c2 in 128
do
if test $d = -1; then
file_out="pr_A1_${c0}_${c1}_${c2}.nc"
else
file_out="pr_A1_z${d}_${c0}_${c1}_${c2}.nc"
fi
out_files="$out_files $file_out"
# If the output file does not yet exist, create it.
if test -f $file_out; then
echo "found existing $file_out"
else
cmd="${execdir}/bm_file $h -f 3 -c 6:${d}:${s}:${c0}:${c1}:${c2} -o ${file_out} ${pr_ar4_sample}"
echo "cmd=$cmd"
# bash ./clear_cache.sh
if ! ($cmd >> $out); then
exit 1;
fi
fi
# Turn off header next time around.
h=
done
done
done
done
echo $out_files
# Do the horizonatal runs.
#bash ./clear_cache.sh
${execdir}/tst_ar4 -h $pr_ar4_sample
for f1 in $out_files
do
# bash ./clear_cache.sh
${execdir}/tst_ar4 ${f1}
done
# Do the timeseries runs.
#bash ./clear_cache.sh
${execdir}/tst_ar4 -t -h $pr_ar4_sample
for f1 in $out_files
do
# bash ./clear_cache.sh
${execdir}/tst_ar4 -t ${f1}
done
echo "SUCCESS!!!"
exit 0

View File

@ -3,11 +3,14 @@
# This shell runs some benchmarks that Elena ran as described here: # This shell runs some benchmarks that Elena ran as described here:
# http://hdfeos.org/workshops/ws06/presentations/Pourmal/HDF5_IO_Perf.pdf # http://hdfeos.org/workshops/ws06/presentations/Pourmal/HDF5_IO_Perf.pdf
# $Id: run_bm_elena.sh,v 1.2 2007/12/30 17:19:29 ed Exp $ # Ed Hartnett
set -e # Load common values for netCDF shell script tests.
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# Run benchmarks.
echo "" echo ""
echo "*** Testing the benchmarking program bm_file for simple float file, no compression..." echo "*** Testing the benchmarking program bm_file for simple float file, no compression..."
${execdir}/bm_file -h -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:16:256 tst_elena_int_3D.nc ${execdir}/bm_file -h -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:16:256 tst_elena_int_3D.nc
${execdir}/bm_file -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:256:256 tst_elena_int_3D.nc ${execdir}/bm_file -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:256:256 tst_elena_int_3D.nc

View File

@ -1,122 +0,0 @@
#!/bin/sh
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# This shell runs a bunch of benchmarks on some specific files
# available at Unidata. If you want to run this shell, you need these
# data files.
# This script gets and benchmarks against some 2D radar data.
# $Id: run_bm_radar_2D.sh,v 1.9 2008/01/16 13:27:01 ed Exp $
set -e
# Radar 2D file. Make sure we have a local disk copy. Not much point
# in benchmarking read and write times over NFS!
TMP=/shecky/data
d1=20070803-2300
file_num=0
for t in 1 2 4
do
file=${d1}_tile${t}-2d.nc3
in_file[$file_num]=$file
let file_num=$file_num+1
if ! test -f $TMP/$file; then
echo "getting file: $file"
cp -f /upc/share/testdata/nssl/mosaic2d_nc/tile${t}/$d1.netcdf.gz $TMP
gunzip -f $TMP/$d1.netcdf.gz
cp $d1.netcdf $TMP/$file
fi
done
num_in_files=${#in_file[@]}
# Copy the 2D rarar file into a netCDF-4 version, with various
# CHUNKING settings.
out1=radar_2d_chunking.csv
out2=radar_2d_chunking_2.csv
rm -rf $out1 $out2
# Turn on header (for the first run of bm_file).
h=-h
# Turn off compression and shuffle filters.
s=0
d=-1
# file_num=0
# for c0 in 251 1001 1501
# do
# for c1 in 251 1001 2001
# do
# # Build the command including chunk sizes for all 13 vars.
# cmd="./bm_file $h -f 4 -o $TMP/$d1-2d_${c0}x${c1}.nc4 -c 0:${d}:${s}:${c0}:${c1}"
# for ((v=1; v < 12; v++))
# do
# cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
# done
# cmd="$cmd $TMP/${in_file[${file_num}]}"
# echo "cmd=$cmd"
# if ! ($cmd); then
# exit 1;
# fi
# h=
# # Switch to the next input file of three.
# let file_num=$file_num+1
# test $file_num -eq $num_in_files && file_num=0
# done
# done
file_num=0
for c0 in 251 1001 1501
do
for c1 in 251 1001 2001
do
for try in 0 1 2 3 4 5 6 7 8 9
do
# Confuse the disk buffering by copying the file each time, so
# always reading a new file.
# cp $TMP/${in_file[${file_num}]} $TMP/cp_${in_file[${file_num}]}
# Build the command including chunk sizes for all 13 vars.
cmd="./bm_file $h -f 4 -c 0:${d}:${s}:${c0}:${c1}"
for ((v=1; v < 12; v++))
do
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
done
cmd="$cmd $TMP/${in_file[${file_num}]}"
echo "cmd=$cmd"
sudo bash ./clear_cache.sh
if ! ($cmd >> $out1); then
exit 1;
fi
cmd="./bm_file $h -f 3 -c 0:${d}:${s}:${c0}:${c1}"
for ((v=1; v < 12; v++))
do
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
done
cmd="$cmd $TMP/$d1-2d_${c0}x${c1}.nc4"
echo "cmd=$cmd"
sudo bash ./clear_cache.sh
if ! ($cmd >> $out2); then
exit 1;
fi
# Remove the copy. Next read will a "new" file.
# rm $TMP/cp_${in_file[${file_num}]}
# Turn off header next time around.
h=
# Switch to the next input file of three.
let file_num=$file_num+1
test $file_num -eq $num_in_files && file_num=0
done
done
done
exit 0

View File

@ -1,65 +0,0 @@
#!/bin/sh
# This shell runs a bunch of benchmarks on some specific files
# available at Unidata. If you want to run this shell, you need these
# data files.
# This script gets and benchmarks against some 2D radar data.
# $Id: run_bm_radar_2D_compression1.sh,v 1.2 2007/12/30 15:39:13 ed Exp $
set -e
# Radar 2D file. Make sure we have a local disk copy. Not much point
# in benchmarking read and write times over NFS!
TMP=/shecky/data
d1=20070803-2300
file_num=0
for t in 1 2 4
do
file=${d1}_tile${t}-2d.nc3
in_file[$file_num]=$file
let file_num=$file_num+1
if ! test -f $TMP/$file; then
echo "getting file: $file"
cp -f /upc/share/testdata/nssl/mosaic2d_nc/tile${t}/$d1.netcdf.gz $TMP
gunzip -f $TMP/$d1.netcdf.gz
cp $d1.netcdf $TMP/$file
fi
done
num_in_files=${#in_file[@]}
# Copy the 2D rarar file into a netCDF-4 version, with various
# compression settings.
out1=radar_2d_compression.csv
rm -rf $out1
c0=1001
c1=500
h=-h
file_num=0
for ((s=0; s < 2 ; s++))
do
for ((d=-1; d <= 9 ; d++))
do
# Confuse the disk buffering by copying the file each time, so
# always reading a new file.
cp $TMP/${in_file[${file_num}]} $TMP/cp_${in_file[${file_num}]}
cmd="./bm_file $h -f 3 -d -o $TMP/$d1-2d.nc4 -c 0:${d}:${s}:${c0}:${c1}"
for ((v=1; v < 12; v++))
do
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
done
cmd="$cmd $TMP/cp_${in_file[${file_num}]}"
echo "cmd=$cmd"
if ! ($cmd >> $out1); then
exit 1;
fi
rm $TMP/cp_${in_file[${file_num}]}
h=
let file_num=$file_num+1
test $file_num -eq $num_in_files && file_num=0
done
done
exit 0

View File

@ -1,80 +0,0 @@
#!/bin/sh
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# This shell runs a bunch of benchmarks on some specific files
# available at Unidata. If you want to run this shell, you need these
# data files.
# This script gets and benchmarks against some 2D radar data.
# $Id: run_bm_radar_2D_endianness1.sh,v 1.1 2008/01/03 16:19:08 ed Exp $
set -e
# Radar 2D file. Make sure we have a local disk copy. Not much point
# in benchmarking read and write times over NFS!
TMP=/shecky/data
d1=20070803-2300
file_num=0
for t in 1 2 4
do
file=${d1}_tile${t}-2d.nc3
in_file[$file_num]=$file
let file_num=$file_num+1
if ! test -f $TMP/$file; then
echo "getting file: $file"
cp -f /upc/share/testdata/nssl/mosaic2d_nc/tile${t}/$d1.netcdf.gz $TMP
gunzip -f $TMP/$d1.netcdf.gz
cp $d1.netcdf $TMP/$file
fi
done
num_in_files=${#in_file[@]}
# Copy the 2D rarar file into a netCDF-4 version, with various
# CHUNKING settings.
out1=radar_2d_endianness.csv
rm -rf $out1
# Turn on header (for the first run of bm_file).
h=-h
# Turn off compression and shuffle filters.
s=0
d=-1
# Set good chunksizes.
c0=501
c1=1001
file_num=0
for ((end=0; end <= 2 ; end++))
do
# Confuse the disk buffering by copying the file each time, so
# always reading a new file.
cp $TMP/${in_file[${file_num}]} $TMP/cp_${in_file[${file_num}]}
# Build the command including chunk sizes for all 13 vars.
cmd="./bm_file -e $end $h -f 3 -d -o $TMP/$d1-2d.nc4 -c 0:${d}:${s}:${c0}:${c1}"
for ((v=1; v < 12; v++))
do
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
done
cmd="$cmd $TMP/cp_${in_file[${file_num}]}"
echo "cmd=$cmd"
if ! ($cmd >> $out1); then
exit 1;
fi
# Remove the copy. Next read will a "new" file.
rm $TMP/cp_${in_file[${file_num}]}
# Turn off header next time around.
h=
# Switch to the next input file of three.
let file_num=$file_num+1
test $file_num -eq $num_in_files && file_num=0
done
exit 0

View File

@ -1,16 +1,13 @@
#!/bin/sh #!/bin/sh
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# This shell just tests the bm_file program by running it a few times # This shell just tests the bm_file program by running it a few times
# on a simple test file. Then it uses ncdum to check that the output # on a simple test file. Then it uses ncdum to check that the output
# is what it should be. # is what it should be.
# $Id: run_bm_test1.sh,v 1.13 2008/01/04 15:57:48 ed Exp $ # Ed Hartnett
set -e if test "x$srcdir" = x ; then srcdir=`pwd`; fi
echo "" . ../test_common.sh
for type_name in floats ints shorts for type_name in floats ints shorts
do do

View File

@ -1,20 +0,0 @@
#!/bin/sh
# This shell gets files from the netCDF ftp site for testing.
set -e
echo ""
file_list="MSGCPP_CWP_NC3.nc MSGCPP_CWP_NC4.nc"
echo "Getting KNMI test files $file_list"
for f1 in $file_list
do
if ! test -f $f1; then
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/$f1.gz
gunzip $f1.gz
fi
done
echo "SUCCESS!!!"
exit 0

28
nc_test4/run_knmi_bm.sh Executable file
View File

@ -0,0 +1,28 @@
#!/bin/sh
# This shell gets some files from the netCDF ftp site for testing,
# then runs the tst_knmi benchmarking program.
# Ed Hartnett
# Load common values for netCDF shell script tests.
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# Get files if needed.
echo ""
file_list="MSGCPP_CWP_NC3.nc MSGCPP_CWP_NC4.nc"
echo "Getting KNMI test files $file_list"
for f1 in $file_list
do
if ! test -f $f1; then
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/$f1.gz
gunzip $f1.gz
fi
done
# Run the C program on these files.
${execdir}/tst_knmi
echo "SUCCESS!!!"
exit 0

View File

@ -1,25 +0,0 @@
#!/bin/sh
# This shell file runs benchmarks on the 2D radar data on parallel platforms.
# $Id: run_par_bm_radar_2D.sh,v 1.1 2007/12/18 01:16:25 ed Exp $
set -e
echo ""
echo "Getting radar 2D data file from Unidata FTP site..."
file=20070803-2300_tile1-2d.nc3
if ! test -f $file; then
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/$file
fi
echo "*** Running bm_file for parallel access on $file..."
header="-h"
chunksizes="1501:2001"
for numproc in 1 4 16
do
mpiexec -n $numproc ./bm_file -p -d ${header} -s 16 -f 4 -o tst_r2d.nc -c 0:-1:0:1501:2001 $file
header=
done
echo '*** SUCCESS!!!'
exit 0

View File

@ -1,26 +1,24 @@
#!/bin/sh #!/bin/sh
# This shell file tests the bm_ile program for parallel I/O.
# Ed Hartnett
if test "x$srcdir" = x ; then srcdir=`pwd`; fi if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh . ../test_common.sh
# This shell file tests the bm_ile program for parallel I/O.
# $Id: run_par_bm_test.sh,v 1.5 2007/12/12 18:00:39 ed Exp $
set -e
echo "" echo ""
for type_name in floats ints shorts for type_name in floats ints shorts
do do
echo "*** Running bm_file for parallel access on simple ${type_name} test files, 1D to 6D..." echo "*** Running bm_file for parallel access on simple ${type_name} test files, 1D to 6D..."
header="-h" header="-h"
for ((i=1; i <= 3; i++)) for i in 1 2 3
do do
test $i = 1 && chunksizes="100000" test $i = 1 && chunksizes="100000"
test $i = 2 && chunksizes="316:316" test $i = 2 && chunksizes="316:316"
test $i = 3 && chunksizes="46:46:46" test $i = 3 && chunksizes="46:46:46"
for numproc in 1 4 16 for numproc in 1 4 16
do do
mpiexec -n $numproc ./bm_file -p -d ${header} -s ${numproc} -f 4 -o p_${type_name}2_${i}D_3.nc -c 0:-1:0:${chunksizes} ${type_name}2_${i}D_3.nc @MPIEXEC@ -n $numproc ./bm_file -p -d ${header} -s ${numproc} -f 4 -o p_${type_name}2_${i}D_3.nc -c 0:-1:0:${chunksizes} ${type_name}2_${i}D_3.nc
${NCDUMP} -n tst_${type_name}2_${i}D p_${type_name}2_${i}D_3.nc > p_${type_name}2_${i}D.cdl ${NCDUMP} -n tst_${type_name}2_${i}D p_${type_name}2_${i}D_3.nc > p_${type_name}2_${i}D.cdl
diff tst_${type_name}2_${i}D.cdl p_${type_name}2_${i}D.cdl &> /dev/null diff tst_${type_name}2_${i}D.cdl p_${type_name}2_${i}D.cdl &> /dev/null
header= header=

View File

@ -1,17 +1,15 @@
#!/bin/sh #!/bin/sh
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
# This shell just tests the tst_chunks3 program by running it a few # This shell just tests the tst_chunks3 program by running it a few
# times to generate a simple test file. Then it uses ncdump -s to # times to generate a simple test file. Then it uses ncdump -s to
# check that the output is what it should be. # check that the output is what it should be.
# $Id: run_tst_chunks3.sh,v 1.2 2009/02/24 01:49:12 russ Exp $ # Russ Rew
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
set -e
echo "" echo ""
echo "*** Running benchmarking program tst_chunks3 for tiny test file" echo "*** Running benchmarking program tst_chunks3 for tiny test file"
compress_level=1 compress_level=1
dim1=6 dim1=6
@ -21,8 +19,6 @@ chunk2=3
dim3=4 dim3=4
chunk3=1 chunk3=1
${execdir}/tst_chunks3 $compress_level $dim1 $chunk1 $dim2 $chunk2 $dim3 $chunk3 ${execdir}/tst_chunks3 $compress_level $dim1 $chunk1 $dim2 $chunk2 $dim3 $chunk3
${NCDUMP} -n tst_chunks -s tst_chunks3.nc > tst_chunks3.cdl
diff tst_chunks3.cdl ref_chunks1.cdl
echo '*** SUCCESS!!!' echo '*** SUCCESS!!!'
echo "" echo ""
@ -38,8 +34,6 @@ cachesize=10000000
cachehash=10000 cachehash=10000
cachepre=0.0 cachepre=0.0
${execdir}/tst_chunks3 $compress_level $dim1 $chunk1 $dim2 $chunk2 $dim3 $chunk3 $cachesize $cachehash $cachepre ${execdir}/tst_chunks3 $compress_level $dim1 $chunk1 $dim2 $chunk2 $dim3 $chunk3 $cachesize $cachehash $cachepre
${NCDUMP} -n tst_chunks -s -h tst_chunks3.nc > tst_chunks3.cdl
diff tst_chunks3.cdl ref_chunks2.cdl
echo '*** SUCCESS!!!' echo '*** SUCCESS!!!'
exit 0 exit 0

View File

@ -4,7 +4,7 @@ See COPYRIGHT file for copying and redistribution conditions.
This program tests netcdf-4 performance with some AR-4 3D data. This program tests netcdf-4 performance with some AR-4 3D data.
$Id: tst_ar4.c,v 1.4 2010/01/11 19:27:11 ed Exp $ Ed Hartnett
*/ */
#include <nc_tests.h> #include <nc_tests.h>
@ -13,6 +13,10 @@ $Id: tst_ar4.c,v 1.4 2010/01/11 19:27:11 ed Exp $
#include <sys/time.h> #include <sys/time.h>
#include <unistd.h> #include <unistd.h>
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
/* From the data file we are using: /* From the data file we are using:
netcdf pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12 { netcdf pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12 {
@ -239,5 +243,5 @@ main(int argc, char **argv)
else else
printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us); printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us);
return 0; FINAL_RESULTS;
} }

View File

@ -1,10 +1,10 @@
/* /*
Copyright 2009, UCAR/Unidata Copyright 2009-2018, UCAR/Unidata
See COPYRIGHT file for copying and redistribution conditions. See COPYRIGHT file for copying and redistribution conditions.
This program tests netcdf-4 performance with some AR-4 3D data. This program tests netcdf-4 performance with some AR-4 3D data.
$Id: tst_ar4_3d.c,v 1.1 2010/01/11 19:28:28 ed Exp $ Ed Hartnett
*/ */
#include <nc_tests.h> #include <nc_tests.h>
@ -22,6 +22,10 @@ $Id: tst_ar4_3d.c,v 1.1 2010/01/11 19:28:28 ed Exp $
#define SIXTY_FOUR_MEG (SIXTEEN_MEG * 4) #define SIXTY_FOUR_MEG (SIXTEEN_MEG * 4)
#define ONE_TWENTY_EIGHT_MEG (SIXTEEN_MEG * 8) #define ONE_TWENTY_EIGHT_MEG (SIXTEEN_MEG * 8)
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
/* From the data file we are using: /* From the data file we are using:
netcdf pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12 { netcdf pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12 {
@ -247,5 +251,5 @@ main(int argc, char **argv)
else else
printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us); printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us);
return 0; FINAL_RESULTS;
} }

View File

@ -1,10 +1,9 @@
/* /* Copyright 2009-2018, UCAR/Unidata
Copyright 2009, UCAR/Unidata
See COPYRIGHT file for copying and redistribution conditions. See COPYRIGHT file for copying and redistribution conditions.
This program tests netcdf-4 performance with some AR-4 3D data. This program tests netcdf-4 performance with some AR-4 4D data.
$Id: tst_ar4_4d.c,v 1.2 2010/01/14 20:25:55 ed Exp $ Ed Hartnett
*/ */
#include <nc_tests.h> #include <nc_tests.h>
@ -22,6 +21,10 @@ $Id: tst_ar4_4d.c,v 1.2 2010/01/14 20:25:55 ed Exp $
#define SIXTY_FOUR_MEG (SIXTEEN_MEG * 4) #define SIXTY_FOUR_MEG (SIXTEEN_MEG * 4)
#define ONE_TWENTY_EIGHT_MEG (SIXTEEN_MEG * 8) #define ONE_TWENTY_EIGHT_MEG (SIXTEEN_MEG * 8)
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
/* From the data file we are using: /* From the data file we are using:
../ncdump/ncdump -h -s thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12.nc ../ncdump/ncdump -h -s thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12.nc
@ -409,5 +412,5 @@ main(int argc, char **argv)
else else
printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us); printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us);
return 0; FINAL_RESULTS;
} }

View File

@ -1,99 +0,0 @@
/* This is part of the netCDF package.
Copyright 2005 University Corporation for Atmospheric Research/Unidata
See COPYRIGHT file for conditions of use.
This program does some benchmarking of netCDF files for the AR-5
data.
*/
#include <nc_tests.h>
#include "err_macros.h"
#include "netcdf.h"
#include <unistd.h>
#include <time.h>
#include <sys/time.h> /* Extra high precision time info. */
#include <../ncdump/nciter.h>
#define MILLION 1000000
#define MAX_LEN 30
#define TMP_FILE_NAME "tst_files2_tmp.out"
/* This function uses the ps command to find the amount of memory in
use by the process. From the ps man page:
size SZ approximate amount of swap space that would be required if
the process were to dirty all writable pages and then be
swapped out. This number is very rough!
*/
void
get_mem_used1(int *mem_used)
{
char cmd[NC_MAX_NAME + 1];
char blob[MAX_LEN + 1] = "";
FILE *fp;
int num_char;
/* Run the ps command for this process, putting output (one number)
* into file TMP_FILE_NAME. */
sprintf(cmd, "ps -o size= %d > %s", getpid(), TMP_FILE_NAME);
system(cmd);
/* Read the results and delete temp file. */
if (!(fp = fopen(TMP_FILE_NAME, "r"))) exit;
num_char = fread(blob, MAX_LEN, 1, fp);
sscanf(blob, "%d", mem_used);
fclose(fp);
unlink(TMP_FILE_NAME);
}
int
main(int argc, char **argv)
{
#define BUFSIZE 1000000 /* access data in megabyte sized pieces */
#define THETAU_FILE "/machine/downloads/AR5_sample_data/thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12.nc"
#define NDIMS_DATA 4
printf("\n*** Running some AR-5 benchmarks.\n");
printf("*** testing various chunksizes for thetau file...\n");
{
int ncid, ncid_out;
/*char var_buf[BUFSIZE];*/ /* buffer for variable data */
/* nciter_t iter; */ /* opaque structure for iteration status */
/* size_t start[NDIMS_DATA];
size_t count[NDIMS_DATA];*/
/*float *data = (float *)var_buf; */
char file_out[NC_MAX_NAME + 1];
/*int ndims, nvars, natts, unlimdimid;*/
size_t cs[NDIMS_DATA] = {120, 4, 40, 32};
/* /\* Open input. *\/ */
/* if (nc_open(THETAU_FILE, NC_NOWRITE, &ncid)) ERR; */
/* /\* Create output file. *\/ */
/* sprintf(file_out, "thetau_%d_%d_%d_%d.nc", (int)cs[0], */
/* (int)cs[1], (int)cs[2], (int)cs[3]); */
/* if (nc_create(file_out, NC_NOWRITE, &ncid_out)) ERR; */
/* /\* Copy the easy ones. *\/ */
/* /\* if (nc_inq(ncid, &ndims, &nvars, &natts, &unlimdimid)) ERR; */
/* if (ndims != 5 || nvars != 9 || natts != 8 || unlimdimid != 0) ERR;*\/ */
/* /\* /\\* Copy the main data payload with Russ's new nciters. *\\/ *\/ */
/* /\* varid = 8; *\/ */
/* /\* if (nc_get_iter(ncid, varid, BUFSIZE, &iter)) ERR; *\/ */
/* /\* while((nvals = nc_next_iter(&iter, start, count)) > 0) *\/ */
/* /\* { *\/ */
/* /\* /\\* read in a block of data *\\/ *\/ */
/* /\* if (nc_get_vara_double(ncid, varid, start, count, data)) ERR; *\/ */
/* /\* /\\* now write the changed data back out *\\/ *\/ */
/* /\* if (nc_out_vara_double(ncid, varid, start, count, data)) ERR; *\/ */
/* /\* } *\/ */
/* /\* if (nvals < 0) ERR; *\/ */
/* if (nc_close(ncid)) ERR; */
/* if (nc_close(ncid_out)) ERR; */
}
SUMMARIZE_ERR;
FINAL_RESULTS;
}

View File

@ -1,3 +1,12 @@
/* This is part of the netCDF package. Copyright 2005-2018 University
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
conditions of use.
Runs benchmarks on different chunking sizes.
Russ Rew, Ed Hartnett, Dennis Heimbigner
*/
#include <config.h> #include <config.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
@ -298,7 +307,7 @@ main(int argc, char *argv[]) {
count[1] = dims[1]; count[1] = dims[1];
count[2] = dims[2]; count[2] = dims[2];
sprintf(time_mess," contiguous write %3ld %3ld %3ld", sprintf(time_mess," contiguous write %3d %3ld %3ld",
1, dims[1], dims[2]); 1, dims[1], dims[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[0]; i++) { for(i = 0; i < dims[0]; i++) {
@ -310,7 +319,7 @@ main(int argc, char *argv[]) {
printf("\n"); printf("\n");
contig_time = TMsec; contig_time = TMsec;
sprintf(time_mess," chunked write %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," chunked write %3d %3ld %3ld %3ld %3ld %3ld",
1, dims[1], dims[2], chunks[0], chunks[1], chunks[2]); 1, dims[1], dims[2], chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[0]; i++) { for(i = 0; i < dims[0]; i++) {
@ -326,7 +335,7 @@ main(int argc, char *argv[]) {
else else
printf(" %5.2g x slower\n", 1.0/ratio); printf(" %5.2g x slower\n", 1.0/ratio);
sprintf(time_mess," compressed write %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," compressed write %3d %3ld %3ld %3ld %3ld %3ld",
1, dims[1], dims[2], chunks[0], chunks[1], chunks[2]); 1, dims[1], dims[2], chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[0]; i++) { for(i = 0; i < dims[0]; i++) {
@ -351,7 +360,7 @@ main(int argc, char *argv[]) {
count[1] = 1; count[1] = 1;
count[2] = dims[2]; count[2] = dims[2];
sprintf(time_mess," contiguous write %3ld %3ld %3ld", sprintf(time_mess," contiguous write %3ld %3d %3ld",
dims[0], 1, dims[2]); dims[0], 1, dims[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[1]; i++) { for(i = 0; i < dims[1]; i++) {
@ -363,7 +372,7 @@ main(int argc, char *argv[]) {
printf("\n"); printf("\n");
contig_time = TMsec; contig_time = TMsec;
sprintf(time_mess," chunked write %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," chunked write %3ld %3d %3ld %3ld %3ld %3ld",
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]); dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[1]; i++) { for(i = 0; i < dims[1]; i++) {
@ -379,7 +388,7 @@ main(int argc, char *argv[]) {
else else
printf(" %5.2g x slower\n", 1.0/ratio); printf(" %5.2g x slower\n", 1.0/ratio);
sprintf(time_mess," compressed write %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," compressed write %3ld %3d %3ld %3ld %3ld %3ld",
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]); dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[1]; i++) { for(i = 0; i < dims[1]; i++) {
@ -404,7 +413,7 @@ main(int argc, char *argv[]) {
count[1] = dims[1]; count[1] = dims[1];
count[2] = 1; count[2] = 1;
sprintf(time_mess," contiguous write %3ld %3ld %3ld", sprintf(time_mess," contiguous write %3ld %3ld %3d",
dims[0], dims[1], 1); dims[0], dims[1], 1);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[2]; i++) { for(i = 0; i < dims[2]; i++) {
@ -416,7 +425,7 @@ main(int argc, char *argv[]) {
printf("\n"); printf("\n");
contig_time = TMsec; contig_time = TMsec;
sprintf(time_mess," chunked write %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," chunked write %3ld %3ld %3d %3ld %3ld %3ld",
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]); dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[2]; i++) { for(i = 0; i < dims[2]; i++) {
@ -432,7 +441,7 @@ main(int argc, char *argv[]) {
else else
printf(" %5.2g x slower\n", 1.0/ratio); printf(" %5.2g x slower\n", 1.0/ratio);
sprintf(time_mess," compressed write %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," compressed write %3ld %3ld %3d %3ld %3ld %3ld",
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]); dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[2]; i++) { for(i = 0; i < dims[2]; i++) {
@ -457,7 +466,7 @@ main(int argc, char *argv[]) {
count[1] = dims[1]; count[1] = dims[1];
count[2] = dims[2]; count[2] = dims[2];
sprintf(time_mess," contiguous read %3ld %3ld %3ld", sprintf(time_mess," contiguous read %3d %3ld %3ld",
1, dims[1], dims[2]); 1, dims[1], dims[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[0]; i++) { for(i = 0; i < dims[0]; i++) {
@ -469,7 +478,7 @@ main(int argc, char *argv[]) {
printf("\n"); printf("\n");
contig_time = TMsec; contig_time = TMsec;
sprintf(time_mess," chunked read %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," chunked read %3d %3ld %3ld %3ld %3ld %3ld",
1, dims[1], dims[2] , chunks[0], chunks[1], chunks[2]); 1, dims[1], dims[2] , chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[0]; i++) { for(i = 0; i < dims[0]; i++) {
@ -485,7 +494,7 @@ main(int argc, char *argv[]) {
else else
printf(" %5.2g x slower\n", 1.0/ratio); printf(" %5.2g x slower\n", 1.0/ratio);
sprintf(time_mess," compressed read %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," compressed read %3d %3ld %3ld %3ld %3ld %3ld",
1, dims[1], dims[2] , chunks[0], chunks[1], chunks[2]); 1, dims[1], dims[2] , chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[0]; i++) { for(i = 0; i < dims[0]; i++) {
@ -510,7 +519,7 @@ main(int argc, char *argv[]) {
count[1] = 1; count[1] = 1;
count[2] = dims[2]; count[2] = dims[2];
sprintf(time_mess," contiguous read %3ld %3ld %3ld", sprintf(time_mess," contiguous read %3ld %3d %3ld",
dims[0], 1, dims[2]); dims[0], 1, dims[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[1]; i++) { for(i = 0; i < dims[1]; i++) {
@ -522,7 +531,7 @@ main(int argc, char *argv[]) {
printf("\n"); printf("\n");
contig_time = TMsec; contig_time = TMsec;
sprintf(time_mess," chunked read %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," chunked read %3ld %3d %3ld %3ld %3ld %3ld",
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]); dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[1]; i++) { for(i = 0; i < dims[1]; i++) {
@ -538,7 +547,7 @@ main(int argc, char *argv[]) {
else else
printf(" %5.2g x slower\n", 1.0/ratio); printf(" %5.2g x slower\n", 1.0/ratio);
sprintf(time_mess," compressed read %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," compressed read %3ld %3d %3ld %3ld %3ld %3ld",
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]); dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[1]; i++) { for(i = 0; i < dims[1]; i++) {
@ -563,7 +572,7 @@ main(int argc, char *argv[]) {
count[1] = dims[1]; count[1] = dims[1];
count[2] = 1; count[2] = 1;
sprintf(time_mess," contiguous read %3ld %3ld %3ld", sprintf(time_mess," contiguous read %3ld %3ld %3d",
dims[0], dims[1], 1); dims[0], dims[1], 1);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[2]; i++) { for(i = 0; i < dims[2]; i++) {
@ -575,7 +584,7 @@ main(int argc, char *argv[]) {
printf("\n"); printf("\n");
contig_time = TMsec; contig_time = TMsec;
sprintf(time_mess," chunked read %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," chunked read %3ld %3ld %3d %3ld %3ld %3ld",
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]); dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[2]; i++) { for(i = 0; i < dims[2]; i++) {
@ -591,7 +600,7 @@ main(int argc, char *argv[]) {
else else
printf(" %5.2g x slower\n", 1.0/ratio); printf(" %5.2g x slower\n", 1.0/ratio);
sprintf(time_mess," compressed read %3ld %3ld %3ld %3ld %3ld %3ld", sprintf(time_mess," compressed read %3ld %3ld %3d %3ld %3ld %3ld",
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]); dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
TIMING_START ; TIMING_START ;
for(i = 0; i < dims[2]; i++) { for(i = 0; i < dims[2]; i++) {
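Note on the format-string changes in the hunks above: in each sprintf() call the slot that receives the integer literal 1 is switched from %3ld to %3d. The literal 1 is an int, so %d is the matching conversion; printing it through %ld is undefined behavior wherever int and long differ in size. A minimal standalone illustration (not taken from the benchmark itself; the long dims[] here are invented):

#include <stdio.h>

int
main(void)
{
    long dims[2] = {256, 128};
    char time_mess[64];

    /* The literal 1 is an int, so %d matches it; the long values keep
     * %ld. Mixing them up compiles but is undefined behavior when int
     * and long have different widths, which is what the format-string
     * fixes above correct. */
    snprintf(time_mess, sizeof(time_mess),
             " contiguous write %3ld %3ld %3d", dims[0], dims[1], 1);
    puts(time_mess);
    return 0;
}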

View File

@ -1,10 +1,10 @@
/* /*
Copyright 2007, UCAR/Unidata Copyright 2007-2018, UCAR/Unidata
See COPYRIGHT file for copying and redistribution conditions. See COPYRIGHT file for copying and redistribution conditions.
This program creates a test file. This program creates a test file.
$Id: tst_create_files.c,v 1.11 2008/01/09 16:30:23 ed Exp $ Ed Hartnett
*/ */
#include <config.h> #include <config.h>
#include <nc_tests.h> #include <nc_tests.h>
@ -21,9 +21,6 @@
int int
main(int argc, char **argv) main(int argc, char **argv)
{ {
int nc_argc = argc;
int nc_argv = argv;
printf("\n*** Create some files for testing benchmarks.\n"); printf("\n*** Create some files for testing benchmarks.\n");
#ifdef LARGE_FILE_TESTS #ifdef LARGE_FILE_TESTS

View File

@ -1,14 +1,15 @@
/* This is part of the netCDF package. /* This is part of the netCDF package. Copyright 2005-2018 University
Copyright 2005 University Corporation for Atmospheric Research/Unidata Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
See COPYRIGHT file for conditions of use. conditions of use.
Test netcdf-4 variables. This is a benchmark test which times how long it takes to create
$Id: tst_files2.c,v 1.11 2010/01/31 19:00:44 ed Exp $ some files.
Ed Hartnett
*/ */
#include <nc_tests.h> #include <nc_tests.h>
#include "err_macros.h" #include "err_macros.h"
#include "netcdf.h"
#include <unistd.h> #include <unistd.h>
#include <time.h> #include <time.h>
#include <sys/time.h> /* Extra high precision time info. */ #include <sys/time.h> /* Extra high precision time info. */
@ -20,6 +21,10 @@
void *last_sbrk; void *last_sbrk;
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
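nc4_timeval_subtract() lives in tst_utils.c, which is not part of this diff. For reference, a hedged sketch of the conventional timeval-subtraction helper with this signature is shown below; the actual tst_utils.c implementation may differ in detail.

#include <sys/time.h>

/* Sketch only: result = x - y, returning 1 if the difference is
 * negative and 0 otherwise. This is the textbook carry-handling
 * approach; note that it may adjust *y while normalizing. */
int
nc4_timeval_subtract(struct timeval *result, struct timeval *x,
                     struct timeval *y)
{
    /* Perform the carry for the later subtraction by updating y. */
    if (x->tv_usec < y->tv_usec)
    {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int nsec = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }

    /* tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Return 1 if the result would be negative. */
    return x->tv_sec < y->tv_sec;
}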
/* This function uses the ps command to find the amount of memory in /* This function uses the ps command to find the amount of memory in
use by the process. From the ps man page: use by the process. From the ps man page:
@ -33,7 +38,6 @@ get_mem_used1(int *mem_used)
char cmd[NC_MAX_NAME + 1]; char cmd[NC_MAX_NAME + 1];
char blob[MAX_LEN + 1] = ""; char blob[MAX_LEN + 1] = "";
FILE *fp; FILE *fp;
int num_char;
/* Run the ps command for this process, putting output (one number) /* Run the ps command for this process, putting output (one number)
* into file TMP_FILE_NAME. */ * into file TMP_FILE_NAME. */
@ -42,7 +46,7 @@ get_mem_used1(int *mem_used)
/* Read the results and delete temp file. */ /* Read the results and delete temp file. */
if (!(fp = fopen(TMP_FILE_NAME, "r"))) ERR; if (!(fp = fopen(TMP_FILE_NAME, "r"))) ERR;
num_char = fread(blob, MAX_LEN, 1, fp); fread(blob, MAX_LEN, 1, fp);
sscanf(blob, "%d", mem_used); sscanf(blob, "%d", mem_used);
fclose(fp); fclose(fp);
unlink(TMP_FILE_NAME); unlink(TMP_FILE_NAME);
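The probe above shells out to ps, captures a single number in a temporary file, and parses it. The same idea can be expressed with popen() instead of a temp file; the sketch below is only an illustration of that variant, and the "vsz=" option string is an assumption that holds for common Linux/BSD ps implementations, not everywhere.

#include <stdio.h>
#include <unistd.h>

/* Sketch: ask ps for this process's virtual memory size (KiB) and
 * parse it straight from the pipe. Returns 0 on success, 1 on error. */
static int
get_mem_used_popen(int *mem_used)
{
    char cmd[128];
    FILE *fp;
    int ret = 1;

    snprintf(cmd, sizeof(cmd), "ps -o vsz= -p %d", (int)getpid());
    if (!(fp = popen(cmd, "r")))
        return 1;
    if (fscanf(fp, "%d", mem_used) == 1)
        ret = 0;
    pclose(fp);
    return ret;
}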
@ -95,16 +99,12 @@ create_sample_file(char *file_name, int ndims, int *dim_len,
float *data_out; float *data_out;
size_t start[MAX_DIMS], count[MAX_DIMS]; size_t start[MAX_DIMS], count[MAX_DIMS];
int slab_nelems; int slab_nelems;
int i, d, ret; int i, d;
if (ndims != MAX_DIMS && ndims != MAX_DIMS - 1) ERR_RET; if (ndims != MAX_DIMS && ndims != MAX_DIMS - 1) ERR_RET;
/* Create a file. */ /* Create a file. */
ret = nc_create(file_name, NC_NOCLOBBER|mode, &ncid); if (nc_create(file_name, NC_CLOBBER|mode, &ncid)) ERR_RET;
if (ret == NC_EEXIST)
return NC_NOERR;
else if (ret)
ERR_RET;
/* Initialize sample data. Slab of data will be full extent of last /* Initialize sample data. Slab of data will be full extent of last
* two dimensions. */ * two dimensions. */
@ -181,8 +181,8 @@ main(int argc, char **argv)
#define NUM_TRIES 6 #define NUM_TRIES 6
int *ncid_in; int *ncid_in;
int mem_used, mem_used2; int mem_used, mem_used2;
int mem_per_file; /* int mem_per_file; */
int num_files[NUM_TRIES] = {1, 1, 1, 1, 1, 1}; int num_files[NUM_TRIES] = {1, 5, 10, 20, 35, 50};
char file_name[NUM_TRIES][NC_MAX_NAME + 1]; char file_name[NUM_TRIES][NC_MAX_NAME + 1];
int num_vars[NUM_TRIES]; int num_vars[NUM_TRIES];
size_t cache_size[NUM_TRIES]; size_t cache_size[NUM_TRIES];
@ -190,10 +190,10 @@ main(int argc, char **argv)
char mode_name[NUM_TRIES][8]; char mode_name[NUM_TRIES][8];
int ndims[NUM_TRIES]; int ndims[NUM_TRIES];
int dim_len[NUM_TRIES][MAX_DIMS]; int dim_len[NUM_TRIES][MAX_DIMS];
int dim_4d[MAX_DIMS] = {NC_UNLIMITED, 10, 1000, 1000}; int dim_4d[MAX_DIMS] = {NC_UNLIMITED, 10, 100, 100};
char dimstr[30]; char dimstr[30];
char chunkstr[30]; char chunkstr[30];
int num_recs[NUM_TRIES] = {1, 1, 1}; int num_recs[NUM_TRIES] = {1, 1, 1, 1, 1, 1};
struct timeval start_time, end_time, diff_time; struct timeval start_time, end_time, diff_time;
struct timeval close_start_time, close_end_time, close_diff_time; struct timeval close_start_time, close_end_time, close_diff_time;
int open_us, close_us, create_us; int open_us, close_us, create_us;
@ -202,11 +202,9 @@ main(int argc, char **argv)
int d, f, t; int d, f, t;
printf("dims\t\tchunks\t\tformat\tnum_files\tcache(kb)\tnum_vars\tmem(kb)\t" printf("dims\t\tchunks\t\tformat\tnum_files\tcache(kb)\tnum_vars\tmem(kb)\t"
"open_time(us)\tclose_time(us)\tcreate_time(us)\n"); "open_time(us/file)\tclose_time(us/file)\tcreate_time(us/file)\n");
for (t = 0; t < NUM_TRIES; t++) for (t = 0; t < NUM_TRIES; t++)
{ {
/* Set up filename. */
sprintf(file_name[t], "tst_files2_%d.nc", t);
strcpy(mode_name[t], "netcdf4"); strcpy(mode_name[t], "netcdf4");
mode[t] = NC_NETCDF4; mode[t] = NC_NETCDF4;
cache_size[t] = 16000000; cache_size[t] = 16000000;
@ -215,26 +213,29 @@ main(int argc, char **argv)
for (d = 0; d < ndims[t]; d++) for (d = 0; d < ndims[t]; d++)
dim_len[t][d] = dim_4d[d]; dim_len[t][d] = dim_4d[d];
/* Create sample file (unless it already exists). */ /* Create sample files. */
if (gettimeofday(&start_time, NULL)) ERR; if (gettimeofday(&start_time, NULL)) ERR;
for (f = 0; f < num_files[t]; f++)
{
/* Set up filename. */
sprintf(file_name[t], "tst_files2_%d_%d.nc", t, f);
if (create_sample_file(file_name[t], ndims[t], dim_len[t], num_vars[t], if (create_sample_file(file_name[t], ndims[t], dim_len[t], num_vars[t],
mode[t], num_recs[t])) ERR; mode[t], num_recs[t])) ERR;
/* How long did it take? */ /* How long did it take? */
if (gettimeofday(&end_time, NULL)) ERR; if (gettimeofday(&end_time, NULL)) ERR;
if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR; if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR;
create_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec); create_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec) / num_files[t];
}
/* Change the cache settings. */ /* /\* Change the cache settings. *\/ */
if (nc_set_chunk_cache(cache_size[t], 20000, .75)) ERR; /* if (nc_set_chunk_cache(cache_size[t], 20000, .75)) ERR; */
/* We need storage for an array of ncids. */ /* We need storage for an array of ncids. */
if (!(ncid_in = malloc(num_files[t] * sizeof(int)))) ERR; if (!(ncid_in = malloc(num_files[t] * sizeof(int)))) ERR;
/* How much memory is in use now? */ /* How much memory is in use now? */
if (get_mem_used1(&mem_used)) ERR; if (get_mem_used1(&mem_used)) ERR;
/* get_mem_used2(&mem_used);
get_mem_used3(&mem_used);*/
/* Open the first file to get chunksizes. */ /* Open the first file to get chunksizes. */
if (gettimeofday(&start_time, NULL)) ERR; if (gettimeofday(&start_time, NULL)) ERR;
@ -248,7 +249,7 @@ main(int argc, char **argv)
/* How long did it take per file? */ /* How long did it take per file? */
if (gettimeofday(&end_time, NULL)) ERR; if (gettimeofday(&end_time, NULL)) ERR;
if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR; if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR;
open_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec); open_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec) / num_files[t];
/* How much memory is in use by this process now? */ /* How much memory is in use by this process now? */
if (get_mem_used1(&mem_used2)) ERR; if (get_mem_used1(&mem_used2)) ERR;
@ -261,13 +262,14 @@ main(int argc, char **argv)
/* How long did it take to close all files? */ /* How long did it take to close all files? */
if (gettimeofday(&close_end_time, NULL)) ERR; if (gettimeofday(&close_end_time, NULL)) ERR;
if (nc4_timeval_subtract(&close_diff_time, &close_end_time, &close_start_time)) ERR; if (nc4_timeval_subtract(&close_diff_time, &close_end_time, &close_start_time)) ERR;
close_us = ((int)close_diff_time.tv_sec * MILLION + (int)close_diff_time.tv_usec); close_us = ((int)close_diff_time.tv_sec * MILLION +
(int)close_diff_time.tv_usec) / num_files[t];
/* We're done with this. */ /* We're done with this. */
free(ncid_in); free(ncid_in);
/* How much memory was used for each open file? */ /* How much memory was used for each open file? */
mem_per_file = mem_used2/num_files[t]; /* mem_per_file = mem_used2/num_files[t]; */
/* Prepare the dimensions string. */ /* Prepare the dimensions string. */
if (ndims[t] == MAX_DIMS) if (ndims[t] == MAX_DIMS)

View File

@ -1,21 +1,19 @@
/* This is part of the netCDF package. /* This is part of the netCDF package. Copyright 2005-2018 University
Copyright 2005 University Corporation for Atmospheric Research/Unidata Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
See COPYRIGHT file for conditions of use. conditions of use.
Test internal netcdf-4 file code. This is a benchmark program which tests file writes with compressed
$Id: tst_files3.c,v 1.5 2010/02/02 17:19:28 ed Exp $ data.
Ed Hartnett
*/ */
#include <config.h>
#include <stdio.h>
#include <nc_tests.h> #include <nc_tests.h>
#include "err_macros.h" #include "err_macros.h"
#include "netcdf.h"
#include <hdf5.h> #include <hdf5.h>
#include <unistd.h> #include <unistd.h>
#include <time.h> #include <time.h>
#include <sys/time.h> /* Extra high precision time info. */ #include <sys/time.h> /* Extra high precision time info. */
#include <string.h>
#define NDIMS1 1 #define NDIMS1 1
#define NDIMS 3 #define NDIMS 3
@ -25,14 +23,15 @@
#define Z_LEN 128 #define Z_LEN 128
#define NUM_TRIES 200 #define NUM_TRIES 200
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
int dump_file2(const float *data, int docompression, int usedefdim) int dump_file2(const float *data, int docompression, int usedefdim)
{ {
int ncmode, ncid, dimids[NDIMS], var; int ncid, dimids[NDIMS], var;
size_t start[NDIMS] = {0, 0, 0}; size_t start[NDIMS] = {0, 0, 0};
size_t count[NDIMS] = {1, 1, Z_LEN}; size_t count[NDIMS] = {1, 1, Z_LEN};
/* size_t count[NDIMS] = {X_LEN, Y_LEN, Z_LEN};*/
ncmode = NC_CLOBBER|NC_NETCDF4;
if (nc_create(FILE_NAME, NC_NETCDF4, &ncid)) ERR_RET; if (nc_create(FILE_NAME, NC_NETCDF4, &ncid)) ERR_RET;
if (nc_def_dim(ncid, "time", X_LEN, &dimids[0])) ERR_RET; if (nc_def_dim(ncid, "time", X_LEN, &dimids[0])) ERR_RET;
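dump_file2() goes on, in lines not shown by this hunk, to define variables and (when docompression is set) presumably enable deflate before leaving define mode. A self-contained, hedged sketch of that netCDF-4 deflate call sequence follows; the file, dimension, and variable names here are invented and the CHK macro is local to the sketch, not part of this test.

#include <stdio.h>
#include <netcdf.h>

/* Abort-on-error helper for this sketch only. */
#define CHK(e) do { int _s = (e); if (_s) { \
    fprintf(stderr, "%s\n", nc_strerror(_s)); return 2; } } while (0)

int
main(void)
{
    int ncid, dimids[3], varid, i;
    float data[128];
    size_t start[3] = {0, 0, 0}, count[3] = {1, 1, 128};

    for (i = 0; i < 128; i++)
        data[i] = (float)i;

    /* Create a netCDF-4 file with one 3D float variable. */
    CHK(nc_create("deflate_sketch.nc", NC_CLOBBER | NC_NETCDF4, &ncid));
    CHK(nc_def_dim(ncid, "time", 16, &dimids[0]));
    CHK(nc_def_dim(ncid, "y", 16, &dimids[1]));
    CHK(nc_def_dim(ncid, "x", 128, &dimids[2]));
    CHK(nc_def_var(ncid, "data", NC_FLOAT, 3, dimids, &varid));

    /* Turn on shuffle and zlib deflate (level 1) while still in
     * define mode. */
    CHK(nc_def_var_deflate(ncid, varid, 1, 1, 1));

    CHK(nc_enddef(ncid));

    /* Write one 1 x 1 x 128 slab, as the benchmark does per iteration. */
    CHK(nc_put_vara_float(ncid, varid, start, count, data));
    CHK(nc_close(ncid));
    return 0;
}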
@ -96,9 +95,8 @@ int dump_file3(const float *data, int docompression, int usedefdim)
int dump_hdf_file(const float *data, int docompression) int dump_hdf_file(const float *data, int docompression)
{ {
hid_t file_id, dataset_id, dataspace_id, propid; hid_t file_id, dataset_id, propid;
hid_t file_spaceid, mem_spaceid, access_plistid, xfer_plistid; hid_t file_spaceid, mem_spaceid, access_plistid, xfer_plistid;
herr_t status;
hsize_t dims[NDIMS] = {X_LEN, Y_LEN, Z_LEN}; hsize_t dims[NDIMS] = {X_LEN, Y_LEN, Z_LEN};
hsize_t start[NDIMS] = {0, 0, 0}; hsize_t start[NDIMS] = {0, 0, 0};
hsize_t count[NDIMS] = {1, 1, Z_LEN}; hsize_t count[NDIMS] = {1, 1, Z_LEN};
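dump_hdf_file() pairs a memory dataspace with a hyperslab selection on the file dataspace so it can write one slab per iteration. A hedged, self-contained sketch of that HDF5 1.8+ pattern is below; the dataset name, extents, and function name are invented for illustration and are not the code in this commit.

#include <hdf5.h>

/* Sketch: write one 1 x 1 x 128 slab of a 3D float dataset using
 * separate file and memory dataspaces plus a hyperslab selection. */
static int
write_one_slab(void)
{
    hid_t file_id, dset_id, file_spaceid, mem_spaceid;
    hsize_t dims[3] = {4, 4, 128};
    hsize_t start[3] = {0, 0, 0};
    hsize_t count[3] = {1, 1, 128};
    float slab[128];
    int i;

    for (i = 0; i < 128; i++)
        slab[i] = (float)i;

    if ((file_id = H5Fcreate("hyperslab_sketch.h5", H5F_ACC_TRUNC,
                             H5P_DEFAULT, H5P_DEFAULT)) < 0) return 1;
    if ((file_spaceid = H5Screate_simple(3, dims, NULL)) < 0) return 1;
    if ((dset_id = H5Dcreate2(file_id, "data", H5T_NATIVE_FLOAT,
                              file_spaceid, H5P_DEFAULT, H5P_DEFAULT,
                              H5P_DEFAULT)) < 0) return 1;

    /* The memory space describes the slab buffer; the hyperslab on the
     * file space selects where in the dataset the slab lands. */
    if ((mem_spaceid = H5Screate_simple(3, count, NULL)) < 0) return 1;
    if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, start, NULL,
                            count, NULL) < 0) return 1;
    if (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, mem_spaceid, file_spaceid,
                 H5P_DEFAULT, slab) < 0) return 1;

    if (H5Sclose(mem_spaceid) < 0) return 1;
    if (H5Sclose(file_spaceid) < 0) return 1;
    if (H5Dclose(dset_id) < 0) return 1;
    if (H5Fclose(file_id) < 0) return 1;
    return 0;
}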

View File

@ -6,7 +6,9 @@
but they use HDF5 the same way that netCDF-4 does, so if these but they use HDF5 the same way that netCDF-4 does, so if these
tests don't work, then netCDF-4 won't work either. tests don't work, then netCDF-4 won't work either.
Ed Hartnett
*/ */
#include <config.h> #include <config.h>
#include <nc_tests.h> #include <nc_tests.h>
#include "err_macros.h" #include "err_macros.h"
@ -19,6 +21,10 @@
#define FILE_NAME "tst_h_many_atts.h5" #define FILE_NAME "tst_h_many_atts.h5"
#define GRP_NAME "group1" #define GRP_NAME "group1"
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
int int
main() main()
{ {

View File

@ -1,9 +1,12 @@
/** \file /* This is part of the netCDF package. Copyright 2005-2018 University
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
conditions of use.
Performance test from KNMI. This is a benchmarking program that depends on some KNMI files from
the Unidata ftp site. The files are opened and read, and
performance is timed.
Copyright 2009, UCAR/Unidata. See \ref copyright file for copying and Ed Hartnett
redistribution conditions.
*/ */
#include <nc_tests.h> #include <nc_tests.h>
@ -28,6 +31,10 @@ redistribution conditions.
#define TIME_LEN 1560 #define TIME_LEN 1560
#define NUM_TS 1 #define NUM_TS 1
/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);
extern const char* nc_strerror(int ncerr); extern const char* nc_strerror(int ncerr);
static int static int
complain(int stat) complain(int stat)
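The body of complain() is elided by this hunk. Judging from the extern nc_strerror() declaration above it, it most likely just reports a nonzero netCDF status and passes the status through; the sketch below is an assumption about that shape, not the code in this commit.

#include <stdio.h>
#include <netcdf.h>

/* Assumed shape of complain(): print the netCDF error string for a
 * nonzero status and return the status so calls can be chained, e.g.
 *     if (complain(read_file(FILE_NAME_1))) ERR;
 */
static int
complain(int stat)
{
    if (stat)
    {
        fflush(stdout);
        fprintf(stderr, "NetCDF error: %s\n", nc_strerror(stat));
    }
    return stat;
}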
@ -77,24 +84,9 @@ read_file(char *filename)
int int
main(int argc, char **argv) main(int argc, char **argv)
{ {
int c, header = 0, verbose = 0, timeseries = 0;
int ncid, varid, storage;
char name_in[NC_MAX_NAME + 1];
size_t len;
size_t cs[NDIMS3] = {0, 0, 0};
int cache = MEGABYTE;
int ndims, dimid[NDIMS3];
float hor_data[LAT_LEN * LON_LEN];
int read_1_us, avg_read_us;
float ts_data[TIME_LEN];
size_t start[NDIMS3], count[NDIMS3];
int deflate, shuffle, deflate_level;
struct timeval start_time, end_time, diff_time;
printf("\n*** Testing netcdf-4 vs. netcdf-3 performance.\n"); printf("\n*** Testing netcdf-4 vs. netcdf-3 performance.\n");
if (complain(read_file(FILE_NAME_1))) ERR; if (complain(read_file(FILE_NAME_1))) ERR;
if (complain(read_file(FILE_NAME_2))) ERR; if (complain(read_file(FILE_NAME_2))) ERR;
SUMMARIZE_ERR; SUMMARIZE_ERR;
FINAL_RESULTS; FINAL_RESULTS;
} }