Mirror of https://github.com/Unidata/netcdf-c.git (synced 2025-03-07 17:08:02 +08:00)

Merge branch 'pnetcdf_driver' of github.com:wkliao/netcdf-c into pnetcdf_driver
commit 0853b3c9ee
@@ -1535,6 +1535,7 @@ AC_CONFIG_FILES(dap4_test/findtestserver4.c:ncdap_test/findtestserver.c.in)

AC_MSG_NOTICE([generating header files and makefiles])
AC_CONFIG_FILES([nc_test4/run_par_test.sh], [chmod ugo+x nc_test4/run_par_test.sh])
AC_CONFIG_FILES([nc_test4/run_par_bm_test.sh], [chmod ugo+x nc_test4/run_par_bm_test.sh])
AC_CONFIG_FILES([nc-config], [chmod 755 nc-config])
AC_CONFIG_FILES([Makefile
netcdf.pc
@@ -80,4 +80,15 @@ int ERR_report(int stat, const char* file, int line)
return 0; \
} while (0)

/* This macro does the same as FINAL_RESULTS, but without the success
 * message. */
#define FINAL_RESULTS_QUIET do { \
if (total_err) \
{ \
printf("%d errors detected! Sorry!\n", total_err); \
return 2; \
} \
return 0; \
} while (0)

#endif /* _ERR_MACROS_H */
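As a usage sketch of the new macro (hedged: the include list mirrors the tests in this diff, but the benchmark body is illustrative), a benchmark's main() built on these macros ends like this:

#include <config.h>
#include <stdio.h>
#include "err_macros.h"   /* assumed to provide ERR, total_err, and FINAL_RESULTS_QUIET */

int
main(int argc, char **argv)
{
   /* ... timed benchmark work; failures bump total_err via the ERR macro ... */
   printf("benchmark finished\n");
   FINAL_RESULTS_QUIET;   /* returns 2 plus an error count if anything failed, else 0 */
}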
@@ -22,7 +22,6 @@
#include "netcdf_par.h"
#endif
#include "netcdf.h"
//#include "err_macros.h"

/** NC_MAX_DIMS for tests. Allows different NC_MAX_DIMS values
 * without breaking this test with a heap or stack overflow. */
@@ -61,9 +61,10 @@ nc_put_att_string(int ncid, int varid, const char *name,
\ingroup attributes
Write a text attribute.

Add or change a text attribute. If this attribute is new,
or if the space required to store the attribute is greater than
before, the netCDF dataset must be in define mode.
Add or change a text attribute. If this attribute is new, or if the
space required to store the attribute is greater than before, the
netCDF dataset must be in define mode for classic formats (or
netCDF-4/HDF5 with NC_CLASSIC_MODEL).

Although it's possible to create attributes of all types, text and
double attributes are adequate for most purposes.
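A minimal sketch of the define-mode rule described above (hedged: the file path, attribute name, and helper name are illustrative, not from this commit):

#include <string.h>
#include <netcdf.h>

/* Add a new global text attribute to an existing classic-format file.
 * Because the attribute is new, the file must be put back into define
 * mode; a netCDF-4/HDF5 file without NC_CLASSIC_MODEL would not need
 * the nc_redef()/nc_enddef() pair. */
int
add_title_attribute(const char *path)   /* hypothetical helper */
{
   int ncid, ret;
   const char *title = "example title";
   if ((ret = nc_open(path, NC_WRITE, &ncid))) return ret;
   if ((ret = nc_redef(ncid))) return ret;
   if ((ret = nc_put_att_text(ncid, NC_GLOBAL, "title", strlen(title), title))) return ret;
   if ((ret = nc_enddef(ncid))) return ret;
   return nc_close(ncid);
}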
@@ -153,7 +154,8 @@ Write an attribute.
The function nc_put_att_ type adds or changes a variable attribute or
global attribute of an open netCDF dataset. If this attribute is new,
or if the space required to store the attribute is greater than
before, the netCDF dataset must be in define mode.
before, the netCDF dataset must be in define mode for classic formats
(or netCDF-4/HDF5 with NC_CLASSIC_MODEL).

With netCDF-4 files, nc_put_att will notice if you are writing a
_FillValue attribute, and will tell the HDF5 layer to use the
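The _FillValue handling mentioned above can be seen in miniature in a sketch like this (hedged: the helper name is illustrative; the fill value is the one used by the benchmarks later in this diff):

#include <netcdf.h>

/* Set _FillValue on a netCDF-4 float variable while still in define
 * mode, so the library can hand the fill value to the HDF5 layer. The
 * varid is assumed to come from an earlier nc_def_var() call. */
static const float fill_value[1] = {9.96921e+36f};

int
set_fill_value(int ncid, int varid)      /* hypothetical helper */
{
   return nc_put_att_float(ncid, varid, "_FillValue", NC_FLOAT, 1, fill_value);
}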
@@ -14,7 +14,7 @@
#include "config.h"
#include "hdf5internal.h"

extern int nc4_vararray_add(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var);
static void dumpopenobjects(NC_FILE_INFO_T* h5);

/** @internal When we have open objects at file close, should
we log them or print to stdout. Default is to log. */
@@ -818,7 +818,7 @@ int
nc_def_var_chunking_ints(int ncid, int varid, int contiguous, int *chunksizesp)
{
NC_VAR_INFO_T *var;
size_t *cs = NULL;
size_t *cs;
int i, retval;

/* Get pointer to the var. */
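For context, the public API this helper backs is nc_def_var_chunking(); a hedged sketch of its use (dimension names, sizes, and chunk sizes are illustrative):

#include <netcdf.h>

/* Define a 2-D variable with explicit chunk sizes: one chunk-size entry
 * per dimension, and NC_CHUNKED to request chunked rather than
 * contiguous storage. */
int
define_chunked_var(int ncid)             /* hypothetical helper */
{
   int dimids[2], varid, ret;
   size_t chunks[2] = {100, 100};        /* illustrative chunk sizes */
   if ((ret = nc_def_dim(ncid, "x", 1000, &dimids[0]))) return ret;
   if ((ret = nc_def_dim(ncid, "y", 1000, &dimids[1]))) return ret;
   if ((ret = nc_def_var(ncid, "data", NC_FLOAT, 2, dimids, &varid))) return ret;
   return nc_def_var_chunking(ncid, varid, NC_CHUNKED, chunks);
}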
@@ -25,18 +25,13 @@

#define NC_HDF5_MAX_NAME 1024 /**< @internal Max size of HDF5 name. */

#define MAXNAME 1024 /**< Max HDF5 name. */

/** @internal HDF5 object types. */
static unsigned int OTYPES[5] = {H5F_OBJ_FILE, H5F_OBJ_DATASET, H5F_OBJ_GROUP,
H5F_OBJ_DATATYPE, H5F_OBJ_ATTR};

/**
 * @internal Flag attributes in a linked list as dirty.
 *
 * @param attlist List of attributes, may be NULL.
 *
 * @return NC_NOERR No error.
 * @author Dennis Heimbigner
 */
static int
flag_atts_dirty(NCindex *attlist) {
@@ -55,7 +50,6 @@ flag_atts_dirty(NCindex *attlist) {
}

return NC_NOERR;

}

/**
@@ -3218,23 +3212,25 @@ exit:
}

/**
 * @internal
 * @internal Report information about an open HDF5 object. This is
 * called on any still-open objects when a HDF5 file close is
 * attempted.
 *
 * @param uselog
 * @param id HDF5 ID.
 * @param type
 * @param uselog If true, send output to LOG not stderr.
 * @param id HDF5 ID of open object.
 * @param type Type of HDF5 object, file, dataset, etc.
 *
 * @return NC_NOERR No error.
 * @author Dennis Heimbigner
 */
void
reportobject(int uselog, hid_t id, unsigned int type)
{
char name[MAXNAME];
char name[NC_HDF5_MAX_NAME];
ssize_t len;
const char* typename = NULL;
long long printid = (long long)id;

len = H5Iget_name(id, name, MAXNAME);
len = H5Iget_name(id, name, NC_HDF5_MAX_NAME);
if(len < 0) return;
name[len] = '\0';

@@ -3245,7 +3241,7 @@ reportobject(int uselog, hid_t id, unsigned int type)
case H5F_OBJ_DATATYPE: typename = "Datatype"; break;
case H5F_OBJ_ATTR:
typename = "Attribute";
len = H5Aget_name(id, MAXNAME, name);
len = H5Aget_name(id, NC_HDF5_MAX_NAME, name);
if(len < 0) len = 0;
name[len] = '\0';
break;
@@ -3270,7 +3266,7 @@ reportobject(int uselog, hid_t id, unsigned int type)
 * @param ntypes Number of types.
 * @param otypes Pointer that gets number of open types.
 *
 * @return ::NC_NOERR No error.
 * @author Dennis Heimbigner
 */
static void
reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes)
@@ -3307,11 +3303,14 @@ reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes)
 * @param uselog
 * @param fid HDF5 file ID.
 *
 * @return NC_NOERR No error.
 * @author Dennis Heimbigner
 */
void
reportopenobjects(int uselog, hid_t fid)
{
unsigned int OTYPES[5] = {H5F_OBJ_FILE, H5F_OBJ_DATASET, H5F_OBJ_GROUP,
H5F_OBJ_DATATYPE, H5F_OBJ_ATTR};

reportopenobjectsT(uselog, fid ,5, OTYPES);
}
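These reporting helpers are meant for debugging a close that fails because HDF5 objects are still open; a hedged sketch of how the ncid-level wrapper declared just below might be used inside the library's own test code (the caller name is illustrative):

#include <netcdf.h>

/* Declared in libhdf5 by this diff; not part of the public netCDF API. */
extern void showopenobjects(int ncid);

/* Dump any still-open HDF5 objects for a netCDF-4 file, then close it. */
static int
debug_close(int ncid)                    /* hypothetical helper */
{
   showopenobjects(ncid);                /* prints ids/names of open objects */
   return nc_close(ncid);
}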
@@ -3320,6 +3319,7 @@ reportopenobjects(int uselog, hid_t fid)
 *
 * @param h5 file object
 *
 * @author Dennis Heimbigner
 */
void
showopenobjects5(NC_FILE_INFO_T* h5)
@@ -3341,6 +3341,7 @@ showopenobjects5(NC_FILE_INFO_T* h5)
 *
 * @param ncid file id
 *
 * @author Dennis Heimbigner
 */
void
showopenobjects(int ncid)
@@ -61,10 +61,9 @@ IF(BUILD_BENCHMARKS)
add_sh_test(nc_test4 run_bm_elena)
add_sh_test(nc_test4 run_bm_test2)
add_sh_test(nc_test4 run_tst_chunks)
add_sh_test(nc_test4 run_bm_ar4)
add_sh_test(nc_test4 run_get_knmi_files)
add_sh_test(nc_test4 run_knmi_bm)

SET(NC4_TESTS ${NC4_TESTS} tst_create_files bm_file tst_chunks3 tst_ar4 tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts tst_files2 tst_files3 tst_ar5 tst_h_files3 tst_mem tst_knmi bm_netcdf4_recs)
SET(NC4_TESTS ${NC4_TESTS} tst_create_files bm_file tst_chunks3 tst_ar4 tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts tst_files2 tst_files3 tst_h_files3 tst_mem tst_knmi bm_netcdf4_recs)
IF(TEST_PARALLEL)
add_sh_test(nc_test4 run_par_bm_test)
ENDIF()
@@ -1,8 +1,10 @@
# This is part of the netCDF package.
# Copyright 2005 University Corporation for Atmospheric Research/Unidata
# See COPYRIGHT file for conditions of use.
# This is part of the netCDF package. Copyright 2005-2018 University
# Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
# conditions of use.
#
# This directory holds tests for netCDF-4. It is skipped if netCDF-4
# is not enabled.
#
# This entire directory will be skipped if netCDF-4 is not enabled.
# Ed Hartnett, Ward Fisher

# Put together AM_CPPFLAGS and AM_LDFLAGS.
@@ -14,7 +16,8 @@ include $(top_srcdir)/lib_flags.am

# Note which tests depend on other tests. necessary for make -j check
TEST_EXTENSIONS = .sh
extradir=
extradir =

# Link to our assembled library.
AM_LDFLAGS += ${top_builddir}/liblib/libnetcdf.la
LDADD = ${top_builddir}/liblib/libnetcdf.la
@@ -64,7 +67,7 @@ endif # BUILD_V2
if BUILD_BENCHMARKS
check_PROGRAMS += tst_create_files bm_file tst_chunks3 tst_ar4 \
tst_ar4_3d tst_ar4_4d bm_many_objs tst_h_many_atts bm_many_atts \
tst_files2 tst_files3 tst_ar5 tst_mem tst_knmi bm_netcdf4_recs
tst_files2 tst_files3 tst_mem tst_knmi bm_netcdf4_recs

bm_netcdf4_recs_SOURCES = bm_netcdf4_recs.c tst_utils.c
bm_many_atts_SOURCES = bm_many_atts.c tst_utils.c
@@ -77,23 +80,21 @@ tst_h_many_atts_SOURCES = tst_h_many_atts.c tst_utils.c
bm_file_SOURCES = bm_file.c tst_utils.c
tst_knmi_SOURCES = tst_knmi.c tst_utils.c

#WARNING: test_knmi depends on run_get_knmi_files.sh,
# so they must appear in the appropriate order.
TESTS += tst_ar4_3d tst_create_files run_bm_test1.sh run_bm_elena.sh \
run_bm_test2.sh run_tst_chunks.sh tst_files2 tst_files3 tst_ar5 \
tst_mem run_get_knmi_files.sh tst_knmi
run_bm_test2.sh run_tst_chunks.sh tst_files2 tst_files3 tst_mem \
run_knmi_bm.sh

# tst_create_files creates files for other tests.
run_bm_test1.log: tst_create_files.log
run_bm_test2.log: tst_create_files.log
run_bm_elena.log: tst_create_files.log

# This will run a parallel I/O benchmark for parallel builds.
if TEST_PARALLEL4
TESTS += run_par_bm_test.sh
# This benchmark depends on tst_create_files being run.
run_par_bm_test.log: tst_create_files.log
endif # TEST_PARALLEL4

benchmarks: check
./run_bm_radar_2D.sh
./run_bm_radar_2D_compression1.sh
./run_bm.sh
./run_tst_chunks.sh
./run_bm_ar4.sh
endif # BUILD_BENCHMARKS

# Szip Tests (requires ncdump)
@@ -126,13 +127,10 @@ check_PROGRAMS += bigmeta openbigmeta tst_attsperf
TESTS += tst_attsperf perftest.sh
endif

EXTRA_DIST = run_par_test.sh.in run_bm.sh run_bm_test1.sh \
run_bm_test2.sh run_bm_radar_2D.sh run_bm_radar_2D_compression1.sh \
run_par_bm_test.sh run_bm_elena.sh run_par_bm_radar_2D.sh \
run_bm_radar_2D_endianness1.sh run_tst_chunks.sh ref_chunks1.cdl \
ref_chunks2.cdl run_bm_ar4.sh ref_tst_compounds.nc \
ref_tst_xplatform2_1.nc ref_tst_xplatform2_2.nc ref_tst_dims.nc \
ref_tst_interops4.nc run_get_knmi_files.sh CMakeLists.txt \
EXTRA_DIST = run_par_test.sh.in run_par_bm_test.sh.in run_bm_test1.sh \
run_bm_test2.sh run_bm_elena.sh run_tst_chunks.sh \
ref_tst_compounds.nc ref_tst_xplatform2_1.nc ref_tst_xplatform2_2.nc \
ref_tst_dims.nc ref_tst_interops4.nc run_knmi_bm.sh CMakeLists.txt \
run_grp_rename.sh tst_h5_endians.c tst_atts_string_rewrite.c \
tst_put_vars_two_unlim_dim.c tst_empty_vlen_unlim.c \
run_empty_vlen_test.sh ref_hdf5_compat1.nc ref_hdf5_compat2.nc \
@@ -141,13 +139,13 @@ ref_szip.cdl tst_filter.sh bzip2.cdl filtered.cdl unfiltered.cdl \
ref_bzip2.c findplugin.in perftest.sh

CLEANFILES = tst_mpi_parallel.bin cdm_sea_soundings.nc bm_chunking.nc \
bm_radar.nc bm_radar1.nc radar_*.txt tst_floats_1D.cdl floats_1D_3.nc \
floats_1D.cdl tst_*.nc tst_floats2_*.cdl tst_ints2_*.cdl \
tst_shorts2_*.cdl tst_elena_*.cdl tst_simple*.cdl tst_chunks.cdl \
pr_A1.* tauu_A1.* usi_01.* thetau_01.* tst_*.h5 tst_grp_rename.cdl \
tst_grp_rename.dmp ref_grp_rename.cdl foo1.nc tst_*.h4 test.nc \
testszip.nc test.h5 szip_dump.cdl perftest.txt bigmeta.nc bigvars.nc \
run_par_test.sh
tst_floats_1D.cdl floats_1D_3.nc floats_1D.cdl tst_*.nc \
tst_floats2_*.cdl tst_ints2_*.cdl tst_shorts2_*.cdl tst_elena_*.cdl \
tst_simple*.cdl tst_chunks.cdl pr_A1.* tauu_A1.* usi_01.* thetau_01.* \
tst_*.h5 tst_grp_rename.cdl tst_grp_rename.dmp ref_grp_rename.cdl \
foo1.nc tst_*.h4 test.nc testszip.nc test.h5 szip_dump.cdl \
perftest.txt bigmeta.nc bigvars.nc run_par_test.sh *.gz MSGCPP_*.nc \
floats*.nc floats*.cdl shorts*.nc shorts*.cdl ints*.nc ints*.cdl

DISTCLEANFILES = findplugin.sh
@@ -10,17 +10,11 @@
handled by this program. (Input files may be in netCDF-4 format, but
they must conform to the classic model for this program to work.)

For the 3.7 and 4.0 netCDF releases, this program is not expected
for general use. It may be made safer and more general in future
releases, but for now, users should use this code with caution.

$Id: bm_file.c,v 1.64 2010/01/11 19:27:11 ed Exp $
Ed Hartnett
*/

#include <nc_tests.h> /* The ERR macro is here... */
#include <err_macros.h>

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/time.h> /* Extra high precision time info. */
#include <math.h>
@@ -30,7 +24,6 @@
#ifdef USE_PARALLEL
#include <mpi.h>
#endif
#include <netcdf.h>

#define MILLION 1000000
#define BAD -99
@@ -69,6 +62,10 @@ return 2; \
} while (0)
#endif

/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
struct timeval *y);

/* This function will fill the start and count arrays for the reads
 * and writes. */
static int
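The benchmarks time each phase with gettimeofday() and the tst_utils.c helper declared above; a hedged sketch of the pattern (the wrapper name is illustrative, and the helper body lives in tst_utils.c, not shown in this diff):

#include <stdio.h>
#include <sys/time.h>

#define MILLION 1000000

/* Prototype from tst_utils.c, as above. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
                         struct timeval *y);

static int
time_phase(void)                         /* hypothetical helper */
{
   struct timeval start_time, end_time, diff_time;
   if (gettimeofday(&start_time, NULL)) return 1;
   /* ... the reads or writes being benchmarked go here ... */
   if (gettimeofday(&end_time, NULL)) return 1;
   if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) return 1;
   printf("%d us\n", (int)(diff_time.tv_sec * MILLION + diff_time.tv_usec));
   return 0;
}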
@@ -280,11 +277,11 @@ cmp_file(char *file1, char *file2, int *meta_read_us, int *data_read_us,
if (use_par)
{
#ifdef USE_PARALLEL
if ((ret = nc_open_par(file1, 0, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid1)))
if ((ret = nc_open_par(file1, NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid1)))
ERR1(ret);
MPI_Barrier(MPI_COMM_WORLD);
ftime = MPI_Wtime();
if ((ret = nc_open_par(file2, 0, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid2)))
if ((ret = nc_open_par(file2, NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid2)))
ERR1(ret);
*meta_read_us += (MPI_Wtime() - ftime) * MILLION;
#else
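For context, a hedged sketch of the parallel-open pattern being changed in this hunk (the wrapper name and reduced error handling are illustrative; NC_MPIIO is the mode flag the commit now passes instead of 0):

#include <mpi.h>
#include <netcdf.h>
#include <netcdf_par.h>

/* Open an existing file for parallel access on MPI_COMM_WORLD. */
static int
open_parallel(const char *path, int *ncidp)   /* hypothetical helper */
{
   return nc_open_par(path, NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, ncidp);
}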
@@ -482,7 +479,7 @@ int copy_file(char *file_name_in, char *file_name_out, int cmode_out,
{
#ifdef USE_PARALLEL
ftime = MPI_Wtime();
if ((ret = nc_open_par(file_name_in, 0, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid_in)))
if ((ret = nc_open_par(file_name_in, NC_MPIIO, MPI_COMM_WORLD, MPI_INFO_NULL, &ncid_in)))
ERR1(ret);
*meta_read_us += (MPI_Wtime() - ftime) * MILLION;
#else
@@ -512,7 +509,7 @@ int copy_file(char *file_name_in, char *file_name_out, int cmode_out,
if (use_par)
{
#ifdef USE_PARALLEL
if ((ret = nc_create_par(file_name_out, cmode_out, MPI_COMM_WORLD,
if ((ret = nc_create_par(file_name_out, cmode_out|NC_MPIIO, MPI_COMM_WORLD,
MPI_INFO_NULL, &ncid_out)))
ERR1(ret);
#else
@@ -1151,5 +1148,5 @@ main(int argc, char **argv)
MPI_Finalize();
#endif

return 0;
FINAL_RESULTS_QUIET;
}
@ -1,10 +1,10 @@
|
||||
/*
|
||||
Copyright 2010, UCAR/Unidata
|
||||
See COPYRIGHT file for copying and redistribution conditions.
|
||||
/* This is part of the netCDF package. Copyright 2005-2018 University
|
||||
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
|
||||
conditions of use.
|
||||
|
||||
This program benchmarks creating a netCDF file with many objects.
|
||||
This program benchmarks creating a netCDF file with many objects.
|
||||
|
||||
$Id $
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <config.h>
|
||||
@ -19,6 +19,10 @@ $Id $
|
||||
/* We will create this file. */
|
||||
#define FILE_NAME "bm_many_atts.nc"
|
||||
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct timeval start_time, end_time, diff_time;
|
||||
@ -71,5 +75,5 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
nc_close(ncid);
|
||||
return(0);
|
||||
FINAL_RESULTS;
|
||||
}
|
||||
|
@ -1,8 +1,10 @@
|
||||
/** \file
|
||||
This program benchmarks creating a netCDF file with many objects.
|
||||
/* This is part of the netCDF package. Copyright 2018 University
|
||||
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
|
||||
conditions of use. See www.unidata.ucar.edu for more info.
|
||||
|
||||
Copyright 2010, UCAR/Unidata See COPYRIGHT file for copying and
|
||||
redistribution conditions.
|
||||
This program benchmarks creating a netCDF file with many objects.
|
||||
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <config.h>
|
||||
@ -17,6 +19,10 @@ redistribution conditions.
|
||||
/* We will create this file. */
|
||||
#define FILE_NAME "bm_many_objs.nc"
|
||||
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct timeval start_time, end_time, diff_time;
|
||||
@ -93,5 +99,5 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
nc_close(ncid);
|
||||
return(0);
|
||||
FINAL_RESULTS;
|
||||
}
|
||||
|
@ -1,17 +1,15 @@
|
||||
/** \file
|
||||
/* This is part of the netCDF package. Copyright 2005 University
|
||||
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
|
||||
conditions of use. See www.unidata.ucar.edu for more info.
|
||||
|
||||
This program benchmarks creating a netCDF file and reading records.
|
||||
This program benchmarks creating a netCDF file and reading records.
|
||||
|
||||
Copyright 2011, UCAR/Unidata See COPYRIGHT file for copying and
|
||||
redistribution conditions.
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <config.h>
|
||||
#include <nc_tests.h>
|
||||
#include "err_macros.h"
|
||||
#include <netcdf.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
#include <sys/time.h> /* Extra high precision time info. */
|
||||
|
||||
@ -20,76 +18,67 @@ redistribution conditions.
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct timeval start_time, end_time, diff_time;
|
||||
double sec;
|
||||
int nitem = 10000; /* default number of objects of each type */
|
||||
int i;
|
||||
int ncid;
|
||||
int data[] = {42};
|
||||
int g, grp, numgrp;
|
||||
char gname[16];
|
||||
int v, var, numvar, vn, vleft, nvars;
|
||||
printf("Running benchmark...");
|
||||
{
|
||||
int ncid;
|
||||
|
||||
int stat; /* return status */
|
||||
/* dimension ids */
|
||||
int basetime_dim;
|
||||
int forecast_dim;
|
||||
int bounds_dim;
|
||||
int latitude_dim;
|
||||
int longitude_dim;
|
||||
|
||||
/* dimension ids */
|
||||
int basetime_dim;
|
||||
int forecast_dim;
|
||||
int bounds_dim;
|
||||
int latitude_dim;
|
||||
int longitude_dim;
|
||||
/* dimension lengths */
|
||||
size_t basetime_len = NC_UNLIMITED;
|
||||
size_t forecast_len = 32;
|
||||
size_t bounds_len = 2;
|
||||
size_t latitude_len = 121;
|
||||
size_t longitude_len = 101;
|
||||
|
||||
/* dimension lengths */
|
||||
size_t basetime_len = NC_UNLIMITED;
|
||||
size_t forecast_len = 32;
|
||||
size_t bounds_len = 2;
|
||||
size_t latitude_len = 121;
|
||||
size_t longitude_len = 101;
|
||||
/* variable ids */
|
||||
int temperature_2m_id;
|
||||
|
||||
/* variable ids */
|
||||
int temperature_2m_id;
|
||||
|
||||
/* rank (number of dimensions) for each variable */
|
||||
/* rank (number of dimensions) for each variable */
|
||||
# define RANK_temperature_2m 4
|
||||
|
||||
/* variable shapes */
|
||||
int temperature_2m_dims[RANK_temperature_2m];
|
||||
static const float temperature_2m_FillValue_att[1] = {9.96921e+36} ;
|
||||
static const float temperature_2m_missing_value_att[1] = {9.96921e+36} ;
|
||||
static const float temperature_2m_valid_min_att[1] = {180} ;
|
||||
static const float temperature_2m_valid_max_att[1] = {330} ;
|
||||
/* variable shapes */
|
||||
int temperature_2m_dims[RANK_temperature_2m];
|
||||
static const float temperature_2m_FillValue_att[1] = {9.96921e+36} ;
|
||||
static const float temperature_2m_missing_value_att[1] = {9.96921e+36} ;
|
||||
static const float temperature_2m_valid_min_att[1] = {180} ;
|
||||
static const float temperature_2m_valid_max_att[1] = {330} ;
|
||||
|
||||
/* enter define mode */
|
||||
if (nc_create(FILE_NAME, NC_CLOBBER, &ncid)) ERR;
|
||||
/* enter define mode */
|
||||
if (nc_create(FILE_NAME, NC_CLOBBER, &ncid)) ERR;
|
||||
|
||||
/* define dimensions */
|
||||
if (nc_def_dim(ncid, "basetime", basetime_len, &basetime_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "forecast", forecast_len, &forecast_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "bounds", bounds_len, &bounds_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "latitude", latitude_len, &latitude_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "longitude", longitude_len, &longitude_dim)) ERR;
|
||||
/* define dimensions */
|
||||
if (nc_def_dim(ncid, "basetime", basetime_len, &basetime_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "forecast", forecast_len, &forecast_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "bounds", bounds_len, &bounds_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "latitude", latitude_len, &latitude_dim)) ERR;
|
||||
if (nc_def_dim(ncid, "longitude", longitude_len, &longitude_dim)) ERR;
|
||||
|
||||
/* define variables */
|
||||
temperature_2m_dims[0] = basetime_dim;
|
||||
temperature_2m_dims[1] = forecast_dim;
|
||||
temperature_2m_dims[2] = latitude_dim;
|
||||
temperature_2m_dims[3] = longitude_dim;
|
||||
if (nc_def_var(ncid, "temperature_2m", NC_FLOAT, RANK_temperature_2m,
|
||||
temperature_2m_dims, &temperature_2m_id)) ERR;
|
||||
/* define variables */
|
||||
temperature_2m_dims[0] = basetime_dim;
|
||||
temperature_2m_dims[1] = forecast_dim;
|
||||
temperature_2m_dims[2] = latitude_dim;
|
||||
temperature_2m_dims[3] = longitude_dim;
|
||||
if (nc_def_var(ncid, "temperature_2m", NC_FLOAT, RANK_temperature_2m,
|
||||
temperature_2m_dims, &temperature_2m_id)) ERR;
|
||||
|
||||
/* assign per-variable attributes */
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "long_name", 36, "Air temperature 2m above the surface")) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "units", 1, "K")) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "_FillValue", NC_FLOAT, 1, temperature_2m_FillValue_att)) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "missing_value", NC_FLOAT, 1, temperature_2m_missing_value_att)) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "valid_min", NC_FLOAT, 1, temperature_2m_valid_min_att)) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "valid_max", NC_FLOAT, 1, temperature_2m_valid_max_att)) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "standard_name", 15, "air_temperature")) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "cell_methods", 10, "area: mean")) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "coordinates", 5, "level")) ERR;
|
||||
if (nc_close(ncid)) ERR;
|
||||
|
||||
if (gettimeofday(&start_time, NULL)) ERR;
|
||||
|
||||
return(0);
|
||||
/* assign per-variable attributes */
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "long_name", 36, "Air temperature 2m above the surface")) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "units", 1, "K")) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "_FillValue", NC_FLOAT, 1, temperature_2m_FillValue_att)) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "missing_value", NC_FLOAT, 1, temperature_2m_missing_value_att)) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "valid_min", NC_FLOAT, 1, temperature_2m_valid_min_att)) ERR;
|
||||
if (nc_put_att_float(ncid, temperature_2m_id, "valid_max", NC_FLOAT, 1, temperature_2m_valid_max_att)) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "standard_name", 15, "air_temperature")) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "cell_methods", 10, "area: mean")) ERR;
|
||||
if (nc_put_att_text(ncid, temperature_2m_id, "coordinates", 5, "level")) ERR;
|
||||
if (nc_close(ncid)) ERR;
|
||||
}
|
||||
SUMMARIZE_ERR;
|
||||
FINAL_RESULTS;
|
||||
}
|
||||
|
@ -1,248 +0,0 @@
|
||||
netcdf tst_chunks {
|
||||
dimensions:
|
||||
dim1 = 6 ;
|
||||
dim2 = 12 ;
|
||||
dim3 = 4 ;
|
||||
variables:
|
||||
float var_contiguous(dim1, dim2, dim3) ;
|
||||
var_contiguous:_Storage = "contiguous" ;
|
||||
var_contiguous:_Endianness = "little" ;
|
||||
float var_chunked(dim1, dim2, dim3) ;
|
||||
var_chunked:_Storage = "chunked" ;
|
||||
var_chunked:_ChunkSizes = 2, 3, 1 ;
|
||||
var_chunked:_Endianness = "little" ;
|
||||
float var_compressed(dim1, dim2, dim3) ;
|
||||
var_compressed:_Storage = "chunked" ;
|
||||
var_compressed:_ChunkSizes = 2, 3, 1 ;
|
||||
var_compressed:_DeflateLevel = 1 ;
|
||||
var_compressed:_Endianness = "little" ;
|
||||
|
||||
// global attributes:
|
||||
:_NCProperties = "version=1|netcdflibversion=4.4.2-development|hdf5libversion=1.8.17" ;
|
||||
:_SuperblockVersion = 0 ;
|
||||
:_IsNetcdf4 = 1 ;
|
||||
:_Format = "netCDF-4 classic model" ;
|
||||
data:
|
||||
|
||||
var_contiguous =
|
||||
0, 0, 0, 0,
|
||||
1, 1, 1, 1,
|
||||
2, 2, 2, 2,
|
||||
3, 3, 3, 3,
|
||||
4, 4, 4, 4,
|
||||
5, 5, 5, 5,
|
||||
6, 6, 6, 6,
|
||||
7, 7, 7, 7,
|
||||
8, 8, 8, 8,
|
||||
9, 9, 9, 9,
|
||||
10, 10, 10, 10,
|
||||
11, 11, 11, 11,
|
||||
12, 12, 12, 12,
|
||||
13, 13, 13, 13,
|
||||
14, 14, 14, 14,
|
||||
15, 15, 15, 15,
|
||||
16, 16, 16, 16,
|
||||
17, 17, 17, 17,
|
||||
18, 18, 18, 18,
|
||||
19, 19, 19, 19,
|
||||
20, 20, 20, 20,
|
||||
21, 21, 21, 21,
|
||||
22, 22, 22, 22,
|
||||
23, 23, 23, 23,
|
||||
24, 24, 24, 24,
|
||||
25, 25, 25, 25,
|
||||
26, 26, 26, 26,
|
||||
27, 27, 27, 27,
|
||||
28, 28, 28, 28,
|
||||
29, 29, 29, 29,
|
||||
30, 30, 30, 30,
|
||||
31, 31, 31, 31,
|
||||
32, 32, 32, 32,
|
||||
33, 33, 33, 33,
|
||||
34, 34, 34, 34,
|
||||
35, 35, 35, 35,
|
||||
36, 36, 36, 36,
|
||||
37, 37, 37, 37,
|
||||
38, 38, 38, 38,
|
||||
39, 39, 39, 39,
|
||||
40, 40, 40, 40,
|
||||
41, 41, 41, 41,
|
||||
42, 42, 42, 42,
|
||||
43, 43, 43, 43,
|
||||
44, 44, 44, 44,
|
||||
45, 45, 45, 45,
|
||||
46, 46, 46, 46,
|
||||
47, 47, 47, 47,
|
||||
48, 48, 48, 48,
|
||||
49, 49, 49, 49,
|
||||
50, 50, 50, 50,
|
||||
51, 51, 51, 51,
|
||||
52, 52, 52, 52,
|
||||
53, 53, 53, 53,
|
||||
54, 54, 54, 54,
|
||||
55, 55, 55, 55,
|
||||
56, 56, 56, 56,
|
||||
57, 57, 57, 57,
|
||||
58, 58, 58, 58,
|
||||
59, 59, 59, 59,
|
||||
60, 60, 60, 60,
|
||||
61, 61, 61, 61,
|
||||
62, 62, 62, 62,
|
||||
63, 63, 63, 63,
|
||||
64, 64, 64, 64,
|
||||
65, 65, 65, 65,
|
||||
66, 66, 66, 66,
|
||||
67, 67, 67, 67,
|
||||
68, 68, 68, 68,
|
||||
69, 69, 69, 69,
|
||||
70, 70, 70, 70,
|
||||
71, 71, 71, 71 ;
|
||||
|
||||
var_chunked =
|
||||
0, 0, 0, 0,
|
||||
1, 1, 1, 1,
|
||||
2, 2, 2, 2,
|
||||
3, 3, 3, 3,
|
||||
4, 4, 4, 4,
|
||||
5, 5, 5, 5,
|
||||
6, 6, 6, 6,
|
||||
7, 7, 7, 7,
|
||||
8, 8, 8, 8,
|
||||
9, 9, 9, 9,
|
||||
10, 10, 10, 10,
|
||||
11, 11, 11, 11,
|
||||
12, 12, 12, 12,
|
||||
13, 13, 13, 13,
|
||||
14, 14, 14, 14,
|
||||
15, 15, 15, 15,
|
||||
16, 16, 16, 16,
|
||||
17, 17, 17, 17,
|
||||
18, 18, 18, 18,
|
||||
19, 19, 19, 19,
|
||||
20, 20, 20, 20,
|
||||
21, 21, 21, 21,
|
||||
22, 22, 22, 22,
|
||||
23, 23, 23, 23,
|
||||
24, 24, 24, 24,
|
||||
25, 25, 25, 25,
|
||||
26, 26, 26, 26,
|
||||
27, 27, 27, 27,
|
||||
28, 28, 28, 28,
|
||||
29, 29, 29, 29,
|
||||
30, 30, 30, 30,
|
||||
31, 31, 31, 31,
|
||||
32, 32, 32, 32,
|
||||
33, 33, 33, 33,
|
||||
34, 34, 34, 34,
|
||||
35, 35, 35, 35,
|
||||
36, 36, 36, 36,
|
||||
37, 37, 37, 37,
|
||||
38, 38, 38, 38,
|
||||
39, 39, 39, 39,
|
||||
40, 40, 40, 40,
|
||||
41, 41, 41, 41,
|
||||
42, 42, 42, 42,
|
||||
43, 43, 43, 43,
|
||||
44, 44, 44, 44,
|
||||
45, 45, 45, 45,
|
||||
46, 46, 46, 46,
|
||||
47, 47, 47, 47,
|
||||
48, 48, 48, 48,
|
||||
49, 49, 49, 49,
|
||||
50, 50, 50, 50,
|
||||
51, 51, 51, 51,
|
||||
52, 52, 52, 52,
|
||||
53, 53, 53, 53,
|
||||
54, 54, 54, 54,
|
||||
55, 55, 55, 55,
|
||||
56, 56, 56, 56,
|
||||
57, 57, 57, 57,
|
||||
58, 58, 58, 58,
|
||||
59, 59, 59, 59,
|
||||
60, 60, 60, 60,
|
||||
61, 61, 61, 61,
|
||||
62, 62, 62, 62,
|
||||
63, 63, 63, 63,
|
||||
64, 64, 64, 64,
|
||||
65, 65, 65, 65,
|
||||
66, 66, 66, 66,
|
||||
67, 67, 67, 67,
|
||||
68, 68, 68, 68,
|
||||
69, 69, 69, 69,
|
||||
70, 70, 70, 70,
|
||||
71, 71, 71, 71 ;
|
||||
|
||||
var_compressed =
|
||||
0, 0, 0, 0,
|
||||
1, 1, 1, 1,
|
||||
2, 2, 2, 2,
|
||||
3, 3, 3, 3,
|
||||
4, 4, 4, 4,
|
||||
5, 5, 5, 5,
|
||||
6, 6, 6, 6,
|
||||
7, 7, 7, 7,
|
||||
8, 8, 8, 8,
|
||||
9, 9, 9, 9,
|
||||
10, 10, 10, 10,
|
||||
11, 11, 11, 11,
|
||||
12, 12, 12, 12,
|
||||
13, 13, 13, 13,
|
||||
14, 14, 14, 14,
|
||||
15, 15, 15, 15,
|
||||
16, 16, 16, 16,
|
||||
17, 17, 17, 17,
|
||||
18, 18, 18, 18,
|
||||
19, 19, 19, 19,
|
||||
20, 20, 20, 20,
|
||||
21, 21, 21, 21,
|
||||
22, 22, 22, 22,
|
||||
23, 23, 23, 23,
|
||||
24, 24, 24, 24,
|
||||
25, 25, 25, 25,
|
||||
26, 26, 26, 26,
|
||||
27, 27, 27, 27,
|
||||
28, 28, 28, 28,
|
||||
29, 29, 29, 29,
|
||||
30, 30, 30, 30,
|
||||
31, 31, 31, 31,
|
||||
32, 32, 32, 32,
|
||||
33, 33, 33, 33,
|
||||
34, 34, 34, 34,
|
||||
35, 35, 35, 35,
|
||||
36, 36, 36, 36,
|
||||
37, 37, 37, 37,
|
||||
38, 38, 38, 38,
|
||||
39, 39, 39, 39,
|
||||
40, 40, 40, 40,
|
||||
41, 41, 41, 41,
|
||||
42, 42, 42, 42,
|
||||
43, 43, 43, 43,
|
||||
44, 44, 44, 44,
|
||||
45, 45, 45, 45,
|
||||
46, 46, 46, 46,
|
||||
47, 47, 47, 47,
|
||||
48, 48, 48, 48,
|
||||
49, 49, 49, 49,
|
||||
50, 50, 50, 50,
|
||||
51, 51, 51, 51,
|
||||
52, 52, 52, 52,
|
||||
53, 53, 53, 53,
|
||||
54, 54, 54, 54,
|
||||
55, 55, 55, 55,
|
||||
56, 56, 56, 56,
|
||||
57, 57, 57, 57,
|
||||
58, 58, 58, 58,
|
||||
59, 59, 59, 59,
|
||||
60, 60, 60, 60,
|
||||
61, 61, 61, 61,
|
||||
62, 62, 62, 62,
|
||||
63, 63, 63, 63,
|
||||
64, 64, 64, 64,
|
||||
65, 65, 65, 65,
|
||||
66, 66, 66, 66,
|
||||
67, 67, 67, 67,
|
||||
68, 68, 68, 68,
|
||||
69, 69, 69, 69,
|
||||
70, 70, 70, 70,
|
||||
71, 71, 71, 71 ;
|
||||
}
|
@ -1,25 +0,0 @@
|
||||
netcdf tst_chunks {
|
||||
dimensions:
|
||||
dim1 = 32 ;
|
||||
dim2 = 90 ;
|
||||
dim3 = 91 ;
|
||||
variables:
|
||||
float var_contiguous(dim1, dim2, dim3) ;
|
||||
var_contiguous:_Storage = "contiguous" ;
|
||||
var_contiguous:_Endianness = "little" ;
|
||||
float var_chunked(dim1, dim2, dim3) ;
|
||||
var_chunked:_Storage = "chunked" ;
|
||||
var_chunked:_ChunkSizes = 8, 10, 13 ;
|
||||
var_chunked:_Endianness = "little" ;
|
||||
float var_compressed(dim1, dim2, dim3) ;
|
||||
var_compressed:_Storage = "chunked" ;
|
||||
var_compressed:_ChunkSizes = 8, 10, 13 ;
|
||||
var_compressed:_DeflateLevel = 1 ;
|
||||
var_compressed:_Endianness = "little" ;
|
||||
|
||||
// global attributes:
|
||||
:_NCProperties = "version=1|netcdflibversion=4.4.2-development|hdf5libversion=1.8.17" ;
|
||||
:_SuperblockVersion = 0 ;
|
||||
:_IsNetcdf4 = 1 ;
|
||||
:_Format = "netCDF-4 classic model" ;
|
||||
}
|
@ -1,93 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# This shell runs a bunch of benchmarks on some specific files
|
||||
# available at Unidata.
|
||||
|
||||
# $Id: run_bm.sh,v 1.8 2007/11/30 16:45:33 ed Exp $
|
||||
|
||||
set -e
|
||||
|
||||
# Radar 2D file. Make sure we have a local disk copy. Not much point
|
||||
# in benchmarking read and write times over NFS!
|
||||
TMP=/shecky/data
|
||||
d1=20070803-2300
|
||||
echo "howdy!"
|
||||
ls $TMP/${d1}-2d.nc3
|
||||
if ! test -f $TMP/${d1}-2d.nc3; then
|
||||
cp /upc/share/testdata/nssl/mosaic2d_nc/tile1/$d1.netcdf $TMP/$d1-2d.nc3
|
||||
fi
|
||||
|
||||
# Copy the 2D radar file into a netCDF-4 version, with various
|
||||
# compression settings.
|
||||
out1=radar_2d_compression.txt
|
||||
c0=100
|
||||
c1=200
|
||||
h=-h
|
||||
for ((s=0; s < 2 ; s++))
|
||||
do
|
||||
for ((d=0; d <= 9 ; d=d+2))
|
||||
do
|
||||
cmd="${execdir}/bm_file $h -f 3 -o $TMP/$d1-2d.nc4 -c 0:${d}:${s}:${c0}:${c1}"
|
||||
for ((v=1; v < 12; v++))
|
||||
do
|
||||
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
|
||||
done
|
||||
cmd="$cmd $TMP/$d1-2d.nc3"
|
||||
echo "cmd=$cmd"
|
||||
if ! ($cmd >> $out1); then
|
||||
exit 1;
|
||||
fi
|
||||
h=
|
||||
done
|
||||
done
|
||||
|
||||
exit 0
|
||||
|
||||
# Get local copy of the radar 3D file.
|
||||
d1=20070803-2300
|
||||
if ! test -f $TMP/${d1}-3d.nc3; then
|
||||
cp /upc/share/testdata/nssl/mosaic3d_nc/tile1/20070803-2300.netcdf $TMP/${d1}-3d.nc3
|
||||
fi
|
||||
|
||||
# Test different compressions, with and without shuffle.
|
||||
out1=radar_3d_compression.txt
|
||||
c0=3
|
||||
c1=100
|
||||
c2=200
|
||||
h=-h
|
||||
for ((s=0; s < 2 ; s++))
|
||||
do
|
||||
for ((d=0; d <= 9 ; d++))
|
||||
do
|
||||
cmd="${execdir}/bm_file $h -f 3 -o $TMP/$d1.nc4 -c 0:${d}:${s}:${c0}:${c1}:${c2} $TMP/$d1.nc3"
|
||||
echo "cmd=$cmd"
|
||||
if ! ($cmd >> $out1); then
|
||||
exit 1;
|
||||
fi
|
||||
h=
|
||||
done
|
||||
done
|
||||
|
||||
# Try different chunk sizes with the same compression.
|
||||
out1=radar_3d_chunking.txt
|
||||
s=1
|
||||
d=3
|
||||
h=-h
|
||||
for c0 in 1 2 5
|
||||
do
|
||||
for c1 in 10 100 200 500
|
||||
do
|
||||
for c3 in 10 100 200 500
|
||||
do
|
||||
cmd="${execdir}/bm_file $h -f 3 -o $TMP/$d1.nc4 -c 0:${d}:${s}:${c0}:${c1}:${c2} $TMP/$d1.nc3"
|
||||
echo "cmd=$cmd"
|
||||
if ! ($cmd >> $out1); then
|
||||
exit 1;
|
||||
fi
|
||||
h=
|
||||
done
|
||||
done
|
||||
done
|
@ -1,94 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This shell runs a bunch of benchmarks on some specific files
|
||||
# available at Unidata. If you want to run this shell, you need these
|
||||
# data files.
|
||||
|
||||
# This script gets and benchmarks against some AR4 data.
|
||||
|
||||
# $Id: run_bm_ar4.sh,v 1.4 2010/01/11 19:27:11 ed Exp $
|
||||
|
||||
set -e
|
||||
echo ""
|
||||
#file_list="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc tauu_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12.nc usi_O1.20C3M_8.CCSM.icem.1870-01_cat_1999-12.nc"
|
||||
#file_list="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc tauu_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc usi_O1.20C3M_8.CCSM.icem.1870-01_cat_1999-12.nc"
|
||||
file_list="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc"
|
||||
echo " *** Getting sample AR4 files $file_list"
|
||||
|
||||
# Get the files.
|
||||
for f1 in $file_list
|
||||
do
|
||||
if ! test -f $f1; then
|
||||
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/ar4/$f1.gz
|
||||
gunzip $f1.gz
|
||||
fi
|
||||
done
|
||||
|
||||
echo "SUCCESS!!!"
|
||||
|
||||
out='run_bm_ar4_pr_out.csv'
|
||||
rm -rf $out
|
||||
echo " *** Benchmarking pr_A1 file with various chunksizes (output to ${out})..."
|
||||
|
||||
# Create netCDF-4 versions of the file, with different chunksizes.
|
||||
h=-h
|
||||
s=0
|
||||
pr_ar4_sample="pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12.nc"
|
||||
|
||||
file_num=0
|
||||
for d in -1
|
||||
do
|
||||
for c0 in 4 8 16 32 64
|
||||
do
|
||||
for c1 in 64
|
||||
do
|
||||
for c2 in 128
|
||||
do
|
||||
if test $d = -1; then
|
||||
file_out="pr_A1_${c0}_${c1}_${c2}.nc"
|
||||
else
|
||||
file_out="pr_A1_z${d}_${c0}_${c1}_${c2}.nc"
|
||||
fi
|
||||
out_files="$out_files $file_out"
|
||||
|
||||
# If the output file does not yet exist, create it.
|
||||
if test -f $file_out; then
|
||||
echo "found existing $file_out"
|
||||
else
|
||||
cmd="${execdir}/bm_file $h -f 3 -c 6:${d}:${s}:${c0}:${c1}:${c2} -o ${file_out} ${pr_ar4_sample}"
|
||||
echo "cmd=$cmd"
|
||||
# bash ./clear_cache.sh
|
||||
if ! ($cmd >> $out); then
|
||||
exit 1;
|
||||
fi
|
||||
fi
|
||||
|
||||
# Turn off header next time around.
|
||||
h=
|
||||
done
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
echo $out_files
|
||||
|
||||
# Do the horizontal runs.
|
||||
#bash ./clear_cache.sh
|
||||
${execdir}/tst_ar4 -h $pr_ar4_sample
|
||||
for f1 in $out_files
|
||||
do
|
||||
# bash ./clear_cache.sh
|
||||
${execdir}/tst_ar4 ${f1}
|
||||
done
|
||||
|
||||
# Do the timeseries runs.
|
||||
#bash ./clear_cache.sh
|
||||
${execdir}/tst_ar4 -t -h $pr_ar4_sample
|
||||
for f1 in $out_files
|
||||
do
|
||||
# bash ./clear_cache.sh
|
||||
${execdir}/tst_ar4 -t ${f1}
|
||||
done
|
||||
|
||||
echo "SUCCESS!!!"
|
||||
exit 0
|
@ -3,11 +3,14 @@
|
||||
# This shell runs some benchmarks that Elena ran as described here:
|
||||
# http://hdfeos.org/workshops/ws06/presentations/Pourmal/HDF5_IO_Perf.pdf
|
||||
|
||||
# $Id: run_bm_elena.sh,v 1.2 2007/12/30 17:19:29 ed Exp $
|
||||
# Ed Hartnett
|
||||
|
||||
set -e
|
||||
# Load common values for netCDF shell script tests.
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# Run benchmarks.
|
||||
echo ""
|
||||
|
||||
echo "*** Testing the benchmarking program bm_file for simple float file, no compression..."
|
||||
${execdir}/bm_file -h -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:16:256 tst_elena_int_3D.nc
|
||||
${execdir}/bm_file -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:256:256 tst_elena_int_3D.nc
|
||||
|
@ -1,122 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# This shell runs a bunch of benchmarks on some specific files
|
||||
# available at Unidata. If you want to run this shell, you need these
|
||||
# data files.
|
||||
|
||||
# This script gets and benchmarks against some 2D radar data.
|
||||
|
||||
# $Id: run_bm_radar_2D.sh,v 1.9 2008/01/16 13:27:01 ed Exp $
|
||||
|
||||
set -e
|
||||
|
||||
# Radar 2D file. Make sure we have a local disk copy. Not much point
|
||||
# in benchmarking read and write times over NFS!
|
||||
TMP=/shecky/data
|
||||
d1=20070803-2300
|
||||
file_num=0
|
||||
for t in 1 2 4
|
||||
do
|
||||
file=${d1}_tile${t}-2d.nc3
|
||||
in_file[$file_num]=$file
|
||||
let file_num=$file_num+1
|
||||
if ! test -f $TMP/$file; then
|
||||
echo "getting file: $file"
|
||||
cp -f /upc/share/testdata/nssl/mosaic2d_nc/tile${t}/$d1.netcdf.gz $TMP
|
||||
gunzip -f $TMP/$d1.netcdf.gz
|
||||
cp $d1.netcdf $TMP/$file
|
||||
fi
|
||||
done
|
||||
num_in_files=${#in_file[@]}
|
||||
|
||||
# Copy the 2D radar file into a netCDF-4 version, with various
|
||||
# CHUNKING settings.
|
||||
out1=radar_2d_chunking.csv
|
||||
out2=radar_2d_chunking_2.csv
|
||||
rm -rf $out1 $out2
|
||||
|
||||
# Turn on header (for the first run of bm_file).
|
||||
h=-h
|
||||
|
||||
# Turn off compression and shuffle filters.
|
||||
s=0
|
||||
d=-1
|
||||
|
||||
# file_num=0
|
||||
# for c0 in 251 1001 1501
|
||||
# do
|
||||
# for c1 in 251 1001 2001
|
||||
# do
|
||||
# # Build the command including chunk sizes for all 13 vars.
|
||||
# cmd="./bm_file $h -f 4 -o $TMP/$d1-2d_${c0}x${c1}.nc4 -c 0:${d}:${s}:${c0}:${c1}"
|
||||
# for ((v=1; v < 12; v++))
|
||||
# do
|
||||
# cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
|
||||
# done
|
||||
# cmd="$cmd $TMP/${in_file[${file_num}]}"
|
||||
# echo "cmd=$cmd"
|
||||
# if ! ($cmd); then
|
||||
# exit 1;
|
||||
# fi
|
||||
# h=
|
||||
|
||||
# # Switch to the next input file of three.
|
||||
# let file_num=$file_num+1
|
||||
# test $file_num -eq $num_in_files && file_num=0
|
||||
# done
|
||||
# done
|
||||
|
||||
|
||||
file_num=0
|
||||
for c0 in 251 1001 1501
|
||||
do
|
||||
for c1 in 251 1001 2001
|
||||
do
|
||||
for try in 0 1 2 3 4 5 6 7 8 9
|
||||
do
|
||||
# Confuse the disk buffering by copying the file each time, so
|
||||
# always reading a new file.
|
||||
# cp $TMP/${in_file[${file_num}]} $TMP/cp_${in_file[${file_num}]}
|
||||
|
||||
# Build the command including chunk sizes for all 13 vars.
|
||||
cmd="./bm_file $h -f 4 -c 0:${d}:${s}:${c0}:${c1}"
|
||||
for ((v=1; v < 12; v++))
|
||||
do
|
||||
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
|
||||
done
|
||||
cmd="$cmd $TMP/${in_file[${file_num}]}"
|
||||
echo "cmd=$cmd"
|
||||
sudo bash ./clear_cache.sh
|
||||
if ! ($cmd >> $out1); then
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
cmd="./bm_file $h -f 3 -c 0:${d}:${s}:${c0}:${c1}"
|
||||
for ((v=1; v < 12; v++))
|
||||
do
|
||||
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
|
||||
done
|
||||
cmd="$cmd $TMP/$d1-2d_${c0}x${c1}.nc4"
|
||||
echo "cmd=$cmd"
|
||||
sudo bash ./clear_cache.sh
|
||||
if ! ($cmd >> $out2); then
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
# Remove the copy. Next read will be a "new" file.
|
||||
# rm $TMP/cp_${in_file[${file_num}]}
|
||||
|
||||
# Turn off header next time around.
|
||||
h=
|
||||
|
||||
# Switch to the next input file of three.
|
||||
let file_num=$file_num+1
|
||||
test $file_num -eq $num_in_files && file_num=0
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
exit 0
|
@ -1,65 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This shell runs a bunch of benchmarks on some specific files
|
||||
# available at Unidata. If you want to run this shell, you need these
|
||||
# data files.
|
||||
|
||||
# This script gets and benchmarks against some 2D radar data.
|
||||
|
||||
# $Id: run_bm_radar_2D_compression1.sh,v 1.2 2007/12/30 15:39:13 ed Exp $
|
||||
|
||||
set -e
|
||||
|
||||
# Radar 2D file. Make sure we have a local disk copy. Not much point
|
||||
# in benchmarking read and write times over NFS!
|
||||
TMP=/shecky/data
|
||||
d1=20070803-2300
|
||||
file_num=0
|
||||
for t in 1 2 4
|
||||
do
|
||||
file=${d1}_tile${t}-2d.nc3
|
||||
in_file[$file_num]=$file
|
||||
let file_num=$file_num+1
|
||||
if ! test -f $TMP/$file; then
|
||||
echo "getting file: $file"
|
||||
cp -f /upc/share/testdata/nssl/mosaic2d_nc/tile${t}/$d1.netcdf.gz $TMP
|
||||
gunzip -f $TMP/$d1.netcdf.gz
|
||||
cp $d1.netcdf $TMP/$file
|
||||
fi
|
||||
done
|
||||
num_in_files=${#in_file[@]}
|
||||
|
||||
# Copy the 2D radar file into a netCDF-4 version, with various
|
||||
# compression settings.
|
||||
out1=radar_2d_compression.csv
|
||||
rm -rf $out1
|
||||
c0=1001
|
||||
c1=500
|
||||
h=-h
|
||||
file_num=0
|
||||
for ((s=0; s < 2 ; s++))
|
||||
do
|
||||
for ((d=-1; d <= 9 ; d++))
|
||||
do
|
||||
# Confuse the disk buffering by copying the file each time, so
|
||||
# always reading a new file.
|
||||
cp $TMP/${in_file[${file_num}]} $TMP/cp_${in_file[${file_num}]}
|
||||
|
||||
cmd="./bm_file $h -f 3 -d -o $TMP/$d1-2d.nc4 -c 0:${d}:${s}:${c0}:${c1}"
|
||||
for ((v=1; v < 12; v++))
|
||||
do
|
||||
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
|
||||
done
|
||||
cmd="$cmd $TMP/cp_${in_file[${file_num}]}"
|
||||
echo "cmd=$cmd"
|
||||
if ! ($cmd >> $out1); then
|
||||
exit 1;
|
||||
fi
|
||||
rm $TMP/cp_${in_file[${file_num}]}
|
||||
h=
|
||||
let file_num=$file_num+1
|
||||
test $file_num -eq $num_in_files && file_num=0
|
||||
done
|
||||
done
|
||||
|
||||
exit 0
|
@ -1,80 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# This shell runs a bunch of benchmarks on some specific files
|
||||
# available at Unidata. If you want to run this shell, you need these
|
||||
# data files.
|
||||
|
||||
# This script gets and benchmarks against some 2D radar data.
|
||||
|
||||
# $Id: run_bm_radar_2D_endianness1.sh,v 1.1 2008/01/03 16:19:08 ed Exp $
|
||||
|
||||
set -e
|
||||
|
||||
# Radar 2D file. Make sure we have a local disk copy. Not much point
|
||||
# in benchmarking read and write times over NFS!
|
||||
TMP=/shecky/data
|
||||
d1=20070803-2300
|
||||
file_num=0
|
||||
for t in 1 2 4
|
||||
do
|
||||
file=${d1}_tile${t}-2d.nc3
|
||||
in_file[$file_num]=$file
|
||||
let file_num=$file_num+1
|
||||
if ! test -f $TMP/$file; then
|
||||
echo "getting file: $file"
|
||||
cp -f /upc/share/testdata/nssl/mosaic2d_nc/tile${t}/$d1.netcdf.gz $TMP
|
||||
gunzip -f $TMP/$d1.netcdf.gz
|
||||
cp $d1.netcdf $TMP/$file
|
||||
fi
|
||||
done
|
||||
num_in_files=${#in_file[@]}
|
||||
|
||||
# Copy the 2D radar file into a netCDF-4 version, with various
|
||||
# CHUNKING settings.
|
||||
out1=radar_2d_endianness.csv
|
||||
rm -rf $out1
|
||||
|
||||
# Turn on header (for the first run of bm_file).
|
||||
h=-h
|
||||
|
||||
# Turn off compression and shuffle filters.
|
||||
s=0
|
||||
d=-1
|
||||
|
||||
# Set good chunksizes.
|
||||
c0=501
|
||||
c1=1001
|
||||
file_num=0
|
||||
for ((end=0; end <= 2 ; end++))
|
||||
do
|
||||
# Confuse the disk buffering by copying the file each time, so
|
||||
# always reading a new file.
|
||||
cp $TMP/${in_file[${file_num}]} $TMP/cp_${in_file[${file_num}]}
|
||||
|
||||
# Build the command including chunk sizes for all 13 vars.
|
||||
cmd="./bm_file -e $end $h -f 3 -d -o $TMP/$d1-2d.nc4 -c 0:${d}:${s}:${c0}:${c1}"
|
||||
for ((v=1; v < 12; v++))
|
||||
do
|
||||
cmd="$cmd,${v}:${d}:${s}:${c0}:${c1}"
|
||||
done
|
||||
cmd="$cmd $TMP/cp_${in_file[${file_num}]}"
|
||||
echo "cmd=$cmd"
|
||||
if ! ($cmd >> $out1); then
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
# Remove the copy. Next read will be a "new" file.
|
||||
rm $TMP/cp_${in_file[${file_num}]}
|
||||
|
||||
# Turn off header next time around.
|
||||
h=
|
||||
|
||||
# Switch to the next input file of three.
|
||||
let file_num=$file_num+1
|
||||
test $file_num -eq $num_in_files && file_num=0
|
||||
done
|
||||
|
||||
exit 0
|
@ -1,16 +1,13 @@
|
||||
#!/bin/sh
|
||||
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# This shell just tests the bm_file program by running it a few times
|
||||
# on a simple test file. Then it uses ncdump to check that the output
|
||||
# is what it should be.
|
||||
|
||||
# $Id: run_bm_test1.sh,v 1.13 2008/01/04 15:57:48 ed Exp $
|
||||
# Ed Hartnett
|
||||
|
||||
set -e
|
||||
echo ""
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
for type_name in floats ints shorts
|
||||
do
|
||||
|
@ -1,20 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This shell gets files from the netCDF ftp site for testing.
|
||||
|
||||
set -e
|
||||
echo ""
|
||||
file_list="MSGCPP_CWP_NC3.nc MSGCPP_CWP_NC4.nc"
|
||||
echo "Getting KNMI test files $file_list"
|
||||
|
||||
for f1 in $file_list
|
||||
do
|
||||
if ! test -f $f1; then
|
||||
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/$f1.gz
|
||||
gunzip $f1.gz
|
||||
fi
|
||||
done
|
||||
|
||||
echo "SUCCESS!!!"
|
||||
|
||||
exit 0
|
nc_test4/run_knmi_bm.sh (new executable file, 28 lines)
@ -0,0 +1,28 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This shell gets some files from the netCDF ftp site for testing,
|
||||
# then runs the tst_knmi benchmarking program.
|
||||
# Ed Hartnett
|
||||
|
||||
# Load common values for netCDF shell script tests.
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# Get files if needed.
|
||||
echo ""
|
||||
file_list="MSGCPP_CWP_NC3.nc MSGCPP_CWP_NC4.nc"
|
||||
echo "Getting KNMI test files $file_list"
|
||||
for f1 in $file_list
|
||||
do
|
||||
if ! test -f $f1; then
|
||||
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/$f1.gz
|
||||
gunzip $f1.gz
|
||||
fi
|
||||
done
|
||||
|
||||
# Run the C program on these files.
|
||||
${execdir}/tst_knmi
|
||||
|
||||
echo "SUCCESS!!!"
|
||||
|
||||
exit 0
|
@ -1,25 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This shell file runs benchmarks on the 2D radar data on parallel platforms.
|
||||
|
||||
# $Id: run_par_bm_radar_2D.sh,v 1.1 2007/12/18 01:16:25 ed Exp $
|
||||
|
||||
set -e
|
||||
echo ""
|
||||
echo "Getting radar 2D data file from Unidata FTP site..."
|
||||
file=20070803-2300_tile1-2d.nc3
|
||||
if ! test -f $file; then
|
||||
wget ftp://ftp.unidata.ucar.edu/pub/netcdf/sample_data/$file
|
||||
fi
|
||||
|
||||
echo "*** Running bm_file for parallel access on $file..."
|
||||
header="-h"
|
||||
chunksizes="1501:2001"
|
||||
for numproc in 1 4 16
|
||||
do
|
||||
mpiexec -n $numproc ./bm_file -p -d ${header} -s 16 -f 4 -o tst_r2d.nc -c 0:-1:0:1501:2001 $file
|
||||
header=
|
||||
done
|
||||
echo '*** SUCCESS!!!'
|
||||
|
||||
exit 0
|
nc_test4/run_par_bm_test.sh → nc_test4/run_par_bm_test.sh.in (Executable file → Normal file, 12 lines)
@ -1,26 +1,24 @@
|
||||
#!/bin/sh
|
||||
|
||||
# This shell file tests the bm_file program for parallel I/O.
|
||||
# Ed Hartnett
|
||||
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# This shell file tests the bm_file program for parallel I/O.
|
||||
|
||||
# $Id: run_par_bm_test.sh,v 1.5 2007/12/12 18:00:39 ed Exp $
|
||||
|
||||
set -e
|
||||
echo ""
|
||||
for type_name in floats ints shorts
|
||||
do
|
||||
echo "*** Running bm_file for parallel access on simple ${type_name} test files, 1D to 6D..."
|
||||
header="-h"
|
||||
for ((i=1; i <= 3; i++))
|
||||
for i in 1 2 3
|
||||
do
|
||||
test $i = 1 && chunksizes="100000"
|
||||
test $i = 2 && chunksizes="316:316"
|
||||
test $i = 3 && chunksizes="46:46:46"
|
||||
for numproc in 1 4 16
|
||||
do
|
||||
mpiexec -n $numproc ./bm_file -p -d ${header} -s ${numproc} -f 4 -o p_${type_name}2_${i}D_3.nc -c 0:-1:0:${chunksizes} ${type_name}2_${i}D_3.nc
|
||||
@MPIEXEC@ -n $numproc ./bm_file -p -d ${header} -s ${numproc} -f 4 -o p_${type_name}2_${i}D_3.nc -c 0:-1:0:${chunksizes} ${type_name}2_${i}D_3.nc
|
||||
${NCDUMP} -n tst_${type_name}2_${i}D p_${type_name}2_${i}D_3.nc > p_${type_name}2_${i}D.cdl
|
||||
diff tst_${type_name}2_${i}D.cdl p_${type_name}2_${i}D.cdl &> /dev/null
|
||||
header=
|
@ -1,17 +1,15 @@
|
||||
#!/bin/sh
|
||||
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
# This shell just tests the tst_chunks3 program by running it a few
|
||||
# times to generate a simple test file. Then it uses ncdump -s to
|
||||
# check that the output is what it should be.
|
||||
|
||||
# $Id: run_tst_chunks3.sh,v 1.2 2009/02/24 01:49:12 russ Exp $
|
||||
# Russ Rew
|
||||
|
||||
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
|
||||
. ../test_common.sh
|
||||
|
||||
set -e
|
||||
echo ""
|
||||
|
||||
echo "*** Running benchmarking program tst_chunks3 for tiny test file"
|
||||
compress_level=1
|
||||
dim1=6
|
||||
@ -21,8 +19,6 @@ chunk2=3
|
||||
dim3=4
|
||||
chunk3=1
|
||||
${execdir}/tst_chunks3 $compress_level $dim1 $chunk1 $dim2 $chunk2 $dim3 $chunk3
|
||||
${NCDUMP} -n tst_chunks -s tst_chunks3.nc > tst_chunks3.cdl
|
||||
diff tst_chunks3.cdl ref_chunks1.cdl
|
||||
echo '*** SUCCESS!!!'
|
||||
|
||||
echo ""
|
||||
@ -38,8 +34,6 @@ cachesize=10000000
|
||||
cachehash=10000
|
||||
cachepre=0.0
|
||||
${execdir}/tst_chunks3 $compress_level $dim1 $chunk1 $dim2 $chunk2 $dim3 $chunk3 $cachesize $cachehash $cachepre
|
||||
${NCDUMP} -n tst_chunks -s -h tst_chunks3.nc > tst_chunks3.cdl
|
||||
diff tst_chunks3.cdl ref_chunks2.cdl
|
||||
echo '*** SUCCESS!!!'
|
||||
|
||||
exit 0
|
||||
|
@ -4,7 +4,7 @@ See COPYRIGHT file for copying and redistribution conditions.
|
||||
|
||||
This program tests netcdf-4 performance with some AR-4 3D data.
|
||||
|
||||
$Id: tst_ar4.c,v 1.4 2010/01/11 19:27:11 ed Exp $
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <nc_tests.h>
|
||||
@ -13,7 +13,11 @@ $Id: tst_ar4.c,v 1.4 2010/01/11 19:27:11 ed Exp $
|
||||
#include <sys/time.h>
|
||||
#include <unistd.h>
|
||||
|
||||
/* From the data file we are using:
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
/* From the data file we are using:
|
||||
|
||||
netcdf pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12 {
|
||||
dimensions:
|
||||
@ -239,5 +243,5 @@ main(int argc, char **argv)
|
||||
else
|
||||
printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us);
|
||||
|
||||
return 0;
|
||||
FINAL_RESULTS;
|
||||
}
|
||||
|
@ -1,10 +1,10 @@
|
||||
/*
|
||||
Copyright 2009, UCAR/Unidata
|
||||
Copyright 2009-2018, UCAR/Unidata
|
||||
See COPYRIGHT file for copying and redistribution conditions.
|
||||
|
||||
This program tests netcdf-4 performance with some AR-4 3D data.
|
||||
|
||||
$Id: tst_ar4_3d.c,v 1.1 2010/01/11 19:28:28 ed Exp $
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <nc_tests.h>
|
||||
@ -22,7 +22,11 @@ $Id: tst_ar4_3d.c,v 1.1 2010/01/11 19:28:28 ed Exp $
|
||||
#define SIXTY_FOUR_MEG (SIXTEEN_MEG * 4)
|
||||
#define ONE_TWENTY_EIGHT_MEG (SIXTEEN_MEG * 8)
|
||||
|
||||
/* From the data file we are using:
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
/* From the data file we are using:
|
||||
|
||||
netcdf pr_A1.20C3M_8.CCSM.atmm.1870-01_cat_1999-12 {
|
||||
dimensions:
|
||||
@ -247,5 +251,5 @@ main(int argc, char **argv)
|
||||
else
|
||||
printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us);
|
||||
|
||||
return 0;
|
||||
FINAL_RESULTS;
|
||||
}
|
||||
|
@ -1,10 +1,9 @@
|
||||
/*
|
||||
Copyright 2009, UCAR/Unidata
|
||||
See COPYRIGHT file for copying and redistribution conditions.
|
||||
/* Copyright 2009-2018, UCAR/Unidata
|
||||
See COPYRIGHT file for copying and redistribution conditions.
|
||||
|
||||
This program tests netcdf-4 performance with some AR-4 3D data.
|
||||
This program tests netcdf-4 performance with some AR-4 4D data.
|
||||
|
||||
$Id: tst_ar4_4d.c,v 1.2 2010/01/14 20:25:55 ed Exp $
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <nc_tests.h>
|
||||
@ -22,7 +21,11 @@ $Id: tst_ar4_4d.c,v 1.2 2010/01/14 20:25:55 ed Exp $
|
||||
#define SIXTY_FOUR_MEG (SIXTEEN_MEG * 4)
|
||||
#define ONE_TWENTY_EIGHT_MEG (SIXTEEN_MEG * 8)
|
||||
|
||||
/* From the data file we are using:
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
/* From the data file we are using:
|
||||
|
||||
../ncdump/ncdump -h -s thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12.nc
|
||||
netcdf thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12 {
|
||||
@ -409,5 +412,5 @@ main(int argc, char **argv)
|
||||
else
|
||||
printf("%d\t\t%d\n", (int)read_1_us, (int)avg_read_us);
|
||||
|
||||
return 0;
|
||||
FINAL_RESULTS;
|
||||
}
|
||||
|
@ -1,99 +0,0 @@
|
||||
/* This is part of the netCDF package.
|
||||
Copyright 2005 University Corporation for Atmospheric Research/Unidata
|
||||
See COPYRIGHT file for conditions of use.
|
||||
|
||||
This program does some benchmarking of netCDF files for the AR-5
|
||||
data.
|
||||
*/
|
||||
|
||||
#include <nc_tests.h>
|
||||
#include "err_macros.h"
|
||||
#include "netcdf.h"
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
#include <sys/time.h> /* Extra high precision time info. */
|
||||
#include <../ncdump/nciter.h>
|
||||
|
||||
#define MILLION 1000000
|
||||
#define MAX_LEN 30
|
||||
#define TMP_FILE_NAME "tst_files2_tmp.out"
|
||||
|
||||
/* This function uses the ps command to find the amount of memory in
|
||||
use by the process. From the ps man page:
|
||||
|
||||
size SZ approximate amount of swap space that would be required if
|
||||
the process were to dirty all writable pages and then be
|
||||
swapped out. This number is very rough!
|
||||
*/
|
||||
void
|
||||
get_mem_used1(int *mem_used)
|
||||
{
|
||||
char cmd[NC_MAX_NAME + 1];
|
||||
char blob[MAX_LEN + 1] = "";
|
||||
FILE *fp;
|
||||
int num_char;
|
||||
|
||||
/* Run the ps command for this process, putting output (one number)
|
||||
* into file TMP_FILE_NAME. */
|
||||
sprintf(cmd, "ps -o size= %d > %s", getpid(), TMP_FILE_NAME);
|
||||
system(cmd);
|
||||
|
||||
/* Read the results and delete temp file. */
|
||||
if (!(fp = fopen(TMP_FILE_NAME, "r"))) exit;
|
||||
num_char = fread(blob, MAX_LEN, 1, fp);
|
||||
sscanf(blob, "%d", mem_used);
|
||||
fclose(fp);
|
||||
unlink(TMP_FILE_NAME);
|
||||
}
|
||||
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
{
|
||||
|
||||
#define BUFSIZE 1000000 /* access data in megabyte sized pieces */
|
||||
#define THETAU_FILE "/machine/downloads/AR5_sample_data/thetao_O1.SRESA1B_2.CCSM.ocnm.2000-01_cat_2099-12.nc"
|
||||
#define NDIMS_DATA 4
|
||||
printf("\n*** Running some AR-5 benchmarks.\n");
|
||||
printf("*** testing various chunksizes for thetau file...\n");
|
||||
{
|
||||
int ncid, ncid_out;
|
||||
/*char var_buf[BUFSIZE];*/ /* buffer for variable data */
|
||||
/* nciter_t iter; */ /* opaque structure for iteration status */
|
||||
/* size_t start[NDIMS_DATA];
|
||||
size_t count[NDIMS_DATA];*/
|
||||
/*float *data = (float *)var_buf; */
|
||||
char file_out[NC_MAX_NAME + 1];
|
||||
/*int ndims, nvars, natts, unlimdimid;*/
|
||||
size_t cs[NDIMS_DATA] = {120, 4, 40, 32};
|
||||
|
||||
/* /\* Open input. *\/ */
|
||||
/* if (nc_open(THETAU_FILE, NC_NOWRITE, &ncid)) ERR; */
|
||||
|
||||
/* /\* Create output file. *\/ */
|
||||
/* sprintf(file_out, "thetau_%d_%d_%d_%d.nc", (int)cs[0], */
|
||||
/* (int)cs[1], (int)cs[2], (int)cs[3]); */
|
||||
/* if (nc_create(file_out, NC_NOWRITE, &ncid_out)) ERR; */
|
||||
|
||||
/* /\* Copy the easy ones. *\/ */
|
||||
/* /\* if (nc_inq(ncid, &ndims, &nvars, &natts, &unlimdimid)) ERR; */
|
||||
/* if (ndims != 5 || nvars != 9 || natts != 8 || unlimdimid != 0) ERR;*\/ */
|
||||
|
||||
/* /\* /\\* Copy the main data payload with Russ's new nciters. *\\/ *\/ */
|
||||
/* /\* varid = 8; *\/ */
|
||||
/* /\* if (nc_get_iter(ncid, varid, BUFSIZE, &iter)) ERR; *\/ */
|
||||
/* /\* while((nvals = nc_next_iter(&iter, start, count)) > 0) *\/ */
|
||||
/* /\* { *\/ */
|
||||
/* /\* /\\* read in a block of data *\\/ *\/ */
|
||||
/* /\* if (nc_get_vara_double(ncid, varid, start, count, data)) ERR; *\/ */
|
||||
|
||||
/* /\* /\\* now write the changed data back out *\\/ *\/ */
|
||||
/* /\* if (nc_out_vara_double(ncid, varid, start, count, data)) ERR; *\/ */
|
||||
/* /\* } *\/ */
|
||||
/* /\* if (nvals < 0) ERR; *\/ */
|
||||
|
||||
/* if (nc_close(ncid)) ERR; */
|
||||
/* if (nc_close(ncid_out)) ERR; */
|
||||
}
|
||||
SUMMARIZE_ERR;
|
||||
FINAL_RESULTS;
|
||||
}
|
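The deleted AR-5 benchmark above (and tst_files2.c further below) estimates process memory by shelling out to ps: run "ps -o size=" for the current pid, redirect the single number it prints to a temporary file, and read it back. The following is a minimal, self-contained sketch of that technique; the helper and file names are illustrative rather than the test's own code, and the fread/error handling is tightened up relative to the original.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define TMP_FILE_NAME "mem_used_tmp.out"
#define MAX_LEN 30

/* Ask ps for the approximate size (in kB) of the current process and
 * return it in *mem_used. Returns non-zero on failure. */
static int
get_mem_used(int *mem_used)
{
   char cmd[128];
   char blob[MAX_LEN + 1] = "";
   FILE *fp;

   /* "ps -o size=" prints one number for the given pid; capture it. */
   snprintf(cmd, sizeof(cmd), "ps -o size= %d > %s", (int)getpid(),
            TMP_FILE_NAME);
   if (system(cmd)) return 1;

   if (!(fp = fopen(TMP_FILE_NAME, "r"))) return 1;
   if (!fread(blob, 1, MAX_LEN, fp)) { fclose(fp); return 1; }
   fclose(fp);
   unlink(TMP_FILE_NAME);
   return sscanf(blob, "%d", mem_used) == 1 ? 0 : 1;
}

int
main(void)
{
   int mem_kb;

   if (get_mem_used(&mem_kb)) return 1;
   printf("approximate memory in use: %d kB\n", mem_kb);
   return 0;
}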
@ -1,3 +1,12 @@
|
||||
/* This is part of the netCDF package. Copyright 2005-2018 University
|
||||
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
|
||||
conditions of use.
|
||||
|
||||
Runs benchmarks on different chunking sizes.
|
||||
|
||||
Russ Rew, Ed Hartnett, Dennis Heimbigner
|
||||
*/
|
||||
|
||||
#include <config.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
@ -9,14 +18,14 @@
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
#ifdef HAVE_SYS_TIMES_H
|
||||
# include <sys/times.h>
|
||||
#include <sys/times.h>
|
||||
#endif
|
||||
#ifdef HAVE_SYS_TIME_H
|
||||
#include <sys/time.h>
|
||||
#endif
|
||||
#include <assert.h>
|
||||
#ifdef HAVE_SYS_RESOURCE_H
|
||||
# include <sys/resource.h>
|
||||
#include <sys/resource.h>
|
||||
#endif
|
||||
#include "nc_tests.h" /* The ERR macro is here... */
|
||||
#include "netcdf.h"
|
||||
@ -298,7 +307,7 @@ main(int argc, char *argv[]) {
|
||||
count[1] = dims[1];
|
||||
count[2] = dims[2];
|
||||
|
||||
sprintf(time_mess," contiguous write %3ld %3ld %3ld",
|
||||
sprintf(time_mess," contiguous write %3d %3ld %3ld",
|
||||
1, dims[1], dims[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[0]; i++) {
|
||||
@ -310,7 +319,7 @@ main(int argc, char *argv[]) {
|
||||
printf("\n");
|
||||
contig_time = TMsec;
|
||||
|
||||
sprintf(time_mess," chunked write %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," chunked write %3d %3ld %3ld %3ld %3ld %3ld",
|
||||
1, dims[1], dims[2], chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[0]; i++) {
|
||||
@ -326,7 +335,7 @@ main(int argc, char *argv[]) {
|
||||
else
|
||||
printf(" %5.2g x slower\n", 1.0/ratio);
|
||||
|
||||
sprintf(time_mess," compressed write %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," compressed write %3d %3ld %3ld %3ld %3ld %3ld",
|
||||
1, dims[1], dims[2], chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[0]; i++) {
|
||||
@ -351,7 +360,7 @@ main(int argc, char *argv[]) {
|
||||
count[1] = 1;
|
||||
count[2] = dims[2];
|
||||
|
||||
sprintf(time_mess," contiguous write %3ld %3ld %3ld",
|
||||
sprintf(time_mess," contiguous write %3ld %3d %3ld",
|
||||
dims[0], 1, dims[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[1]; i++) {
|
||||
@ -363,7 +372,7 @@ main(int argc, char *argv[]) {
|
||||
printf("\n");
|
||||
contig_time = TMsec;
|
||||
|
||||
sprintf(time_mess," chunked write %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," chunked write %3ld %3d %3ld %3ld %3ld %3ld",
|
||||
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[1]; i++) {
|
||||
@ -379,7 +388,7 @@ main(int argc, char *argv[]) {
|
||||
else
|
||||
printf(" %5.2g x slower\n", 1.0/ratio);
|
||||
|
||||
sprintf(time_mess," compressed write %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," compressed write %3ld %3d %3ld %3ld %3ld %3ld",
|
||||
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[1]; i++) {
|
||||
@ -404,7 +413,7 @@ main(int argc, char *argv[]) {
|
||||
count[1] = dims[1];
|
||||
count[2] = 1;
|
||||
|
||||
sprintf(time_mess," contiguous write %3ld %3ld %3ld",
|
||||
sprintf(time_mess," contiguous write %3ld %3ld %3d",
|
||||
dims[0], dims[1], 1);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[2]; i++) {
|
||||
@ -416,7 +425,7 @@ main(int argc, char *argv[]) {
|
||||
printf("\n");
|
||||
contig_time = TMsec;
|
||||
|
||||
sprintf(time_mess," chunked write %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," chunked write %3ld %3ld %3d %3ld %3ld %3ld",
|
||||
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[2]; i++) {
|
||||
@ -432,7 +441,7 @@ main(int argc, char *argv[]) {
|
||||
else
|
||||
printf(" %5.2g x slower\n", 1.0/ratio);
|
||||
|
||||
sprintf(time_mess," compressed write %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," compressed write %3ld %3ld %3d %3ld %3ld %3ld",
|
||||
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[2]; i++) {
|
||||
@ -457,7 +466,7 @@ main(int argc, char *argv[]) {
|
||||
count[1] = dims[1];
|
||||
count[2] = dims[2];
|
||||
|
||||
sprintf(time_mess," contiguous read %3ld %3ld %3ld",
|
||||
sprintf(time_mess," contiguous read %3d %3ld %3ld",
|
||||
1, dims[1], dims[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[0]; i++) {
|
||||
@ -469,7 +478,7 @@ main(int argc, char *argv[]) {
|
||||
printf("\n");
|
||||
contig_time = TMsec;
|
||||
|
||||
sprintf(time_mess," chunked read %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," chunked read %3d %3ld %3ld %3ld %3ld %3ld",
|
||||
1, dims[1], dims[2] , chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[0]; i++) {
|
||||
@ -485,7 +494,7 @@ main(int argc, char *argv[]) {
|
||||
else
|
||||
printf(" %5.2g x slower\n", 1.0/ratio);
|
||||
|
||||
sprintf(time_mess," compressed read %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," compressed read %3d %3ld %3ld %3ld %3ld %3ld",
|
||||
1, dims[1], dims[2] , chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[0]; i++) {
|
||||
@ -510,7 +519,7 @@ main(int argc, char *argv[]) {
|
||||
count[1] = 1;
|
||||
count[2] = dims[2];
|
||||
|
||||
sprintf(time_mess," contiguous read %3ld %3ld %3ld",
|
||||
sprintf(time_mess," contiguous read %3ld %3d %3ld",
|
||||
dims[0], 1, dims[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[1]; i++) {
|
||||
@ -522,7 +531,7 @@ main(int argc, char *argv[]) {
|
||||
printf("\n");
|
||||
contig_time = TMsec;
|
||||
|
||||
sprintf(time_mess," chunked read %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," chunked read %3ld %3d %3ld %3ld %3ld %3ld",
|
||||
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[1]; i++) {
|
||||
@ -538,7 +547,7 @@ main(int argc, char *argv[]) {
|
||||
else
|
||||
printf(" %5.2g x slower\n", 1.0/ratio);
|
||||
|
||||
sprintf(time_mess," compressed read %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," compressed read %3ld %3d %3ld %3ld %3ld %3ld",
|
||||
dims[0], 1, dims[2], chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[1]; i++) {
|
||||
@ -563,7 +572,7 @@ main(int argc, char *argv[]) {
|
||||
count[1] = dims[1];
|
||||
count[2] = 1;
|
||||
|
||||
sprintf(time_mess," contiguous read %3ld %3ld %3ld",
|
||||
sprintf(time_mess," contiguous read %3ld %3ld %3d",
|
||||
dims[0], dims[1], 1);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[2]; i++) {
|
||||
@ -575,7 +584,7 @@ main(int argc, char *argv[]) {
|
||||
printf("\n");
|
||||
contig_time = TMsec;
|
||||
|
||||
sprintf(time_mess," chunked read %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," chunked read %3ld %3ld %3d %3ld %3ld %3ld",
|
||||
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[2]; i++) {
|
||||
@ -591,7 +600,7 @@ main(int argc, char *argv[]) {
|
||||
else
|
||||
printf(" %5.2g x slower\n", 1.0/ratio);
|
||||
|
||||
sprintf(time_mess," compressed read %3ld %3ld %3ld %3ld %3ld %3ld",
|
||||
sprintf(time_mess," compressed read %3ld %3ld %3d %3ld %3ld %3ld",
|
||||
dims[0], dims[1], 1, chunks[0], chunks[1], chunks[2]);
|
||||
TIMING_START ;
|
||||
for(i = 0; i < dims[2]; i++) {
|
||||
|
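The format-string changes in the chunking benchmark above fix a conversion-specifier mismatch: the literal 1 passed to sprintf is an int and needs %3d, while the dims[] and chunks[] arguments keep %3ld. Passing an int where %ld expects a long is undefined behavior in C. A tiny stand-alone illustration, with made-up dimension values:

#include <stdio.h>

int
main(void)
{
   long dim1 = 256, dim2 = 512;   /* hypothetical dimension lengths */
   char time_mess[128];

   /* Correct: %3d for the int literal, %3ld for the long values. */
   sprintf(time_mess, " contiguous write %3d %3ld %3ld", 1, dim1, dim2);
   printf("%s\n", time_mess);

   /* Using "%3ld" for the literal 1, as the old code did, would read a
    * long from an int argument -- undefined behavior. */
   return 0;
}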
@ -1,10 +1,10 @@
|
||||
/*
|
||||
Copyright 2007, UCAR/Unidata
|
||||
Copyright 2007-2018, UCAR/Unidata
|
||||
See COPYRIGHT file for copying and redistribution conditions.
|
||||
|
||||
This program creates a test file.
|
||||
|
||||
$Id: tst_create_files.c,v 1.11 2008/01/09 16:30:23 ed Exp $
|
||||
Ed Hartnett
|
||||
*/
|
||||
#include <config.h>
|
||||
#include <nc_tests.h>
|
||||
@ -21,9 +21,6 @@
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
{
|
||||
int nc_argc = argc;
|
||||
int nc_argv = argv;
|
||||
|
||||
printf("\n*** Create some files for testing benchmarks.\n");
|
||||
|
||||
#ifdef LARGE_FILE_TESTS
|
||||
|
@ -1,14 +1,15 @@
|
||||
/* This is part of the netCDF package.
|
||||
Copyright 2005 University Corporation for Atmospheric Research/Unidata
|
||||
See COPYRIGHT file for conditions of use.
|
||||
/* This is part of the netCDF package. Copyright 2005-2018 University
|
||||
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
|
||||
conditions of use.
|
||||
|
||||
Test netcdf-4 variables.
|
||||
$Id: tst_files2.c,v 1.11 2010/01/31 19:00:44 ed Exp $
|
||||
This is a benchmark test which times how long it takes to create
|
||||
some files.
|
||||
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <nc_tests.h>
|
||||
#include "err_macros.h"
|
||||
#include "netcdf.h"
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
#include <sys/time.h> /* Extra high precision time info. */
|
||||
@ -20,12 +21,16 @@
|
||||
|
||||
void *last_sbrk;
|
||||
|
||||
/* This function uses the ps command to find the amount of memory in
|
||||
use by the process. From the ps man page:
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
size SZ approximate amount of swap space that would be required if
|
||||
the process were to dirty all writable pages and then be
|
||||
swapped out. This number is very rough!
|
||||
/* This function uses the ps command to find the amount of memory in
|
||||
use by the process. From the ps man page:
|
||||
|
||||
size SZ approximate amount of swap space that would be required if
|
||||
the process were to dirty all writable pages and then be
|
||||
swapped out. This number is very rough!
|
||||
*/
|
||||
int
|
||||
get_mem_used1(int *mem_used)
|
||||
@ -33,7 +38,6 @@ get_mem_used1(int *mem_used)
|
||||
char cmd[NC_MAX_NAME + 1];
|
||||
char blob[MAX_LEN + 1] = "";
|
||||
FILE *fp;
|
||||
int num_char;
|
||||
|
||||
/* Run the ps command for this process, putting output (one number)
|
||||
* into file TMP_FILE_NAME. */
|
||||
@ -42,7 +46,7 @@ get_mem_used1(int *mem_used)
|
||||
|
||||
/* Read the results and delete temp file. */
|
||||
if (!(fp = fopen(TMP_FILE_NAME, "r"))) ERR;
|
||||
num_char = fread(blob, MAX_LEN, 1, fp);
|
||||
fread(blob, MAX_LEN, 1, fp);
|
||||
sscanf(blob, "%d", mem_used);
|
||||
fclose(fp);
|
||||
unlink(TMP_FILE_NAME);
|
||||
@ -71,7 +75,7 @@ get_mem_used2(int *mem_used)
|
||||
}
|
||||
else
|
||||
*mem_used = -1;
|
||||
fclose(pf);
|
||||
fclose(pf);
|
||||
}
|
||||
|
||||
void
|
||||
@ -95,16 +99,12 @@ create_sample_file(char *file_name, int ndims, int *dim_len,
|
||||
float *data_out;
|
||||
size_t start[MAX_DIMS], count[MAX_DIMS];
|
||||
int slab_nelems;
|
||||
int i, d, ret;
|
||||
int i, d;
|
||||
|
||||
if (ndims != MAX_DIMS && ndims != MAX_DIMS - 1) ERR_RET;
|
||||
|
||||
/* Create a file. */
|
||||
ret = nc_create(file_name, NC_NOCLOBBER|mode, &ncid);
|
||||
if (ret == NC_EEXIST)
|
||||
return NC_NOERR;
|
||||
else if (ret)
|
||||
ERR_RET;
|
||||
if (nc_create(file_name, NC_CLOBBER|mode, &ncid)) ERR_RET;
|
||||
|
||||
/* Initialize sample data. Slab of data will be full extent of last
|
||||
* two dimensions. */
|
||||
@ -181,8 +181,8 @@ main(int argc, char **argv)
|
||||
#define NUM_TRIES 6
|
||||
int *ncid_in;
|
||||
int mem_used, mem_used2;
|
||||
int mem_per_file;
|
||||
int num_files[NUM_TRIES] = {1, 1, 1, 1, 1, 1};
|
||||
/* int mem_per_file; */
|
||||
int num_files[NUM_TRIES] = {1, 5, 10, 20, 35, 50};
|
||||
char file_name[NUM_TRIES][NC_MAX_NAME + 1];
|
||||
int num_vars[NUM_TRIES];
|
||||
size_t cache_size[NUM_TRIES];
|
||||
@ -190,10 +190,10 @@ main(int argc, char **argv)
|
||||
char mode_name[NUM_TRIES][8];
|
||||
int ndims[NUM_TRIES];
|
||||
int dim_len[NUM_TRIES][MAX_DIMS];
|
||||
int dim_4d[MAX_DIMS] = {NC_UNLIMITED, 10, 1000, 1000};
|
||||
int dim_4d[MAX_DIMS] = {NC_UNLIMITED, 10, 100, 100};
|
||||
char dimstr[30];
|
||||
char chunkstr[30];
|
||||
int num_recs[NUM_TRIES] = {1, 1, 1};
|
||||
int num_recs[NUM_TRIES] = {1, 1, 1, 1, 1, 1};
|
||||
struct timeval start_time, end_time, diff_time;
|
||||
struct timeval close_start_time, close_end_time, close_diff_time;
|
||||
int open_us, close_us, create_us;
|
||||
@ -202,11 +202,9 @@ main(int argc, char **argv)
|
||||
int d, f, t;
|
||||
|
||||
printf("dims\t\tchunks\t\tformat\tnum_files\tcache(kb)\tnum_vars\tmem(kb)\t"
|
||||
"open_time(us)\tclose_time(us)\tcreate_time(us)\n");
|
||||
"open_time(us/file)\tclose_time(us/file)\tcreate_time(us/file)\n");
|
||||
for (t = 0; t < NUM_TRIES; t++)
|
||||
{
|
||||
/* Set up filename. */
|
||||
sprintf(file_name[t], "tst_files2_%d.nc", t);
|
||||
strcpy(mode_name[t], "netcdf4");
|
||||
mode[t] = NC_NETCDF4;
|
||||
cache_size[t] = 16000000;
|
||||
@ -215,88 +213,92 @@ main(int argc, char **argv)
|
||||
for (d = 0; d < ndims[t]; d++)
|
||||
dim_len[t][d] = dim_4d[d];
|
||||
|
||||
/* Create sample file (unless it already exists). */
|
||||
if (gettimeofday(&start_time, NULL)) ERR;
|
||||
if (create_sample_file(file_name[t], ndims[t], dim_len[t], num_vars[t],
|
||||
mode[t], num_recs[t])) ERR;
|
||||
|
||||
/* How long did it take? */
|
||||
if (gettimeofday(&end_time, NULL)) ERR;
|
||||
if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR;
|
||||
create_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec);
|
||||
|
||||
/* Change the cache settings. */
|
||||
if (nc_set_chunk_cache(cache_size[t], 20000, .75)) ERR;
|
||||
|
||||
/* We need storage for an array of ncids. */
|
||||
if (!(ncid_in = malloc(num_files[t] * sizeof(int)))) ERR;
|
||||
|
||||
/* How much memory is in use now? */
|
||||
if (get_mem_used1(&mem_used)) ERR;
|
||||
/* get_mem_used2(&mem_used);
|
||||
get_mem_used3(&mem_used);*/
|
||||
|
||||
/* Open the first file to get chunksizes. */
|
||||
if (gettimeofday(&start_time, NULL)) ERR;
|
||||
if (nc_open(file_name[t], 0, &ncid_in[0])) ERR;
|
||||
if (nc_inq_var_chunking(ncid_in[0], 0, &storage, chunksize)) ERR;
|
||||
|
||||
/* Now reopen this file a large number of times. */
|
||||
for (f = 1; f < num_files[t]; f++)
|
||||
if (nc_open(file_name[t], 0, &ncid_in[f])) ERR_RET;
|
||||
|
||||
/* How long did it take per file? */
|
||||
if (gettimeofday(&end_time, NULL)) ERR;
|
||||
if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR;
|
||||
open_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec);
|
||||
|
||||
/* How much memory is in use by this process now? */
|
||||
if (get_mem_used1(&mem_used2)) ERR;
|
||||
|
||||
/* Close all netcdf files. */
|
||||
if (gettimeofday(&close_start_time, NULL)) ERR;
|
||||
/* Create sample files. */
|
||||
if (gettimeofday(&start_time, NULL)) ERR;
|
||||
for (f = 0; f < num_files[t]; f++)
|
||||
if (nc_close(ncid_in[f])) ERR_RET;
|
||||
{
|
||||
/* Set up filename. */
|
||||
sprintf(file_name[t], "tst_files2_%d_%d.nc", t, f);
|
||||
if (create_sample_file(file_name[t], ndims[t], dim_len[t], num_vars[t],
|
||||
mode[t], num_recs[t])) ERR;
|
||||
|
||||
/* How long did it take to close all files? */
|
||||
if (gettimeofday(&close_end_time, NULL)) ERR;
|
||||
if (nc4_timeval_subtract(&close_diff_time, &close_end_time, &close_start_time)) ERR;
|
||||
close_us = ((int)close_diff_time.tv_sec * MILLION + (int)close_diff_time.tv_usec);
|
||||
/* How long did it take? */
|
||||
if (gettimeofday(&end_time, NULL)) ERR;
|
||||
if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR;
|
||||
create_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec) / num_files[t];
|
||||
}
|
||||
|
||||
/* We're done with this. */
|
||||
free(ncid_in);
|
||||
/* /\* Change the cache settings. *\/ */
|
||||
/* if (nc_set_chunk_cache(cache_size[t], 20000, .75)) ERR; */
|
||||
|
||||
/* How much memory was used for each open file? */
|
||||
mem_per_file = mem_used2/num_files[t];
|
||||
/* We need storage for an array of ncids. */
|
||||
if (!(ncid_in = malloc(num_files[t] * sizeof(int)))) ERR;
|
||||
|
||||
/* Prepare the dimensions string. */
|
||||
if (ndims[t] == MAX_DIMS)
|
||||
sprintf(dimstr, "%dx%dx%dx%d", dim_len[t][0], dim_len[t][1],
|
||||
dim_len[t][2], dim_len[t][3]);
|
||||
else
|
||||
sprintf(dimstr, "%dx%dx%d", dim_len[t][0], dim_len[t][1],
|
||||
dim_len[t][2]);
|
||||
/* How much memory is in use now? */
|
||||
if (get_mem_used1(&mem_used)) ERR;
|
||||
|
||||
/* Prepare the chunksize string. */
|
||||
if (storage == NC_CHUNKED)
|
||||
{
|
||||
if (ndims[t] == MAX_DIMS)
|
||||
sprintf(chunkstr, "%dx%dx%dx%d", (int)chunksize[0], (int)chunksize[1],
|
||||
(int)chunksize[2], (int)chunksize[3]);
|
||||
else
|
||||
sprintf(chunkstr, "%dx%dx%d", (int)chunksize[0], (int)chunksize[1],
|
||||
(int)chunksize[2]);
|
||||
}
|
||||
else
|
||||
strcpy(chunkstr, "contig ");
|
||||
/* Open the first file to get chunksizes. */
|
||||
if (gettimeofday(&start_time, NULL)) ERR;
|
||||
if (nc_open(file_name[t], 0, &ncid_in[0])) ERR;
|
||||
if (nc_inq_var_chunking(ncid_in[0], 0, &storage, chunksize)) ERR;
|
||||
|
||||
/* Output results. */
|
||||
printf("%s\t%s\t%s\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\n",
|
||||
dimstr, chunkstr, mode_name[t], num_files[t], (int)(cache_size[t]/1024),
|
||||
num_vars[t], mem_used2, open_us, close_us, create_us);
|
||||
/* Now reopen this file a large number of times. */
|
||||
for (f = 1; f < num_files[t]; f++)
|
||||
if (nc_open(file_name[t], 0, &ncid_in[f])) ERR_RET;
|
||||
|
||||
/* How long did it take per file? */
|
||||
if (gettimeofday(&end_time, NULL)) ERR;
|
||||
if (nc4_timeval_subtract(&diff_time, &end_time, &start_time)) ERR;
|
||||
open_us = ((int)diff_time.tv_sec * MILLION + (int)diff_time.tv_usec) / num_files[t];
|
||||
|
||||
/* How much memory is in use by this process now? */
|
||||
if (get_mem_used1(&mem_used2)) ERR;
|
||||
|
||||
/* Close all netcdf files. */
|
||||
if (gettimeofday(&close_start_time, NULL)) ERR;
|
||||
for (f = 0; f < num_files[t]; f++)
|
||||
if (nc_close(ncid_in[f])) ERR_RET;
|
||||
|
||||
/* How long did it take to close all files? */
|
||||
if (gettimeofday(&close_end_time, NULL)) ERR;
|
||||
if (nc4_timeval_subtract(&close_diff_time, &close_end_time, &close_start_time)) ERR;
|
||||
close_us = ((int)close_diff_time.tv_sec * MILLION +
|
||||
(int)close_diff_time.tv_usec) / num_files[t];
|
||||
|
||||
/* We're done with this. */
|
||||
free(ncid_in);
|
||||
|
||||
/* How much memory was used for each open file? */
|
||||
/* mem_per_file = mem_used2/num_files[t]; */
|
||||
|
||||
/* Prepare the dimensions string. */
|
||||
if (ndims[t] == MAX_DIMS)
|
||||
sprintf(dimstr, "%dx%dx%dx%d", dim_len[t][0], dim_len[t][1],
|
||||
dim_len[t][2], dim_len[t][3]);
|
||||
else
|
||||
sprintf(dimstr, "%dx%dx%d", dim_len[t][0], dim_len[t][1],
|
||||
dim_len[t][2]);
|
||||
|
||||
/* Prepare the chunksize string. */
|
||||
if (storage == NC_CHUNKED)
|
||||
{
|
||||
if (ndims[t] == MAX_DIMS)
|
||||
sprintf(chunkstr, "%dx%dx%dx%d", (int)chunksize[0], (int)chunksize[1],
|
||||
(int)chunksize[2], (int)chunksize[3]);
|
||||
else
|
||||
sprintf(chunkstr, "%dx%dx%d", (int)chunksize[0], (int)chunksize[1],
|
||||
(int)chunksize[2]);
|
||||
}
|
||||
else
|
||||
strcpy(chunkstr, "contig ");
|
||||
|
||||
/* Output results. */
|
||||
printf("%s\t%s\t%s\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\t\t%d\n",
|
||||
dimstr, chunkstr, mode_name[t], num_files[t], (int)(cache_size[t]/1024),
|
||||
num_vars[t], mem_used2, open_us, close_us, create_us);
|
||||
}
|
||||
}
|
||||
SUMMARIZE_ERR;
|
||||
SUMMARIZE_ERR;
|
||||
printf("Test for memory consumption...\n");
|
||||
{
|
||||
#define NUM_TRIES_100 100
|
||||
|
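tst_files2.c above times nc_open() and nc_inq_var_chunking() across many files. The sketch below shows that open/inquire/close pattern in isolation. The file name follows the tst_files2_%d_%d.nc naming the test generates but is otherwise hypothetical, and the cache parameters are simply the ones that appear (commented out) in the test.

#include <stdio.h>
#include <netcdf.h>

int
main(void)
{
   int ncid, storage, ret;
   size_t chunksize[NC_MAX_VAR_DIMS];

   /* Optionally resize the chunk cache before opening (16 MB cache,
    * 20000 slots, 0.75 preemption). */
   if ((ret = nc_set_chunk_cache(16000000, 20000, .75)))
      return ret;

   /* Open an existing file read-only and ask how variable 0 is stored. */
   if ((ret = nc_open("tst_files2_0_0.nc", 0, &ncid)))
      return ret;
   if ((ret = nc_inq_var_chunking(ncid, 0, &storage, chunksize)))
      return ret;

   if (storage == NC_CHUNKED)
      printf("var 0 chunked, chunksize[0] = %d\n", (int)chunksize[0]);
   else
      printf("var 0 contiguous\n");

   return nc_close(ncid);
}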
@ -1,21 +1,19 @@
|
||||
/* This is part of the netCDF package.
|
||||
Copyright 2005 University Corporation for Atmospheric Research/Unidata
|
||||
See COPYRIGHT file for conditions of use.
|
||||
/* This is part of the netCDF package. Copyright 2005-2018 University
|
||||
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
|
||||
conditions of use.
|
||||
|
||||
Test internal netcdf-4 file code.
|
||||
$Id: tst_files3.c,v 1.5 2010/02/02 17:19:28 ed Exp $
|
||||
This is a benchmark program which tests file writes with compressed
|
||||
data.
|
||||
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <config.h>
|
||||
#include <stdio.h>
|
||||
#include <nc_tests.h>
|
||||
#include "err_macros.h"
|
||||
#include "netcdf.h"
|
||||
#include <hdf5.h>
|
||||
#include <unistd.h>
|
||||
#include <time.h>
|
||||
#include <sys/time.h> /* Extra high precision time info. */
|
||||
#include <string.h>
|
||||
|
||||
#define NDIMS1 1
|
||||
#define NDIMS 3
|
||||
@ -25,14 +23,15 @@
|
||||
#define Z_LEN 128
|
||||
#define NUM_TRIES 200
|
||||
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
int dump_file2(const float *data, int docompression, int usedefdim)
|
||||
{
|
||||
int ncmode, ncid, dimids[NDIMS], var;
|
||||
int ncid, dimids[NDIMS], var;
|
||||
size_t start[NDIMS] = {0, 0, 0};
|
||||
size_t count[NDIMS] = {1, 1, Z_LEN};
|
||||
/* size_t count[NDIMS] = {X_LEN, Y_LEN, Z_LEN};*/
|
||||
|
||||
ncmode = NC_CLOBBER|NC_NETCDF4;
|
||||
|
||||
if (nc_create(FILE_NAME, NC_NETCDF4, &ncid)) ERR_RET;
|
||||
if (nc_def_dim(ncid, "time", X_LEN, &dimids[0])) ERR_RET;
|
||||
@ -96,9 +95,8 @@ int dump_file3(const float *data, int docompression, int usedefdim)
|
||||
|
||||
int dump_hdf_file(const float *data, int docompression)
|
||||
{
|
||||
hid_t file_id, dataset_id, dataspace_id, propid;
|
||||
hid_t file_id, dataset_id, propid;
|
||||
hid_t file_spaceid, mem_spaceid, access_plistid, xfer_plistid;
|
||||
herr_t status;
|
||||
hsize_t dims[NDIMS] = {X_LEN, Y_LEN, Z_LEN};
|
||||
hsize_t start[NDIMS] = {0, 0, 0};
|
||||
hsize_t count[NDIMS] = {1, 1, Z_LEN};
|
||||
|
@ -6,7 +6,9 @@
|
||||
but they use HDF5 the same way that netCDF-4 does, so if these
|
||||
tests don't work, than netCDF-4 won't work either.
|
||||
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <config.h>
|
||||
#include <nc_tests.h>
|
||||
#include "err_macros.h"
|
||||
@ -19,6 +21,10 @@
|
||||
#define FILE_NAME "tst_h_many_atts.h5"
|
||||
#define GRP_NAME "group1"
|
||||
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
int
|
||||
main()
|
||||
{
|
||||
|
@ -1,9 +1,12 @@
|
||||
/** \file
|
||||
/* This is part of the netCDF package. Copyright 2005-2018 University
|
||||
Corporation for Atmospheric Research/Unidata See COPYRIGHT file for
|
||||
conditions of use.
|
||||
|
||||
Performance test from KNMI.
|
||||
This is a benchmarking program that depends on some KNMI files from
|
||||
the Unidata ftp site. The files are opened and read, and
|
||||
performance is timed.
|
||||
|
||||
Copyright 2009, UCAR/Unidata. See \ref copyright file for copying and
|
||||
redistribution conditions.
|
||||
Ed Hartnett
|
||||
*/
|
||||
|
||||
#include <nc_tests.h>
|
||||
@ -28,6 +31,10 @@ redistribution conditions.
|
||||
#define TIME_LEN 1560
|
||||
#define NUM_TS 1
|
||||
|
||||
/* Prototype from tst_utils.c. */
|
||||
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
|
||||
struct timeval *y);
|
||||
|
||||
extern const char* nc_strerror(int ncerr);
|
||||
static int
|
||||
complain(int stat)
|
||||
@ -77,24 +84,9 @@ read_file(char *filename)
|
||||
int
|
||||
main(int argc, char **argv)
|
||||
{
|
||||
int c, header = 0, verbose = 0, timeseries = 0;
|
||||
int ncid, varid, storage;
|
||||
char name_in[NC_MAX_NAME + 1];
|
||||
size_t len;
|
||||
size_t cs[NDIMS3] = {0, 0, 0};
|
||||
int cache = MEGABYTE;
|
||||
int ndims, dimid[NDIMS3];
|
||||
float hor_data[LAT_LEN * LON_LEN];
|
||||
int read_1_us, avg_read_us;
|
||||
float ts_data[TIME_LEN];
|
||||
size_t start[NDIMS3], count[NDIMS3];
|
||||
int deflate, shuffle, deflate_level;
|
||||
struct timeval start_time, end_time, diff_time;
|
||||
|
||||
printf("\n*** Testing netcdf-4 vs. netcdf-3 performance.\n");
|
||||
if (complain(read_file(FILE_NAME_1))) ERR;
|
||||
if (complain(read_file(FILE_NAME_2))) ERR;
|
||||
|
||||
SUMMARIZE_ERR;
|
||||
FINAL_RESULTS;
|
||||
}
|
||||
|