mirror of https://github.com/Unidata/netcdf-c.git (synced 2024-12-09 08:11:38 +08:00)
36102e3c32
re: Issue https://github.com/Unidata/netcdf-c/issues/2190

The primary purpose of this PR is to improve the utf8 support for Windows. This is pursuant to a change in Windows that supports utf8 natively (almost). The "almost" means that Windows is still utf16 internally, and the set of characters representable by utf8 is larger than those representable by utf16. This leaves open the question in the Issue about handling the Windows 1252 character set.

This required the following changes:
1. Test the Windows build and major version in order to see if native utf8 is supported.
2. If native utf8 is supported, modify dpathmgr.c to call the 8-bit versions of the Windows fopen() and open() functions.
3. In support of this, programs that use XGetOpt (Windows versions) need to get the command line as utf8 and then parse it to argc+argv as utf8. This requires using a homegrown command line parser named XCommandLineToArgvA.
4. Add a utility program called "acpget" that prints out the current Windows code page and locale.

Additionally, some technical debt was cleaned up as follows:
1. Unify all the places which attempt to read all or part of a file into the dutil.c#NC_readfile code.
2. Similarly, unify all the code that creates temp files into the dutil.c#NC_mktmp code.
3. Convert almost all remaining calls to fopen() and open() to NCfopen() and NCopen3(). This ensures that path management is used consistently. This touches a number of files.
4. Change extern to EXTERNL as needed to get it to work under Windows.
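As a minimal sketch of the kind of check item 1 above calls for, the fragment below tests whether the process is already running with the UTF-8 code page active. It uses the Win32 GetACP()/CP_UTF8 interface rather than the build/major-version test the PR describes, and the helper name is hypothetical, not the actual code in dpathmgr.c.

#ifdef _WIN32
#include <windows.h>

/* Hypothetical helper (illustration only): return nonzero if the active
 * ANSI code page is UTF-8, in which case utf8 strings can be handed
 * directly to the 8-bit fopen()/open() paths. */
static int
win32_native_utf8(void)
{
    return (GetACP() == CP_UTF8); /* CP_UTF8 == 65001 */
}
#endif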
233 lines
7.2 KiB
C
/* This is part of the netCDF package. Copyright 2005-2018 University
   Corporation for Atmospheric Research/Unidata. See COPYRIGHT file for
   conditions of use.

   This is a benchmark program which tests file writes with compressed
   data.

   Ed Hartnett
*/

#include <nc_tests.h>
#include "err_macros.h"
#include <hdf5.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h> /* Extra high precision time info. */

#define NDIMS1 1
#define NDIMS 3
#define FILE_NAME "tst_files3.nc"
#define X_LEN 120
#define Y_LEN 64
#define Z_LEN 128
#define NUM_TRIES 200

/* Prototype from tst_utils.c. */
int nc4_timeval_subtract(struct timeval *result, struct timeval *x,
                         struct timeval *y);

/* Write the test data to a netCDF-4 file one time/lat slice at a time
 * with nc_put_vara_float. */
int dump_file2(const float *data, int docompression, int usedefdim)
{
   int ncid, dimids[NDIMS], var;
   size_t start[NDIMS] = {0, 0, 0};
   size_t count[NDIMS] = {1, 1, Z_LEN};

   if (nc_create(FILE_NAME, NC_NETCDF4, &ncid)) ERR_RET;
   if (nc_def_dim(ncid, "time", X_LEN, &dimids[0])) ERR_RET;
   if (nc_def_dim(ncid, "lat", Y_LEN, &dimids[1])) ERR_RET;
   if (nc_def_dim(ncid, "lon", Z_LEN, &dimids[2])) ERR_RET;
   if (nc_def_var(ncid, "test", NC_FLOAT, NDIMS, dimids, &var)) ERR_RET;
   if (docompression)
      if (nc_def_var_deflate(ncid, var, 1, 1, 1)) ERR_RET;
   if (nc_enddef(ncid)) ERR_RET;
   for (start[0] = 0; start[0] < X_LEN; start[0]++)
      for (start[1] = 0; start[1] < Y_LEN; start[1]++)
         if (nc_put_vara_float(ncid, var, start, count, data)) ERR_RET;
   if (nc_close(ncid)) ERR_RET;

   return 0;
}

int dump_file(const float *data, int docompression, int usedefdim)
{
   int ncmode, ncid, dimids[NDIMS], var;
   size_t start[NDIMS] = {0, 0, 0}, count[NDIMS] = {X_LEN, Y_LEN, Z_LEN};
   ptrdiff_t stride[NDIMS] = {1, 1, 1};

   ncmode = NC_CLOBBER|NC_NETCDF4;

   if (nc_create(FILE_NAME, ncmode, &ncid)) ERR_RET;
   if (nc_def_dim(ncid, "time", X_LEN, &dimids[0])) ERR_RET;
   if (nc_def_dim(ncid, "lat", Y_LEN, &dimids[1])) ERR_RET;
   if (nc_def_dim(ncid, "lon", Z_LEN, &dimids[2])) ERR_RET;
   if (nc_def_var(ncid, "test", NC_FLOAT, NDIMS, dimids, &var)) ERR_RET;
   if (docompression)
      if (nc_def_var_deflate(ncid, var, 1, 1, 1)) ERR_RET;
   if (nc_enddef(ncid)) ERR_RET;
   if (nc_put_vars_float(ncid, var, start, count, stride, data)) ERR_RET;
   if (nc_close(ncid)) ERR_RET;

   return 0;
}

int dump_file3(const float *data, int docompression, int usedefdim)
{
   int ncmode, ncid, dimids[NDIMS], var;
   size_t start[NDIMS] = {0, 0, 0}, count[NDIMS] = {X_LEN, Y_LEN, Z_LEN};
   ptrdiff_t stride[NDIMS] = {1, 1, 1};

   ncmode = NC_CLOBBER|NC_NETCDF4;

   if (nc_create(FILE_NAME, ncmode, &ncid)) ERR_RET;
   if (nc_def_dim(ncid, "time", X_LEN, &dimids[0])) ERR_RET;
   if (nc_def_dim(ncid, "lat", Y_LEN, &dimids[1])) ERR_RET;
   if (nc_def_dim(ncid, "lon", Z_LEN, &dimids[2])) ERR_RET;
   if (nc_def_var(ncid, "test", NC_FLOAT, NDIMS, dimids, &var)) ERR_RET;
   if (docompression)
      if (nc_def_var_deflate(ncid, var, 1, 1, 1)) ERR_RET;
   if (nc_enddef(ncid)) ERR_RET;
   if (nc_put_vars_float(ncid, var, start, count, stride, data)) ERR_RET;
   if (nc_close(ncid)) ERR_RET;

   return 0;
}

/* Write the same data directly with the HDF5 API, one time/lat slice
 * at a time, for comparison with the netCDF-4 writes. */
int dump_hdf_file(const float *data, int docompression)
{
   hid_t file_id, dataset_id, propid;
   hid_t file_spaceid, mem_spaceid, access_plistid, xfer_plistid;
   hsize_t dims[NDIMS] = {X_LEN, Y_LEN, Z_LEN};
   hsize_t start[NDIMS] = {0, 0, 0};
   hsize_t count[NDIMS] = {1, 1, Z_LEN};

   /* Create file. */
   file_id = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC,
                       H5P_DEFAULT, H5P_DEFAULT);

   /* Create property list for dataset. */
   propid = H5Pcreate(H5P_DATASET_CREATE);

   if (docompression)
   {
      if (H5Pset_layout(propid, H5D_CHUNKED) < 0) ERR;
      if (H5Pset_chunk(propid, NDIMS, dims) < 0) ERR;
      /* values[0] = 9; */
      /* status = H5Pset_filter(propid, H5Z_FILTER_DEFLATE, 0, 1, &values[0]); */
      /* printf("deflate status is: %i\n", status); */
      /* Set deflate level. */
      if (H5Pset_deflate(propid, 1)) ERR;
   }
   if ((file_spaceid = H5Screate_simple(NDIMS, dims, dims)) < 0) ERR;

   /* Set up the cache. */
   if ((access_plistid = H5Pcreate(H5P_DATASET_ACCESS)) < 0) ERR;
   if (H5Pset_chunk_cache(access_plistid, CHUNK_CACHE_NELEMS,
                          CHUNK_CACHE_SIZE, CHUNK_CACHE_PREEMPTION) < 0) ERR;

   /* Create the dataset. */
   if ((dataset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_FLOAT, file_spaceid,
                                H5P_DEFAULT, propid, access_plistid)) < 0) ERR;

   /* if ((file_spaceid = H5Dget_space(dataset_id)) < 0) ERR;*/
   if ((mem_spaceid = H5Screate_simple(NDIMS, count, NULL)) < 0) ERR;
   if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0) ERR;

   /* Write the dataset. */
   for (start[0] = 0; start[0] < X_LEN; start[0]++)
      for (start[1] = 0; start[1] < Y_LEN; start[1]++)
      {
         if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, start, NULL,
                                 count, NULL) < 0) ERR_RET;
         if (H5Dwrite(dataset_id, H5T_NATIVE_FLOAT, mem_spaceid, file_spaceid,
                      xfer_plistid, data) < 0) ERR_RET;
      }

   /* Close property lists. */
   if (H5Pclose(propid) < 0) ERR;
   if (H5Pclose(access_plistid) < 0) ERR;
   if (H5Pclose(xfer_plistid) < 0) ERR;

   /* Close spaces. */
   if (H5Sclose(file_spaceid) < 0) ERR;
   if (H5Sclose(mem_spaceid) < 0) ERR;

   /* End access to the dataset and release resources used by it. */
   if (H5Dclose(dataset_id) < 0) ERR;

   /* Close file. */
   if (H5Fclose(file_id) < 0) ERR;
   return 0;
}

/* Report the process's data/stack memory use (in pages) by reading
 * /proc/<pid>/statm. Sets *mem_used to -1 if the file cannot be read. */
void
get_mem_used2(int *mem_used)
{
   char buf[30];
   FILE *pf;

   snprintf(buf, 30, "/proc/%u/statm", (unsigned)getpid());
   pf = NCfopen(buf, "r");
   if (pf) {
      unsigned size;     /* total program size */
      unsigned resident; /* resident set size */
      unsigned share;    /* shared pages */
      unsigned text;     /* text (code) */
      unsigned lib;      /* library */
      unsigned data;     /* data/stack */
      /* unsigned dt; dirty pages (unused in Linux 2.6) */
      if (fscanf(pf, "%u %u %u %u %u %u", &size, &resident, &share,
                 &text, &lib, &data) == 6)
         *mem_used = data;
      else
         *mem_used = -1;
      fclose(pf);
   }
   else
      *mem_used = -1;
}

int main(void)
{
   float data[X_LEN * Y_LEN * Z_LEN];
   int i;

   printf("\n*** Testing netcdf-4 file functions with caching.\n");

   /* Initialize data. */
   for (i = 0; i < (X_LEN * Y_LEN * Z_LEN); i++)
      data[i] = i;
   printf("*** testing a bunch of file writes with compressed data...\n");
   {
      int mem_used, mem_used1;

      printf("*** testing netcdf-4 writes...\n");
      for (i = 0; i < NUM_TRIES; i++)
      {
         get_mem_used2(&mem_used);
         if (dump_file3(data, 1, 0)) ERR_RET;
         get_mem_used2(&mem_used1);
         if (mem_used1 - mem_used)
            printf("delta %d bytes of memory for try %d\n", mem_used1 - mem_used, i);
      }
      printf("*** testing HDF5 writes...\n");
      for (i = 0; i < NUM_TRIES; i++)
      {
         get_mem_used2(&mem_used);
         if (dump_hdf_file(data, 1)) ERR_RET;
         get_mem_used2(&mem_used1);
         if (mem_used1 - mem_used)
            printf("delta %d bytes of memory for try %d\n", mem_used1 - mem_used, i);
      }
      printf("*** testing netcdf-4 writes again...\n");
      for (i = 0; i < NUM_TRIES; i++)
      {
         get_mem_used2(&mem_used);
         if (dump_file2(data, 1, 0)) ERR_RET;
         get_mem_used2(&mem_used1);
         if (mem_used1 - mem_used)
            printf("delta %d bytes of memory for try %d\n", mem_used1 - mem_used, i);
      }
   }
   SUMMARIZE_ERR;
   FINAL_RESULTS;
}