Mirror of https://github.com/HDFGroup/hdf5.git, synced 2024-11-27 02:10:55 +08:00
61ab6a6b46
Added features

Description:
    There were no automatic tests for transferring zero elements.

Solution:
    t_dset.c: Added two new patterns, ZROW (zero rows for process 0) and
    ZCOL (zero columns for process 0). The ZROW test was added but it
    failed because the current library does not accept it. It is not
    compiled in now; the library needs to be fixed before turning it back
    on and before adding the ZCOL test.

    t_mdset.c: Added a statement to show progress. The MPI_Barrier() call
    also gets the processes synchronized. That eliminates the race
    condition, but it is not a permanent solution; the library code needs
    to be fixed.

    testphdf5.c: Added a bunch of MPI_Type_XXX debug code. Added the -md
    option to skip the multiple-datasets tests. Changed the cosmetic
    appearance of the banner messages.

    testphdf5.h: When an error is detected, the old way was to call
    MPI_Finalize() before exiting. This sometimes hangs because some
    processes may be waiting for a message with a different tag. Changed
    to call MPI_Abort() for now so that the whole MPI job aborts rather
    than hanging after exceeding resource limits. Added the definitions
    of ZROW and ZCOL.

Platforms tested:
    Modi4 -64.
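The ZROW pattern described above comes down to one rank taking part in a
collective transfer while selecting zero elements. Below is a minimal sketch
of how such a selection could be set up; the names select_zrow and ZROW_SIZE
are illustrative assumptions, not the actual t_dset.c code, and the hssize_t
start type matches the H5Sselect_hyperslab signature of this API era.

/* Hypothetical sketch of a ZROW-style selection: rank 0 transfers zero
 * rows while the remaining ranks (assumed nprocs >= 2) divide the rows
 * among themselves.  Not the actual t_dset.c implementation. */
#include <hdf5.h>

#define ZROW_SIZE 32 /* matches SIZE in the file below */

static void select_zrow(hid_t filespace, int mpi_rank, int nprocs)
{
    hssize_t start[2]; /* hssize_t start, as in this API era */
    hsize_t  count[2];

    if (mpi_rank == 0) {
        /* Zero elements for process 0: an empty selection. */
        H5Sselect_none(filespace);
    }
    else {
        /* Split the rows evenly among the other nprocs - 1 processes. */
        count[0] = ZROW_SIZE / (nprocs - 1);
        count[1] = ZROW_SIZE;
        start[0] = (hssize_t)((mpi_rank - 1) * count[0]);
        start[1] = 0;
        H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, NULL);
    }
}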
62 lines, 1.6 KiB, C
#include <testphdf5.h>

#define DIM      2
#define SIZE     32
#define NUMITEMS 500 /* 988 */

void multiple_dset_write(char *filename)
{
    int      i, j, nprocs, rank;
    hid_t    iof, plist, dataset, memspace, filespace;
    hssize_t chunk_origin[DIM];
    hsize_t  chunk_dims[DIM], file_dims[DIM];
    double   outme[SIZE][SIZE];

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    VRFY((nprocs <= SIZE), "nprocs <= SIZE");

    /* Each process owns a contiguous block of SIZE/nprocs columns
     * spanning all SIZE rows of the 2-D dataset. */
    chunk_origin[0] = 0;
    chunk_origin[1] = rank * (SIZE / nprocs);
    chunk_dims[0]   = SIZE;
    chunk_dims[1]   = SIZE / nprocs;

    for (i = 0; i < DIM; i++)
        file_dims[i] = SIZE;

    /* Fill the output buffer with this process's rank so each block
     * in the file identifies its writer. */
    for (i = 0; i < SIZE; i++)
        for (j = 0; j < SIZE; j++)
            outme[i][j] = rank;

    /* Create the file collectively using an MPI-IO file access
     * property list. */
    plist = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL);
    iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
    H5Pclose(plist);

    /* The memory space covers this process's whole buffer block; the
     * file space selects the matching hyperslab of the dataset. */
    memspace  = H5Screate_simple(DIM, chunk_dims, NULL);
    filespace = H5Screate_simple(DIM, file_dims, NULL);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, NULL, chunk_dims, NULL);

    /* Create and write NUMITEMS datasets in sequence.  The doubles in
     * memory are converted to float on the way into the file. */
    for (i = 0; i < NUMITEMS; i++) {
        char dname[100];

        sprintf(dname, "dataset %d", i);
        dataset = H5Dcreate(iof, dname, H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
        VRFY((dataset > 0), "dataset create succeeded");

        H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);

        H5Dclose(dataset);

        /* Show progress every 10 datasets; the barrier also keeps the
         * processes synchronized (see the commit message above). */
        if (!((i + 1) % 10)) {
            printf("created %d datasets\n", i + 1);
            MPI_Barrier(MPI_COMM_WORLD);
        }
    }

    H5Sclose(filespace);
    H5Sclose(memspace);
    H5Fclose(iof);
}