Mirror of https://github.com/HDFGroup/hdf5.git
[svn-r9071] Purpose:
    Make the collective chunk I/O test more general.

Description:
    Previously the collective chunk I/O test only worked for exactly 4 processors, with the dimension sizes set small; running with more than 4 processors (5, 6, or more) made the test fail.

Solution:
    To make the test case more general, the dimension sizes of the data are enlarged (now 288 in each dimension) and the disjoint hyperslab selection is recalculated. The test cases should now pass with 5, 6, or 12 processors. Note that there is nothing wrong with the library implementation; it is the test case itself that caused the failure when more than 4 processors were used.

Platforms tested:
    Only on eirene, since only the test code was modified slightly and testing the parallel case is very slow.

Misc. update:
parent c221205410
commit 2a866e549f
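Before the diff itself, here is a small standalone sketch of the recalculated BYROW_DISCONT arithmetic described in the commit message. The 6-process run, the loop, and the printout are assumptions of this illustration only; the dimension sizes, block, stride, and count formulas mirror the changed code below.

/* Standalone illustration (not library or test code): evaluate the new
 * BYROW_DISCONT selection arithmetic for a hypothetical 6-process run.
 * SPACE_DIM1/SPACE_DIM2 match the new defines in the diff below. */
#include <stdio.h>

#define SPACE_DIM1 288
#define SPACE_DIM2 288

int main(void)
{
    int mpi_size = 6;                          /* assumed process count */

    for (int mpi_rank = 0; mpi_rank < mpi_size; mpi_rank++) {
        unsigned long block[2]  = {1, 1};      /* single-element blocks */
        unsigned long stride[2] = {3, 3};      /* 3 apart in each dimension */
        unsigned long count[2], start[2];

        /* same integer arithmetic as the new code in ccslab_set() */
        count[0] = (SPACE_DIM1 / mpi_size) / (stride[0] * block[0]);
        count[1] = (SPACE_DIM2) / (stride[1] * block[1]);
        start[0] = (unsigned long)(SPACE_DIM1 / mpi_size) * mpi_rank;
        start[1] = 0;

        /* with 6 processes: start[0] = 0, 48, ..., 240 and count = (16, 96) */
        printf("rank %d: start=(%lu,0) count=(%lu,%lu) -> %lu disjoint blocks\n",
               mpi_rank, start[0], count[0], count[1], count[0] * count[1]);
    }
    return 0;
}

For process counts that do not divide 288 evenly (such as 5), the integer divisions shrink each rank's band and selection slightly, so the selections stay disjoint and in bounds even though a few rows go unselected.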
@@ -78,8 +78,11 @@ coll_chunk3(void)
 {

   char *filename;
+  int mpi_size;
+  MPI_Comm comm = MPI_COMM_WORLD;
+  MPI_Comm_size(comm,&mpi_size);
   filename = (char *) GetTestParameters();
-  coll_chunktest(filename,4,BYROW_CONT);
+  coll_chunktest(filename,mpi_size,BYROW_CONT);

 }
@@ -88,8 +91,12 @@ coll_chunk4(void)
 {

   char *filename;
+  int mpi_size;
+
+  MPI_Comm comm = MPI_COMM_WORLD;
+  MPI_Comm_size(comm,&mpi_size);
   filename = (char *) GetTestParameters();
-  coll_chunktest(filename,4,BYROW_DISCONT);
+  coll_chunktest(filename,mpi_size*2,BYROW_DISCONT);

 }
@@ -207,7 +214,7 @@ coll_chunktest(char* filename,int chunk_factor,int select_factor) {
 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
   status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,&prop_value);
   VRFY((status >= 0),"testing property list get succeeded");
-  if(chunk_factor == 4 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
+  if(chunk_factor == mpi_size*2 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
     VRFY((prop_value == 0), "H5Dwrite shouldn't use MPI Collective IO call");
   }
   else {
@@ -290,7 +297,7 @@ coll_chunktest(char* filename,int chunk_factor,int select_factor) {
 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
   status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_NAME,&prop_value);
   VRFY((status >= 0),"testing property list get succeeded");
-  if(chunk_factor == 4 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
+  if(chunk_factor == mpi_size*2 && select_factor == BYROW_DISCONT) { /* suppose to use independent */
     VRFY((prop_value == 0), "H5Dread shouldn't use MPI Collective IO call");
   }
   else {
@@ -343,12 +350,18 @@ ccslab_set(int mpi_rank, int mpi_size, hssize_t start[], hsize_t count[],
       break;
     case BYROW_DISCONT:
       /* Each process takes several disjoint blocks. */
-      block[0] = 2;
-      block[1] = 2;
+      block[0] = 1;
+      block[1] = 1;
+      /*
       stride[0] = 3;
       stride[1] = 6;
       count[0] = 2;
       count[1] = 3;
+      */
+      stride[0] = 3;
+      stride[1] = 3;
+      count[0] = (SPACE_DIM1/mpi_size)/(stride[0]*block[0]);
+      count[1] =(SPACE_DIM2)/(stride[1]*block[1]);
       start[0] = SPACE_DIM1/mpi_size*mpi_rank;
       start[1] = 0;
       if (VERBOSE_MED) printf("slab_set BYROW_DISCONT\n");
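A second hypothetical sketch, following the hunk above: how the start/stride/count/block values computed by ccslab_set() would be applied to a 288x288 dataspace. This is not the test's code; it assumes rank 2 of a 6-process run and uses the modern H5Sselect_hyperslab signature (hsize_t start[]), whereas the 1.6-era prototype in the hunk header passes hssize_t for start.

/* Hypothetical sketch only: apply the recalculated BYROW_DISCONT parameters
 * to a dataspace and report the resulting selection. */
#include <stdio.h>
#include "hdf5.h"

#define SPACE_DIM1 288
#define SPACE_DIM2 288

int main(void)
{
    int     mpi_rank  = 2, mpi_size = 6;       /* assumed rank and size */
    hsize_t dims[2]   = {SPACE_DIM1, SPACE_DIM2};
    hsize_t block[2]  = {1, 1}, stride[2] = {3, 3};
    hsize_t count[2], start[2];

    count[0] = (SPACE_DIM1 / mpi_size) / (stride[0] * block[0]);
    count[1] = (SPACE_DIM2) / (stride[1] * block[1]);
    start[0] = (hsize_t)(SPACE_DIM1 / mpi_size) * mpi_rank;
    start[1] = 0;

    hid_t space = H5Screate_simple(2, dims, NULL);
    /* Select count[0] x count[1] disjoint 1x1 blocks inside this rank's
     * 48-row band, spaced 3 apart in both dimensions. */
    herr_t ret = H5Sselect_hyperslab(space, H5S_SELECT_SET,
                                     start, stride, count, block);

    printf("valid=%d npoints=%lld\n", H5Sselect_valid(space) > 0,
           (long long)H5Sget_select_npoints(space));   /* 16 * 96 = 1536 */
    H5Sclose(space);
    return ret < 0;
}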
@@ -122,8 +122,8 @@
 #define FACC_MPIPOSIX 0x8 /* MPIPOSIX */

 /*Constants for collective chunk definitions */
-#define SPACE_DIM1 24
-#define SPACE_DIM2 24
+#define SPACE_DIM1 288
+#define SPACE_DIM2 288
 #define BYROW_CONT 1
 #define BYROW_DISCONT 2
 #define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"