[svn-r8924] Purpose:

To test collective chunk IO properly.

Description:
See the previous message.

Solution:
See the previous message.

Platforms tested:
arabica(Sol 2.7), eirene(Linux), copper(AIX)
Misc. update:
Author: MuQun Yang
Date: 2004-07-21 18:42:10 -05:00
Parent: 1232d53a32
Commit: c7ca89eeda

3 changed files with 59 additions and 21 deletions
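
For orientation before the diff: the new checks verify whether an H5Dwrite/H5Dread actually went through the MPI collective path by inserting a temporary marker property (PROP_NAME) into the dataset transfer property list and reading it back after the call; the checks in the diff imply the library clears the marker to 0 when it falls back to independent I/O. A minimal sketch of that pattern, with a hypothetical helper name and the VRFY error checking omitted (dataset, file_dataspace and data_array1 are assumed to be set up as in coll_chunktest()):

#include <stdio.h>
#include "hdf5.h"

#define PROP_NAME "__test__ccfoo___"   /* same marker name the test registers */

/* Hypothetical helper (not part of the commit).  dataset, file_dataspace and
 * data_array1 are assumed to be set up exactly as in coll_chunktest(). */
static void
check_collective_write(hid_t dataset, hid_t file_dataspace,
                       int *data_array1, int expect_collective)
{
    hid_t xfer_plist;
    int   prop_value = 1;    /* assumed to be cleared to 0 on independent fallback */

    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);

    /* attach the temporary marker property to the transfer property list */
    H5Pinsert(xfer_plist, PROP_NAME, sizeof(int), &prop_value,
              NULL, NULL, NULL, NULL, NULL);

    H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
             xfer_plist, data_array1);

    /* read the marker back and compare against the expected I/O mode */
    H5Pget(xfer_plist, PROP_NAME, &prop_value);
    if (expect_collective && prop_value == 0)
        printf("H5Dwrite doesn't use MPI Collective IO call, something is wrong \n");
    if (!expect_collective && prop_value == 1)
        printf("H5Dwrite shouldn't use MPI Collective IO call, something is wrong \n");

    /* remove the marker before closing the property list */
    H5Premove(xfer_plist, PROP_NAME);
    H5Pclose(xfer_plist);
}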

View File

@@ -67,10 +67,18 @@ coll_chunk3(){
}
void
coll_chunk4(){
char *filename;
filename = (char *) GetTestParameters();
coll_chunktest(filename,4,BYROW_DISCONT);
}
void
coll_chunktest(char* filename,int chunk_factor,int select_factor) {
hid_t file,dataset, file_dataspace;
hid_t acc_plist,xfer_plist,crp_plist;
hsize_t dims[RANK], chunk_dims[RANK];
@@ -80,9 +88,9 @@ coll_chunktest(char* filename,int chunk_factor,int select_factor) {
herr_t status;
hssize_t start[RANK];
hsize_t count[RANK],stride[RANK],block[RANK];
int prop_value;
/* char * filename;*/
int mpi_size,mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -166,22 +174,37 @@ coll_chunktest(char* filename,int chunk_factor,int select_factor) {
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
prop_value = 1;
status = H5Pinsert(xfer_plist,PROP_NAME,sizeof(int),&prop_value,
NULL,NULL,NULL,NULL,NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
/* write data collectively */
status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
xfer_plist, data_array1);
VRFY((status >= 0),"dataset write succeeded");
status = H5Pget(xfer_plist,PROP_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
if(chunk_factor == 4 && select_factor == BYROW_DISCONT) { /* supposed to use independent */
if(prop_value == 1)
printf("H5Dwrite shouldn't use MPI Collective IO call, something is wrong \n");
}
else {
if(prop_value == 0)
printf("H5Dwrite doesn't use MPI Collective IO call, something is wrong \n");
}
status = H5Dclose(dataset);
VRFY((status >= 0),"");
/* check whether using collective IO */
/* Should use H5Pget and H5Pinsert to handle this test. */
/* status = H5Pclose(xfer_plist);
VRFY((status >= 0),"");
status = H5Premove(xfer_plist,PROP_NAME);
VRFY((status >= 0),"property list removed");
status = H5Pclose(xfer_plist);
VRFY((status >= 0),"property list closed");
*/
status = H5Sclose(file_dataspace);
VRFY((status >= 0),"");
@@ -228,32 +251,42 @@ coll_chunktest(char* filename,int chunk_factor,int select_factor) {
/* fill dataset with test data */
ccdataset_fill(start, stride,count,block, data_origin1);
/* read data collectively */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
VRFY((xfer_plist >= 0),"");
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((status>= 0),"MPIO collective transfer property succeeded");
prop_value = 1;
status = H5Pinsert(xfer_plist,PROP_NAME,sizeof(int),&prop_value,
NULL,NULL,NULL,NULL,NULL);
VRFY((status >= 0),"testing property list inserted succeeded");
status = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, file_dataspace,
xfer_plist, data_array1);
VRFY((status >= 0),"dataset read succeeded");
status = H5Pget(xfer_plist,PROP_NAME,&prop_value);
VRFY((status >= 0),"testing property list get succeeded");
if(chunk_factor == 4 && select_factor == BYROW_DISCONT) { /* supposed to use independent */
if(prop_value == 1)
printf("H5Dread shouldn't use MPI Collective IO call, something is wrong \n");
}
else {
if(prop_value == 0)
printf("H5Dread doesn't use MPI Collective IO call, something is wrong \n");
}
/* verify the read data with original expected data */
status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1);
if (status) nerrors++;
status = H5Premove(xfer_plist,PROP_NAME);
VRFY((status >= 0),"property list removed");
status = H5Pclose(xfer_plist);
VRFY((status >= 0),"property list closed");
/* close dataset collectively */
status=H5Dclose(dataset);
VRFY((status >= 0), "");
status=H5Pclose(xfer_plist);
VRFY((status >= 0),"");
/* release all IDs created */
H5Sclose(file_dataspace);

View File

@@ -45,7 +45,7 @@ int doindependent=1; /* independent test */
unsigned dobig=0; /* "big" dataset tests */
/* FILENAME and filenames must have the same number of names */
const char *FILENAME[14]={
const char *FILENAME[15]={
"ParaEg1",
"ParaEg2",
"ParaEg3",
@@ -59,8 +59,9 @@ const char *FILENAME[14]={
"ParaCC1",
"ParaCC2",
"ParaCC3",
"ParaCC4",
NULL};
char filenames[14][PATH_MAX];
char filenames[15][PATH_MAX];
hid_t fapl; /* file access property list */
#ifdef USE_PAUSE
@@ -475,6 +476,8 @@ int main(int argc, char **argv)
"noncontiguous collective chunk io",filenames[11]);
AddTest("coll. chunked 3", coll_chunk3,NULL,
"muliti-chunk collective chunk io",filenames[12]);
AddTest("coll. chunked 4", coll_chunk4,NULL,
"collective to independent chunk io",filenames[13]);
/* Display testing information */
TestInfo(argv[0]);
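
For reference, the pointer passed as the last AddTest() argument above is what GetTestParameters() returns inside the test body, so each collective-chunk test operates on its own file. The two ends of that wiring, condensed from the first and second files of this diff:

/* driver (second file of the diff): register the new test case */
AddTest("coll. chunked 4", coll_chunk4, NULL,
        "collective to independent chunk io", filenames[13]);

/* test source (first file of the diff): fetch the registered parameter and
 * run the discontiguous-by-row case expected to fall back to independent I/O */
void
coll_chunk4(void)
{
    char *filename;

    filename = (char *) GetTestParameters();
    coll_chunktest(filename, 4, BYROW_DISCONT);
}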

View File

@@ -129,6 +129,7 @@
#define BYROW_CONT 1
#define BYROW_DISCONT 2
#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"
#define PROP_NAME "__test__ccfoo___"
/* type definitions */
typedef struct H5Ptest_param_t /* holds extra test parameters */
{
@@ -173,6 +174,7 @@ void dataset_fillvalue(void);
void coll_chunk1();
void coll_chunk2();
void coll_chunk3();
void coll_chunk4();
/* some commonly used routines for collective chunk IO tests*/
void ccslab_set(int mpi_rank,int mpi_size,hssize_t start[],hsize_t count[],
hsize_t stride[],hsize_t block[],int mode);