mirror of
https://github.com/HDFGroup/hdf5.git
synced 2024-12-03 02:32:04 +08:00
061ed07676
New features Description: Some testers found the filename lengths too short. Changed it to use FILENAME_MAX, which is usually defined in stdio.h; if it is not, it is set to 512, which should be sufficient for users without exceeding any system limits. Also added a new test parameter, ndatasets, so that the tester can specify a different number of datasets for the multiple-dataset tests. Changed the datatype of the created datasets to DOUBLE. This eliminates the current race conditions, but the race bugs during conversion still need to be tracked down and squashed. Platforms tested: Modi4 -64.
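As a rough illustration of the filename-length fallback described in the commit message, the pattern might look like the sketch below. This is an assumption about the shape of the change, not the actual test-suite code; the buffer name is hypothetical.

#include <stdio.h>   /* normally defines FILENAME_MAX */

/* Assumed sketch: fall back to 512 when stdio.h does not provide
 * FILENAME_MAX; the real macro name in the test code may differ. */
#ifndef FILENAME_MAX
#define FILENAME_MAX 512
#endif

char test_filename[FILENAME_MAX];   /* filename buffer sized by the chosen limit */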
85 lines
2.3 KiB
C
/* $Id$ */

/*
 * Parallel tests for file operations
 */

#include <testphdf5.h>

/*
 * Test file access by a communicator other than COMM_WORLD.
 * Split COMM_WORLD into two: one (even_comm) contains the original
 * processes of even ranks, the other (odd_comm) contains the original
 * processes of odd ranks. Processes in even_comm create a file, then
 * close it, using even_comm. Processes in odd_comm just do a barrier
 * using odd_comm. Then they all do a barrier using COMM_WORLD.
 * If the file creation and close do not take the correct collective
 * action according to the communicator argument, the processes will
 * freeze up sooner or later due to mismatched barriers.
 */
void
test_split_comm_access(char *filename)
{
    int mpi_size, mpi_rank;
    MPI_Comm comm;
    MPI_Info info = MPI_INFO_NULL;
    int color, mrc;
    int newrank, newprocs;
    hid_t fid;                  /* file ID */
    hid_t acc_tpl;              /* file access properties */
    herr_t ret;                 /* generic return value */

    if (verbose)
        printf("Split Communicator access test on file %s\n",
            filename);

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    color = mpi_rank % 2;
    mrc = MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &comm);
    VRFY((mrc == MPI_SUCCESS), "");
    MPI_Comm_size(comm, &newprocs);
    MPI_Comm_rank(comm, &newrank);

    if (color) {
        /* odd-rank processes */
        mrc = MPI_Barrier(comm);
        VRFY((mrc == MPI_SUCCESS), "");
    } else {
        /* even-rank processes */
        int sub_mpi_rank;       /* rank in the sub-communicator */
        MPI_Comm_rank(comm, &sub_mpi_rank);

        /* set up file access template */
        acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
        VRFY((acc_tpl >= 0), "");

        /* set parallel access with communicator */
        ret = H5Pset_fapl_mpio(acc_tpl, comm, info);
        VRFY((ret >= 0), "");

        /* create the file collectively */
        fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
        VRFY((fid >= 0), "H5Fcreate succeeded");

        /* release the file-access template */
        ret = H5Pclose(acc_tpl);
        VRFY((ret >= 0), "");

        /* close the file */
        ret = H5Fclose(fid);
        VRFY((ret >= 0), "");

        /* delete the test file */
        if (sub_mpi_rank == 0) {
            mrc = MPI_File_delete(filename, info);
            VRFY((mrc == MPI_SUCCESS), "");
        }
    }
    mrc = MPI_Barrier(MPI_COMM_WORLD);
    VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded");
}
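For context on the even/odd communicator split the test relies on, here is a minimal, self-contained sketch (separate from the file above, with illustrative names) showing how MPI_Comm_split partitions MPI_COMM_WORLD by rank parity:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int world_rank, sub_rank, color;
    MPI_Comm sub_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    /* color 0 -> "even_comm", color 1 -> "odd_comm" (names are illustrative) */
    color = world_rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, color, world_rank, &sub_comm);
    MPI_Comm_rank(sub_comm, &sub_rank);

    printf("world rank %d -> %s rank %d\n",
           world_rank, color ? "odd_comm" : "even_comm", sub_rank);

    MPI_Comm_free(&sub_comm);
    MPI_Finalize();
    return 0;
}

Each process ends up in exactly one sub-communicator, which is why a collective call issued on even_comm only (such as the H5Fcreate above) must not silently involve the odd-rank processes.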