[svn-r25921] - wrap MPI-3 usage with an MPI_VERSION compile-time check.

- skip the stdout/stderr unbuffering in the parallel tests on Windows builds
Mohamad Chaarawi 2014-12-23 09:54:47 -05:00
parent 443bc1200f
commit cbd0460b1b
4 changed files with 47 additions and 10 deletions
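
Both H5FD_mpio_read/H5FD_mpio_write hunks below follow the same compile-time pattern: declare the transfer counts as MPI_Count and call the MPI-3 *_x routines when the implementation provides them, otherwise fall back to plain int and the classic calls. A minimal, self-contained sketch of that pattern (illustrative code, not the HDF5 source; it tests MPI_VERSION >= 3 where the commit tests == 3):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

#if MPI_VERSION >= 3
    MPI_Count size;                   /* MPI-3: 64-bit-safe count type */
    MPI_Type_size_x(MPI_INT, &size);
    printf("MPI_INT is %lld bytes\n", (long long)size);
#else
    int size;                         /* pre-MPI-3: plain int */
    MPI_Type_size(MPI_INT, &size);
    printf("MPI_INT is %d bytes\n", size);
#endif

    MPI_Finalize();
    return 0;
}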

View File

@@ -1454,10 +1454,16 @@ H5FD_mpio_read(H5FD_t *_file, H5FD_mem_t UNUSED type, hid_t dxpl_id, haddr_t add
int mpi_code; /* mpi return code */
MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
int size_i; /* Integer copy of 'size' to read */
#if MPI_VERSION == 3
MPI_Count bytes_read; /* Number of bytes read in */
int n;
MPI_Count type_size; /* MPI datatype used for I/O's size */
MPI_Count io_size; /* Actual number of bytes requested */
#else
int bytes_read; /* Number of bytes read in */
int type_size; /* MPI datatype used for I/O's size */
int io_size; /* Actual number of bytes requested */
#endif
int n;
H5P_genplist_t *plist = NULL; /* Property list pointer */
hbool_t use_view_this_time = FALSE;
herr_t ret_value = SUCCEED;
@@ -1574,11 +1580,19 @@ H5FD_mpio_read(H5FD_t *_file, H5FD_mem_t UNUSED type, hid_t dxpl_id, haddr_t add
}
/* How many bytes were actually read? */
if (MPI_SUCCESS != (mpi_code=MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read)))
#if MPI_VERSION == 3
if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read)))
#else
if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read)))
#endif
HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
/* Get the type's size */
if (MPI_SUCCESS != (mpi_code=MPI_Type_size_x(buf_type,&type_size)))
#if MPI_VERSION == 3
if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size)))
#else
if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size)))
#endif
HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code)
/* Compute the actual number of bytes requested */
@@ -1734,10 +1748,16 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
MPI_Status mpi_stat; /* Status from I/O operation */
MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
int mpi_code; /* MPI return code */
#if MPI_VERSION == 3
MPI_Count bytes_written;
int size_i;
MPI_Count type_size; /* MPI datatype used for I/O's size */
MPI_Count io_size; /* Actual number of bytes requested */
#else
int bytes_written;
int type_size; /* MPI datatype used for I/O's size */
int io_size; /* Actual number of bytes requested */
#endif
int size_i;
hbool_t use_view_this_time = FALSE;
H5P_genplist_t *plist = NULL; /* Property list pointer */
herr_t ret_value = SUCCEED;
@@ -1864,11 +1884,19 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
}
/* How many bytes were actually written? */
#if MPI_VERSION == 3
if(MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_written)))
#else
if(MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_written)))
#endif
HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code)
/* Get the type's size */
#if MPI_VERSION == 3
if(MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size)))
#else
if(MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size)))
#endif
HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code)
/* Compute the actual number of bytes requested */
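
The counts gathered in these hunks feed a short-I/O check a little further down in the driver (outside the lines shown). A hedged sketch of that kind of check, with illustrative names, MPI_VERSION >= 3 instead of == 3, and the assumption that buf_type is built from MPI_BYTE (as it is in this driver), so element counts are byte counts:

#include <mpi.h>

/* Illustrative, not the HDF5 code: return 1 if the completed transfer in
 * `stat` covered all `size_i` elements of `buf_type`, 0 if it was short,
 * -1 if the MPI calls fail. */
int transfer_was_complete(MPI_Status *stat, MPI_Datatype buf_type, int size_i)
{
#if MPI_VERSION >= 3
    MPI_Count bytes_xfered = 0, type_size = 0, io_size;
    if (MPI_SUCCESS != MPI_Get_elements_x(stat, buf_type, &bytes_xfered) ||
        MPI_SUCCESS != MPI_Type_size_x(buf_type, &type_size))
        return -1;
    io_size = type_size * (MPI_Count)size_i;   /* bytes requested, 64-bit safe */
#else
    int bytes_xfered = 0, type_size = 0, io_size;
    if (MPI_SUCCESS != MPI_Get_elements(stat, MPI_BYTE, &bytes_xfered) ||
        MPI_SUCCESS != MPI_Type_size(buf_type, &type_size))
        return -1;
    io_size = type_size * size_i;              /* int-sized, hence the MPI-3 branch above */
#endif
    return bytes_xfered == io_size;
}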

View File

@@ -168,18 +168,23 @@ H5S_mpio_create_point_datatype (size_t elmt_size, hsize_t num_points,
if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
elmt_type_created = TRUE;
#if MPI_VERSION == 3
/* Create an MPI datatype for the whole point selection */
if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code)
#else
/* Allocate block sizes for MPI datatype call */
if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
/* Would be nice to have Create_Hindexed_block to avoid this array of all ones */
for(u = 0; u < num_points; u++)
blocks[u] = 1;
/* Create an MPI datatype for the whole point selection */
if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code)
#endif
/* Commit MPI datatype for later use */
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
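
The #else branch above reproduces what MPI_Type_create_hindexed_block does on pre-MPI-3 implementations by handing MPI_Type_create_hindexed an all-ones block-length array. A standalone sketch of that fallback (illustrative names, plain malloc instead of H5MM_malloc, and >= 3 instead of the commit's == 3):

#include <mpi.h>
#include <stdlib.h>

/* Build a datatype describing num_points blocks of one elmt_type each,
 * located at byte offsets disp[], then commit it. */
int make_point_type(int num_points, MPI_Aint *disp,
                    MPI_Datatype elmt_type, MPI_Datatype *new_type)
{
    int mpi_code;
#if MPI_VERSION >= 3
    mpi_code = MPI_Type_create_hindexed_block(num_points, 1, disp, elmt_type, new_type);
#else
    int *blocks = (int *)malloc(sizeof(int) * (size_t)num_points);
    int  i;
    if (NULL == blocks)
        return MPI_ERR_NO_MEM;
    for (i = 0; i < num_points; i++)
        blocks[i] = 1;                /* every block has length 1 */
    mpi_code = MPI_Type_create_hindexed(num_points, blocks, disp, elmt_type, new_type);
    free(blocks);
#endif
    if (MPI_SUCCESS == mpi_code)
        mpi_code = MPI_Type_commit(new_type);
    return mpi_code;
}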

View File

@@ -5037,9 +5037,11 @@ int main(int argc, char **argv)
{
int mpi_size, mpi_rank; /* mpi variables */
#ifndef H5_HAVE_WIN32_API
/* Un-buffer the stdout and stderr */
setbuf(stderr, NULL);
setbuf(stdout, NULL);
HDsetbuf(stderr, NULL);
HDsetbuf(stdout, NULL);
#endif
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

View File

@@ -307,9 +307,11 @@ int main(int argc, char **argv)
H5Ptest_param_t io_mode_confusion_params;
H5Ptest_param_t rr_obj_flush_confusion_params;
#ifndef H5_HAVE_WIN32_API
/* Un-buffer the stdout and stderr */
setbuf(stderr, NULL);
setbuf(stdout, NULL);
HDsetbuf(stderr, NULL);
HDsetbuf(stdout, NULL);
#endif
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
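
Both test drivers make the same change: un-buffer stdout and stderr so output from every rank appears promptly, and skip that step on Windows, where the commit #ifdefs it out. A sketch of the pattern outside HDF5 (plain setbuf instead of the HDsetbuf wrapper, and a generic _WIN32 guard in place of H5_HAVE_WIN32_API):

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
#ifndef _WIN32
    /* Un-buffer stdout/stderr so parallel output is not lost if a rank aborts. */
    setbuf(stderr, NULL);
    setbuf(stdout, NULL);
#endif

    MPI_Init(&argc, &argv);
    /* ... run the parallel tests here ... */
    MPI_Finalize();
    return 0;
}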