
[svn-r18565] Description:

Minor whitespace and compiler warning cleanups

Tested on:
    Mac OS X/32 10.6.3 (amazon) w/debug
    (too minor to require h5committest)
Quincey Koziol 2010-04-15 13:01:56 -05:00
parent 2bacd3d4d7
commit 13a6885db3
3 changed files with 46 additions and 50 deletions

src/H5Dmpio.c
@@ -1163,7 +1163,7 @@ if(H5DEBUG(D))
* Note: even there is no selection for this process, the process still
* needs to contribute MPI NONE TYPE.
*/
-if(chunk_io_option[u] == 1) {
+if(chunk_io_option[u] == H5D_CHUNK_IO_MODE_COL) {
#ifdef H5D_DEBUG
if(H5DEBUG(D))
HDfprintf(H5DEBUG(D),"inside collective chunk IO mpi_rank = %d, chunk index = %Zu\n", mpi_rank, u);
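
The comment in this hunk reflects a core MPI rule: every rank in the communicator must enter a collective I/O call, even when it has no selection and therefore nothing to transfer. A minimal sketch of that pattern outside HDF5 (hypothetical helper name; a zero-count contribution stands in for the "MPI NONE TYPE" the comment mentions):

    /* Sketch only: every rank must join the collective write, even with a
     * zero count, or the collective call will hang.  Not HDF5 source. */
    #include <mpi.h>

    static int write_collective(MPI_File fh, const void *buf, int my_count)
    {
        MPI_Status status;

        /* Ranks with no selection still make the collective call, passing
         * count == 0 -- the plain-MPI analogue of contributing "MPI NONE
         * TYPE" in the code above. */
        return MPI_File_write_all(fh, (void *)buf, my_count, MPI_BYTE, &status);
    }
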
@@ -1206,7 +1206,7 @@ if(H5DEBUG(D))
HDassert(chunk_io_option[u] == 0);
#if !defined(H5_MPI_COMPLEX_DERIVED_DATATYPE_WORKS) || !defined(H5_MPI_SPECIAL_COLLECTIVE_IO_WORKS)
-/* Check if this process has somethign to do with this chunk */
+/* Check if this process has something to do with this chunk */
if(chunk_info) {
H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
H5D_chunk_ud_t udata; /* B-tree pass-through */
@@ -1849,7 +1849,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
MPI_Comm comm;
int ic, root;
int mpi_code;
-int mem_cleanup = 0;
+hbool_t mem_cleanup = FALSE;
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
int new_value;
htri_t check_prop;
@@ -1890,8 +1890,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
tempbuf = mergebuf + total_chunks;
if(mpi_rank == root)
recv_io_mode_info = (uint8_t *)H5MM_malloc(total_chunks * mpi_size);
-mem_cleanup = 1;
+mem_cleanup = TRUE;
/* Obtain the regularity and selection information for all chunks in this process. */
chunk_node = H5SL_first(fm->sel_chunks);
@@ -1911,7 +1910,7 @@ H5D_obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
chunk_node = H5SL_next(chunk_node);
} /* end while */
-/*Gather all the information */
+/* Gather all the information */
if(MPI_SUCCESS != (mpi_code = MPI_Gather(io_mode_info, total_chunks, MPI_BYTE, recv_io_mode_info, total_chunks, MPI_BYTE, root, comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code)
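
The MPI_Gather in this hunk collects each rank's per-chunk I/O mode bytes at the root, rank-major. The same pattern in isolation, as a hedged sketch (buffer and helper names are made up):

    /* Sketch of the gather pattern above: each rank owns total_chunks mode
     * bytes; the root receives mpi_size * total_chunks bytes, rank-major. */
    #include <mpi.h>
    #include <stdlib.h>

    static unsigned char *gather_modes(const unsigned char *local_modes,
                                       int total_chunks, int root, MPI_Comm comm)
    {
        int rank, size;
        unsigned char *all_modes = NULL;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        if(rank == root)    /* only the root needs the receive buffer */
            all_modes = (unsigned char *)malloc((size_t)size * (size_t)total_chunks);

        /* Every rank sends total_chunks bytes; recv args matter on root only */
        MPI_Gather((void *)local_modes, total_chunks, MPI_BYTE,
                   all_modes, total_chunks, MPI_BYTE, root, comm);

        return all_modes;   /* NULL on non-root ranks */
    }
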

src/H5FDmpio.c
@@ -637,7 +637,6 @@ done:
FUNC_LEAVE_API(ret_value)
}
/*-------------------------------------------------------------------------
* Function: H5Pset_dxpl_mpio_chunk_opt_num
@@ -1407,15 +1406,15 @@ H5FD_mpio_read(H5FD_t *_file, H5FD_mem_t UNUSED type, hid_t dxpl_id, haddr_t add
{
H5FD_mpio_t *file = (H5FD_mpio_t*)_file;
MPI_Offset mpi_off;
-MPI_Status mpi_stat;
+MPI_Status mpi_stat; /* Status from I/O operation */
int mpi_code; /* mpi return code */
-MPI_Datatype buf_type=MPI_BYTE; /* MPI description of the selection in memory */
+MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
int size_i; /* Integer copy of 'size' to read */
int bytes_read; /* Number of bytes read in */
int n;
int type_size; /* MPI datatype used for I/O's size */
int io_size; /* Actual number of bytes requested */
-H5P_genplist_t *plist; /* Property list pointer */
+H5P_genplist_t *plist = NULL; /* Property list pointer */
hbool_t use_view_this_time = FALSE;
herr_t ret_value = SUCCEED;
@@ -1498,6 +1497,7 @@ H5FD_mpio_read(H5FD_t *_file, H5FD_mem_t UNUSED type, hid_t dxpl_id, haddr_t add
fprintf(stdout, "H5FD_mpio_read: using MPIO collective mode\n");
#endif
/* Peek the collective_opt property to check whether the application wants to do IO individually. */
+HDassert(plist);
coll_opt_mode = (H5FD_mpio_collective_opt_t)H5P_peek_unsigned(plist, H5D_XFER_MPIO_COLLECTIVE_OPT_NAME);
if(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO) {
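
The properties peeked here (transfer mode and collective-opt mode) are set by the application on its dataset transfer property list; H5Pset_dxpl_mpio() and H5Pset_dxpl_mpio_collective_opt() are the public setters. A caller-side sketch (hypothetical helper; error checking omitted):

    /* Application-side sketch: setting the two properties peeked above. */
    #include "hdf5.h"

    static herr_t write_collectively(hid_t dset, hid_t memtype, const void *buf)
    {
        herr_t ret;
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);   /* I/O transfer mode */
        /* Keep true collective I/O, rather than independent I/O inside the
         * collective call */
        H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_COLLECTIVE_IO);

        ret = H5Dwrite(dset, memtype, H5S_ALL, H5S_ALL, dxpl, buf);
        H5Pclose(dxpl);
        return ret;
    }
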
@@ -1692,15 +1692,15 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
{
H5FD_mpio_t *file = (H5FD_mpio_t*)_file;
MPI_Offset mpi_off;
-MPI_Status mpi_stat;
-MPI_Datatype buf_type=MPI_BYTE; /* MPI description of the selection in memory */
+MPI_Status mpi_stat; /* Status from I/O operation */
+MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */
int mpi_code; /* MPI return code */
int size_i, bytes_written;
int type_size; /* MPI datatype used for I/O's size */
int io_size; /* Actual number of bytes requested */
hbool_t use_view_this_time = FALSE;
-H5P_genplist_t *plist; /* Property list pointer */
-herr_t ret_value=SUCCEED;
+H5P_genplist_t *plist = NULL; /* Property list pointer */
+herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(H5FD_mpio_write, FAIL)
@@ -1730,13 +1730,13 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
fprintf(stdout, "in H5FD_mpio_write mpi_off=%ld size_i=%d\n", (long)mpi_off, size_i);
#endif
-/* Obtain the data transfer properties */
-if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
-HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
if(type == H5FD_MEM_DRAW) {
H5FD_mpio_xfer_t xfer_mode; /* I/O tranfer mode */
+/* Obtain the data transfer properties */
+if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
+HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a file access property list")
/* Obtain the data transfer properties */
xfer_mode = (H5FD_mpio_xfer_t)H5P_peek_unsigned(plist, H5D_XFER_IO_XFER_MODE_NAME);
@@ -1790,6 +1790,7 @@ H5FD_mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
fprintf(stdout, "H5FD_mpio_write: using MPIO collective mode\n");
#endif
/* Peek the collective_opt property to check whether the application wants to do IO individually. */
+HDassert(plist);
coll_opt_mode = (H5FD_mpio_collective_opt_t)H5P_peek_unsigned(plist, H5D_XFER_MPIO_COLLECTIVE_OPT_NAME);
/*OKAY: CAST DISCARDS CONST QUALIFIER*/

src/H5Smpio.c
@@ -482,9 +482,6 @@ done:
FUNC_LEAVE_NOAPI(ret_value);
}
/*-------------------------------------------------------------------------
* Function: H5S_mpio_span_hyper_type
@@ -511,7 +508,6 @@ H5S_mpio_span_hyper_type( const H5S_t *space,
hsize_t *extra_offset,
hbool_t *is_derived_type )
{
-MPI_Datatype span_type;
H5S_hyper_span_t *ospan;
H5S_hyper_span_info_t *odown;
@@ -588,9 +584,9 @@ H5S_obtain_datatype(const hsize_t size[],
MPI_Datatype bas_type;
MPI_Datatype temp_type;
MPI_Datatype tempinner_type;
-MPI_Datatype *inner_type;
-int *blocklen;
-MPI_Aint *disp;
+MPI_Datatype *inner_type = NULL;
+int *blocklen = NULL;
+MPI_Aint *disp = NULL;
MPI_Aint stride;
H5S_hyper_span_info_t *down;
H5S_hyper_span_t *tspan;
@@ -612,7 +608,7 @@ H5S_obtain_datatype(const hsize_t size[],
down = span->down;
tspan = span;
-/* obtain the number of span tree for this dimension */
+/* Obtain the number of span tree nodes for this dimension */
outercount = 0;
while(tspan) {
tspan = tspan->next;
@@ -623,32 +619,27 @@ H5S_obtain_datatype(const hsize_t size[],
/* MPI2 hasn't been widely acccepted, adding H5_HAVE_MPI2 for the future use */
#ifdef H5_HAVE_MPI2
MPI_Type_extent(MPI_Aint, &sizeaint);
MPI_Type_extent(MPI_Datatype, &sizedtype);
blocklen = (int *)HDcalloc((size_t)outercount, sizeof(int));
disp = (MPI_Aint *)HDcalloc((size_t)outercount, sizeaint);
inner_type = (MPI_Datatype *)HDcalloc((size_t)outercount, sizedtype);
#else
blocklen = (int *)HDcalloc((size_t)outercount, sizeof(int));
disp = (MPI_Aint *)HDcalloc((size_t)outercount, sizeof(MPI_Aint));
inner_type = (MPI_Datatype *)HDcalloc((size_t)outercount, sizeof(MPI_Datatype));
#endif
tspan = span;
outercount = 0;
/* if this is the fastest changing dimension, it is the base case for derived datatype. */
-if(down == NULL){
+if(down == NULL) {
HDassert(dimindex <= 1);
-if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE,&bas_type)))
+if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &bas_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code);
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&bas_type)))
@@ -664,42 +655,42 @@ H5S_obtain_datatype(const hsize_t size[],
if(MPI_SUCCESS != (mpi_code = MPI_Type_hindexed(outercount, blocklen, disp, bas_type, span_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_hindexed failed", mpi_code);
} /* end if */
-else {/* dimindex is the rank of the dimension */
+else { /* dimindex is the rank of the dimension */
HDassert(dimindex > 1);
-/* Calculate the total bytes of the lower dimension */
+/* Calculate the total bytes of the lower dimensions */
total_lowd = 1; /* one dimension down */
-total_lowd1 = 1; /* two dimensions down */
+total_lowd1 = 1; /* two dimensions down */
-for ( i = dimindex-1; i > 0; i--)
-total_lowd = total_lowd * size[i];
+for(i = dimindex - 1; i > 0; i--)
+total_lowd = total_lowd * size[i];
-for ( i = dimindex-1; i > 1; i--)
-total_lowd1 = total_lowd1 * size[i];
+for(i = dimindex - 1; i > 1; i--)
+total_lowd1 = total_lowd1 * size[i];
-while(tspan) {
+while(tspan) {
/* Displacement should be in byte and should have dimension information */
/* First using MPI Type vector to build derived data type for this span only */
/* Need to calculate the disp in byte for this dimension. */
/* Calculate the total bytes of the lower dimension */
-disp[outercount] = tspan->low*total_lowd*elmt_size;
+disp[outercount] = tspan->low * total_lowd * elmt_size;
blocklen[outercount] = 1;
/* generating inner derived datatype by using MPI_Type_hvector */
-if(FAIL == H5S_obtain_datatype(size,tspan->down->head,&temp_type,elmt_size,dimindex-1))
+if(FAIL == H5S_obtain_datatype(size, tspan->down->head, &temp_type, elmt_size, dimindex - 1))
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't obtain MPI derived data type")
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&temp_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code);
/* building the inner vector datatype */
-stride = total_lowd*elmt_size;
+stride = total_lowd * elmt_size;
innercount = tspan->nelem;
-if(MPI_SUCCESS != (mpi_code = MPI_Type_hvector(innercount,1,stride,temp_type,&tempinner_type)))
+if(MPI_SUCCESS != (mpi_code = MPI_Type_hvector(innercount, 1, stride, temp_type, &tempinner_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_hvector failed", mpi_code);
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&tempinner_type)))
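
H5S_obtain_datatype() layers three MPI type constructors: MPI_Type_contiguous for the fastest-changing dimension, MPI_Type_hvector for the regular stride within one span, and MPI_Type_hindexed to splice spans together at their byte displacements. A reduced sketch of the first two layers for a single regular 2-D block (hypothetical helper; it uses the newer MPI_Type_create_hvector name where the source above uses the older MPI_Type_hvector spelling):

    /* Sketch: describe a rows x cols block inside a larger row-major array
     * whose rows are row_bytes apart.  The real code walks a span tree and
     * splices such pieces together with MPI_Type_hindexed. */
    #include <mpi.h>

    static MPI_Datatype block_type(int rows, int cols, size_t elmt_size,
                                   MPI_Aint row_bytes)
    {
        MPI_Datatype row, block;

        /* Leaf case: one row is cols contiguous elements of elmt_size bytes */
        MPI_Type_contiguous((int)(cols * elmt_size), MPI_BYTE, &row);

        /* One dimension up: rows copies of the row, strided by the row pitch */
        MPI_Type_create_hvector(rows, 1, row_bytes, row, &block);
        MPI_Type_commit(&block);
        MPI_Type_free(&row);    /* the committed type keeps what it needs */

        return block;
    }
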
@@ -719,18 +710,23 @@ H5S_obtain_datatype(const hsize_t size[],
} /* end else */
-if(inner_type != NULL && down != NULL) {
-for(i = 0; i < outercount; i++)
-if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[i])))
-HMPI_GOTO_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
-} /* end if */
-if(inner_type != NULL)
+done:
+if(inner_type != NULL) {
+if(down != NULL) {
+for(i = 0; i < outercount; i++)
+if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&inner_type[i])))
+HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
+} /* end if */
+HDfree(inner_type);
+} /* end if */
if(blocklen != NULL)
HDfree(blocklen);
if(disp != NULL)
HDfree(disp);
-done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_obtain_datatype() */
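
This last hunk moves all resource release under the done: label, so the MPI types and heap buffers are freed on error paths as well as on success (HMPI_DONE_ERROR is the variant used after done:, since there is nothing left to jump to). The underlying idiom, as a plain-C sketch without the HDF5 macros (hypothetical function):

    /* Sketch of the single-exit cleanup idiom adopted above: every error
     * path jumps to done:, where resources are released unconditionally. */
    #include <stdlib.h>

    static int build_something(size_t n)
    {
        int ret_value = 0;                      /* SUCCEED */
        int *scratch = (int *)malloc(n * sizeof(int));

        if(scratch == NULL) {
            ret_value = -1;                     /* FAIL */
            goto done;
        }
        /* ... work that may also set ret_value and goto done ... */

    done:
        free(scratch);   /* free(NULL) is safe, like the NULL checks above */
        return ret_value;
    }
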