[svn-r6514] Purpose:

Bug fix and Update

Description:
    From Quincey's comments on the code I checked in last night:

        - In H5F_close call, the "private" processes should call the
          H5F_flush with the "CLEAR_ONLY" flag.
        - There's no need for a special case for FPHDF5 in the
          FD_real_alloc function since FPHDF5 doesn't define an alloc
          function.
        - The return type of H5Pset_fapl_fphdf5 should be herr_t instead
          of hid_t. I don't know how it got that way in the first place.
        - The variable names for MPI types and the structure typedefs
          should be switched: H5FP_request/H5FP_request_t to
          H5FP_request_t/H5FP_request and so on.
        - In the H5FP.c module, I was committing the H5FP_request MPI
          datatype but using the wrong offset field...

Platforms tested:
    Linux...will test on others, but these are mostly FPHDF5 changes.

Misc. update:
This commit is contained in:
Bill Wendling 2003-03-20 12:39:06 -05:00
parent 9f7ef95fcd
commit a90774e8cc
7 changed files with 146 additions and 147 deletions

View File

@ -2979,6 +2979,10 @@ H5F_close(H5F_t *f)
/* Only flush at this point if the file will be closed */
if (closing) {
/* Dump debugging info */
H5AC_debug(f);
H5F_istore_stats(f, FALSE);
#ifdef H5_HAVE_FPHDF5
/*
* We only want the captain to perform the flush of the metadata
@ -2988,19 +2992,24 @@ H5F_close(H5F_t *f)
H5FD_fphdf5_is_captain(f->shared->lf)) {
#endif /* H5_HAVE_FPHDF5 */
/* Dump debugging info */
H5AC_debug(f);
H5F_istore_stats(f, FALSE);
/* Flush and destroy all caches */
if (H5F_flush(f, H5AC_dxpl_id, H5F_SCOPE_LOCAL,
H5F_FLUSH_INVALIDATE | H5F_FLUSH_CLOSING) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache");
#ifdef H5_HAVE_FPHDF5
} else {
/*
* If this isn't the captain process, flush but only clear
* the flags.
*/
if (H5F_flush(f, H5AC_dxpl_id, H5F_SCOPE_LOCAL,
H5F_FLUSH_INVALIDATE | H5F_FLUSH_CLOSING | H5F_FLUSH_CLEAR_ONLY) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache");
}
/* Wait for the captain to finish up... */
/* Let's all meet up now... */
if (H5FD_is_fphdf5_driver(f->shared->lf))
MPI_Barrier(H5FP_SAP_BARRIER_COMM);
#endif /* H5_HAVE_FPHDF5 */

View File

@ -1890,16 +1890,6 @@ H5FD_real_alloc(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size)
assert(type >= 0 && type < H5FD_MEM_NTYPES);
assert(size > 0);
#ifdef H5_HAVE_FPHDF5
/*
 * When we're using the FPHDF5 driver, this section of code is
 * only reached via the SAP. So just update the EOA and be done with
 * it.
 */
if ((ret_value = H5FD_update_eoa(file, type, dxpl_id, size)) == HADDR_UNDEF)
HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, HADDR_UNDEF,
"driver eoa update request failed");
#else
/*
* Dispatch to driver `alloc' callback or extend the end-of-address
* marker
@ -1913,7 +1903,6 @@ H5FD_real_alloc(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size)
HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, HADDR_UNDEF,
"driver eoa update request failed");
}
#endif /* H5_HAVE_FPHDF5 */
done:
FUNC_LEAVE_NOAPI(ret_value);

View File

@ -197,7 +197,7 @@ done:
* Modifications:
*-------------------------------------------------------------------------
*/
hid_t
herr_t
H5Pset_fapl_fphdf5(hid_t fapl_id, MPI_Comm comm, MPI_Comm barrier_comm,
MPI_Info info, unsigned sap_rank)
{
@ -207,7 +207,7 @@ H5Pset_fapl_fphdf5(hid_t fapl_id, MPI_Comm comm, MPI_Comm barrier_comm,
herr_t ret_value;
FUNC_ENTER_API(H5Pset_fapl_fphdf5, FAIL);
H5TRACE5("i","iMcMcMiIu",fapl_id,comm,barrier_comm,info,sap_rank);
H5TRACE5("e","iMcMcMiIu",fapl_id,comm,barrier_comm,info,sap_rank);
if (fapl_id == H5P_DEFAULT)
HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL,
@ -1402,7 +1402,7 @@ H5FD_fphdf5_write(H5FD_t *_file, H5FD_mem_t mem_type, hid_t dxpl_id,
FUNC_ENTER_NOAPI(H5FD_fphdf5_write, FAIL);
HDfprintf(stderr, "%s: Entering: rank==%d, addr==%Hd, size==%d\n",
HDfprintf(stderr, "%s: Entering: rank==%d, addr==%a, size==%Zu\n",
FUNC, file->mpi_rank, addr, size);
/* check args */
@ -1536,7 +1536,7 @@ H5FD_fphdf5_write_real(H5FD_t *_file, hid_t dxpl_id, MPI_Offset mpi_off, int siz
FUNC_ENTER_NOAPI(H5FD_fphdf5_write_real, FAIL);
HDfprintf(stderr, "%s: %d: Entering: addr=%Hd, size=%d\n", FUNC,
HDfprintf(stderr, "%s: %d: Entering: addr=%a, size=%Zu\n", FUNC,
H5FD_fphdf5_mpi_rank(_file), (haddr_t)mpi_off, size);
/* check args */
@ -1607,7 +1607,7 @@ HDfprintf(stderr, "%s: %d: Entering: addr=%Hd, size=%d\n", FUNC,
int i;
sleep(3);
HDfprintf(stderr, "%s: writing at %Hd\n", FUNC, (haddr_t)mpi_off);
HDfprintf(stderr, "%s: writing at %a\n", FUNC, (haddr_t)mpi_off);
for (i = 0; i < size; ++i) {
if (i % 7 == 0)

View File

@ -29,10 +29,10 @@
#define INTERFACE_INIT NULL
static int interface_initialize_g = 0;
MPI_Datatype H5FP_request_t; /* MPI datatype for the H5FP_request obj*/
MPI_Datatype H5FP_reply_t; /* MPI datatype for the H5FP_reply obj */
MPI_Datatype H5FP_read_t; /* MPI datatype for the H5FP_read obj */
MPI_Datatype H5FP_alloc_t; /* MPI datatype for the H5FP_alloc obj */
MPI_Datatype H5FP_request; /* MPI datatype for the H5FP_request_t type */
MPI_Datatype H5FP_reply; /* MPI datatype for the H5FP_reply_t type */
MPI_Datatype H5FP_read; /* MPI datatype for the H5FP_read_t type */
MPI_Datatype H5FP_alloc; /* MPI datatype for the H5FP_alloc_t type */
/* SAP specific variables */
MPI_Comm H5FP_SAP_COMM; /* Comm we use: Supplied by user */
@ -72,10 +72,10 @@ H5FPinit(MPI_Comm comm, int sap_rank, MPI_Comm *sap_comm, MPI_Comm *sap_barrier_
H5TRACE4("e","McIs*Mc*Mc",comm,sap_rank,sap_comm,sap_barrier_comm);
/* initialize to NULL so that we can release if an error occurs */
H5FP_request_t = MPI_DATATYPE_NULL;
H5FP_reply_t = MPI_DATATYPE_NULL;
H5FP_read_t = MPI_DATATYPE_NULL;
H5FP_alloc_t = MPI_DATATYPE_NULL;
H5FP_request = MPI_DATATYPE_NULL;
H5FP_reply = MPI_DATATYPE_NULL;
H5FP_read = MPI_DATATYPE_NULL;
H5FP_alloc = MPI_DATATYPE_NULL;
*sap_comm = H5FP_SAP_COMM = MPI_COMM_NULL;
*sap_barrier_comm = H5FP_SAP_BARRIER_COMM = MPI_COMM_NULL;
@ -144,20 +144,20 @@ H5FPinit(MPI_Comm comm, int sap_rank, MPI_Comm *sap_comm, MPI_Comm *sap_barrier_
done:
if (ret_value == FAIL) {
/* we've encountered an error...clean up */
if (H5FP_request_t != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_request_t) != MPI_SUCCESS)
if (H5FP_request != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_request) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
if (H5FP_reply_t != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_reply_t) != MPI_SUCCESS)
if (H5FP_reply != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_reply) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
if (H5FP_read_t != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_read_t) != MPI_SUCCESS)
if (H5FP_read != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_read) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
if (H5FP_alloc_t != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_alloc_t) != MPI_SUCCESS)
if (H5FP_alloc != MPI_DATATYPE_NULL)
if (MPI_Type_free(&H5FP_alloc) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
if (H5FP_SAP_BARRIER_COMM != MPI_COMM_NULL)
@ -208,16 +208,16 @@ H5FPfinalize(void)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "Error stopping the SAP");
/* Release the MPI types we created */
if (MPI_Type_free(&H5FP_request_t) != MPI_SUCCESS)
if (MPI_Type_free(&H5FP_request) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
if (MPI_Type_free(&H5FP_reply_t) != MPI_SUCCESS)
if (MPI_Type_free(&H5FP_reply) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
if (MPI_Type_free(&H5FP_read_t) != MPI_SUCCESS)
if (MPI_Type_free(&H5FP_read) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
if (MPI_Type_free(&H5FP_alloc_t) != MPI_SUCCESS)
if (MPI_Type_free(&H5FP_alloc) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_free failed");
/* Release the barrier communicator */
@ -315,8 +315,8 @@ done:
/*
* Function: H5FP_commit_sap_datatypes
* Purpose: Commit the H5FP_request_t, H5FP_reply_t, H5FP_read_t, and
* H5FP_alloc_t structure datatypes to MPI.
* Purpose: Commit the H5FP_request, H5FP_reply, H5FP_read, and
* H5FP_alloc structure datatypes to MPI.
* Return: Success: SUCCEED
* Failure: FAIL
* Programmer: Bill Wendling, 26. July, 2002
@ -325,17 +325,19 @@ done:
static herr_t
H5FP_commit_sap_datatypes(void)
{
int block_length[5], i;
MPI_Aint displs[5];
MPI_Datatype old_types[5];
H5FP_request sap_req;
H5FP_read sap_read;
H5FP_alloc sap_alloc;
herr_t ret_value = SUCCEED;
int block_length[5];
int i;
MPI_Aint displs[5];
MPI_Datatype old_types[5];
H5FP_request_t sap_req;
H5FP_reply_t sap_reply;
H5FP_read_t sap_read;
H5FP_alloc_t sap_alloc;
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(H5FP_commit_sap_datatypes, FAIL);
/* Commit the H5FP_request_t datatype */
/* Commit the H5FP_request datatype */
block_length[0] = 8;
block_length[1] = 1;
block_length[2] = 4;
@ -346,7 +348,7 @@ H5FP_commit_sap_datatypes(void)
old_types[2] = MPI_LONG_LONG_INT;
old_types[3] = HADDR_AS_MPI_TYPE;
old_types[4] = MPI_UNSIGNED_CHAR;
MPI_Address(&sap_req.req_type, &displs[0]);
MPI_Address(&sap_req.req_id, &displs[0]);
MPI_Address(&sap_req.feature_flags, &displs[1]);
MPI_Address(&sap_req.meta_block_size, &displs[2]);
MPI_Address(&sap_req.addr, &displs[3]);
@ -356,26 +358,28 @@ H5FP_commit_sap_datatypes(void)
for (i = 4; i >= 0; --i)
displs[i] -= displs[0];
if (MPI_Type_struct(5, block_length, displs, old_types,
&H5FP_request_t) != MPI_SUCCESS)
if (MPI_Type_struct(5, block_length, displs, old_types, &H5FP_request) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_struct failed");
if (MPI_Type_commit(&H5FP_request_t) != MPI_SUCCESS)
if (MPI_Type_commit(&H5FP_request) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_commit failed");
/* Commit the H5FP_reply_t datatype */
/* Commit the H5FP_reply datatype */
block_length[0] = 4;
displs[0] = 0;
old_types[0] = MPI_INT;
MPI_Address(&sap_reply.req_id, &displs[0]);
if (MPI_Type_struct(1, block_length, displs, old_types,
&H5FP_reply_t) != MPI_SUCCESS)
/* Calculate the displacements */
for (i = 0; i >= 0; --i)
displs[i] -= displs[0];
if (MPI_Type_struct(1, block_length, displs, old_types, &H5FP_reply) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_struct failed");
if (MPI_Type_commit(&H5FP_reply_t) != MPI_SUCCESS)
if (MPI_Type_commit(&H5FP_reply) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_commit failed");
/* Commit the H5FP_read_t datatype */
/* Commit the H5FP_read datatype */
block_length[0] = 5;
block_length[1] = 1;
old_types[0] = MPI_UNSIGNED;
@ -387,14 +391,13 @@ H5FP_commit_sap_datatypes(void)
for (i = 1; i >= 0; --i)
displs[i] -= displs[0];
if (MPI_Type_struct(2, block_length, displs, old_types,
&H5FP_read_t) != MPI_SUCCESS)
if (MPI_Type_struct(2, block_length, displs, old_types, &H5FP_read) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_struct failed");
if (MPI_Type_commit(&H5FP_read_t) != MPI_SUCCESS)
if (MPI_Type_commit(&H5FP_read) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_commit failed");
/* Commit the H5FP_alloc_t datatype */
/* Commit the H5FP_alloc datatype */
block_length[0] = 4;
block_length[1] = 1;
old_types[0] = MPI_UNSIGNED;
@ -406,11 +409,10 @@ H5FP_commit_sap_datatypes(void)
for (i = 1; i >= 0; --i)
displs[i] -= displs[0];
if (MPI_Type_struct(2, block_length, displs, old_types,
&H5FP_alloc_t) != MPI_SUCCESS)
if (MPI_Type_struct(2, block_length, displs, old_types, &H5FP_alloc) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_struct failed");
if (MPI_Type_commit(&H5FP_alloc_t) != MPI_SUCCESS)
if (MPI_Type_commit(&H5FP_alloc) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Type_commit failed");
done:
@ -429,7 +431,7 @@ done:
static herr_t
H5FP_request_sap_stop(void)
{
H5FP_request req;
H5FP_request_t req;
int mrc, my_rank;
herr_t ret_value = SUCCEED;
@ -444,7 +446,7 @@ H5FP_request_sap_stop(void)
req.req_id = 0;
req.proc_rank = my_rank;
if (MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if (MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM) != MPI_SUCCESS)
HGOTO_ERROR(H5E_INTERNAL, H5E_MPI, FAIL, "MPI_Send failed");

View File

@ -74,7 +74,7 @@ H5FP_request_open(H5FP_obj_t obj_type, haddr_t maxaddr,
hsize_t sdata_block_size, hsize_t threshold,
hsize_t alignment, unsigned *file_id, unsigned *req_id)
{
H5FP_request req;
H5FP_request_t req;
MPI_Status mpi_status;
int mrc, my_rank;
int ret_value = SUCCEED;
@ -108,7 +108,7 @@ H5FP_request_open(H5FP_obj_t obj_type, haddr_t maxaddr,
req.threshold = threshold;
req.alignment = alignment;
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -139,7 +139,7 @@ H5FP_request_lock(unsigned file_id, unsigned char *obj_oid,
H5FP_lock_t rw_lock, int last, unsigned *req_id,
H5FP_status_t *status)
{
H5FP_request req;
H5FP_request_t req;
int mrc, my_rank;
int ret_value = SUCCEED;
@ -164,7 +164,7 @@ H5FP_request_lock(unsigned file_id, unsigned char *obj_oid,
req.proc_rank = my_rank;
H5FP_COPY_OID(req.oid, obj_oid);
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS) {
*status = H5FP_STATUS_LOCK_FAILED;
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -175,12 +175,12 @@ H5FP_request_lock(unsigned file_id, unsigned char *obj_oid,
* On the last lock in the lock-group to be acquired, we expect a
* reply from the SAP
*/
H5FP_reply sap_reply;
MPI_Status mpi_status;
H5FP_reply_t sap_reply;
MPI_Status mpi_status;
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply, (int)H5FP_sap_rank,
H5FP_TAG_REPLY, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -211,7 +211,7 @@ herr_t
H5FP_request_release_lock(unsigned file_id, unsigned char *obj_oid,
int last, unsigned *req_id, H5FP_status_t *status)
{
H5FP_request req;
H5FP_request_t req;
int mrc, my_rank;
herr_t ret_value = SUCCEED;
@ -235,7 +235,7 @@ H5FP_request_release_lock(unsigned file_id, unsigned char *obj_oid,
req.md_size = 0;
req.proc_rank = my_rank;
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS) {
*status = H5FP_STATUS_LOCK_RELEASE_FAILED;
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -246,12 +246,12 @@ H5FP_request_release_lock(unsigned file_id, unsigned char *obj_oid,
* On the last lock released in this lock-group, we expect a
* reply from the SAP
*/
H5FP_reply sap_reply;
MPI_Status mpi_status;
H5FP_reply_t sap_reply;
MPI_Status mpi_status;
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply, (int)H5FP_sap_rank,
H5FP_TAG_REPLY, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -288,8 +288,8 @@ H5FP_request_read_metadata(H5FD_t *file, unsigned file_id, hid_t dxpl_id,
size_t size, uint8_t **buf, int *bytes_read,
unsigned *req_id, H5FP_status_t *status)
{
H5FP_request req;
H5FP_read sap_read;
H5FP_request_t req;
H5FP_read_t sap_read;
MPI_Status mpi_status;
int mrc, my_rank;
herr_t ret_value = SUCCEED;
@ -315,13 +315,13 @@ H5FP_request_read_metadata(H5FD_t *file, unsigned file_id, hid_t dxpl_id,
req.proc_rank = my_rank;
req.addr = addr;
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_read, 1, H5FP_read_t, (int)H5FP_sap_rank, H5FP_TAG_READ,
if ((mrc = MPI_Recv(&sap_read, 1, H5FP_read, (int)H5FP_sap_rank, H5FP_TAG_READ,
H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -388,9 +388,9 @@ H5FP_request_write_metadata(H5FD_t *file, unsigned file_id, hid_t dxpl_id,
int mdata_size, const char *mdata,
unsigned *req_id, H5FP_status_t *status)
{
H5FP_reply sap_reply;
H5FP_reply_t sap_reply;
MPI_Status mpi_status;
H5FP_request req;
H5FP_request_t req;
int mrc, my_rank;
herr_t ret_value = SUCCEED;
@ -415,7 +415,7 @@ H5FP_request_write_metadata(H5FD_t *file, unsigned file_id, hid_t dxpl_id,
req.addr = addr;
req.md_size = mdata_size;
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -425,7 +425,7 @@ H5FP_request_write_metadata(H5FD_t *file, unsigned file_id, hid_t dxpl_id,
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply, (int)H5FP_sap_rank,
H5FP_TAG_REPLY, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -481,8 +481,8 @@ herr_t
H5FP_request_flush_metadata(H5FD_t *file, unsigned file_id, hid_t dxpl_id,
unsigned *req_id, H5FP_status_t *status)
{
H5FP_reply sap_reply;
H5FP_request req;
H5FP_reply_t sap_reply;
H5FP_request_t req;
MPI_Status mpi_status;
int mrc, my_rank;
int ret_value = SUCCEED;
@ -504,13 +504,13 @@ H5FP_request_flush_metadata(H5FD_t *file, unsigned file_id, hid_t dxpl_id,
req.file_id = file_id;
req.proc_rank = my_rank;
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply, (int)H5FP_sap_rank,
H5FP_TAG_REPLY, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -564,8 +564,8 @@ herr_t
H5FP_request_close(H5FD_t *file, unsigned file_id, unsigned *req_id,
H5FP_status_t *status)
{
H5FP_reply sap_reply;
H5FP_request req;
H5FP_reply_t sap_reply;
H5FP_request_t req;
MPI_Status mpi_status;
int mrc;
herr_t ret_value = SUCCEED;
@ -583,13 +583,13 @@ H5FP_request_close(H5FD_t *file, unsigned file_id, unsigned *req_id,
req.file_id = file_id;
req.proc_rank = H5FD_fphdf5_mpi_rank(file);
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply, (int)H5FP_sap_rank,
H5FP_TAG_REPLY, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -615,8 +615,8 @@ herr_t
H5FP_request_allocate(H5FD_t *file, H5FD_mem_t mem_type, hsize_t size,
haddr_t *addr, unsigned *req_id, H5FP_status_t *status)
{
H5FP_alloc sap_alloc;
H5FP_request req;
H5FP_alloc_t sap_alloc;
H5FP_request_t req;
MPI_Status mpi_status;
int mrc;
herr_t ret_value = SUCCEED;
@ -637,13 +637,13 @@ H5FP_request_allocate(H5FD_t *file, H5FD_mem_t mem_type, hsize_t size,
req.mem_type = mem_type;
req.meta_block_size = size; /* use this field as the size to allocate */
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_alloc, 1, H5FP_alloc_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_alloc, 1, H5FP_alloc, (int)H5FP_sap_rank,
H5FP_TAG_ALLOC, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -670,8 +670,8 @@ herr_t
H5FP_request_free(H5FD_t *file, H5FD_mem_t mem_type, haddr_t addr,
hsize_t size, unsigned *req_id, H5FP_status_t *status)
{
H5FP_reply sap_reply;
H5FP_request req;
H5FP_reply_t sap_reply;
H5FP_request_t req;
MPI_Status mpi_status;
int mrc;
herr_t ret_value = SUCCEED;
@ -693,13 +693,13 @@ H5FP_request_free(H5FD_t *file, H5FD_mem_t mem_type, haddr_t addr,
req.addr = addr;
req.meta_block_size = size; /* use this field as the size to free */
if ((mrc = MPI_Send(&req, 1, H5FP_request_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Send(&req, 1, H5FP_request, (int)H5FP_sap_rank,
H5FP_TAG_REQUEST, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_reply, 1, H5FP_reply, (int)H5FP_sap_rank,
H5FP_TAG_REPLY, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -746,7 +746,7 @@ H5FP_gen_request_id()
static herr_t
H5FP_dump_to_file(H5FD_t *file, hid_t dxpl_id)
{
H5FP_read sap_read;
H5FP_read_t sap_read;
hid_t new_dxpl_id = FAIL;
H5P_genplist_t *plist = NULL, *old_plist;
unsigned dumping = 1;
@ -788,7 +788,7 @@ H5FP_dump_to_file(H5FD_t *file, hid_t dxpl_id)
HDmemset(&mpi_status, 0, sizeof(mpi_status));
if ((mrc = MPI_Recv(&sap_read, 1, H5FP_read_t, (int)H5FP_sap_rank,
if ((mrc = MPI_Recv(&sap_read, 1, H5FP_read, (int)H5FP_sap_rank,
H5FP_TAG_DUMP, H5FP_SAP_COMM, &mpi_status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);

View File

@ -183,9 +183,9 @@ typedef struct {
hsize_t alignment; /* Alignment (really!) */
haddr_t addr; /* Address of the metadata */
unsigned char oid[H5R_OBJ_REF_BUF_SIZE]; /* Buffer to store OID of object */
} H5FP_request;
} H5FP_request_t;
extern MPI_Datatype H5FP_request_t; /* MPI datatype for the H5FP_request obj*/
extern MPI_Datatype H5FP_request; /* MPI datatype for the H5FP_request obj */
/*===----------------------------------------------------------------------===
* H5FP_reply
@ -198,9 +198,9 @@ typedef struct {
unsigned file_id; /* File ID assigned to an open file */
H5FP_status_t status; /* Status of the request */
unsigned md_size; /* Size of the metadata sent in next msg */
} H5FP_reply;
} H5FP_reply_t;
extern MPI_Datatype H5FP_reply_t; /* MPI datatype for the H5FP_reply obj */
extern MPI_Datatype H5FP_reply; /* MPI datatype for the H5FP_reply obj */
/*===----------------------------------------------------------------------===
* H5FP_read
@ -215,9 +215,9 @@ typedef struct {
H5FD_mem_t mem_type; /* Type of memory updated, if req'd */
unsigned md_size; /* Size of the metadata sent in next msg */
MPI_Offset addr; /* Address of the metadata */
} H5FP_read;
} H5FP_read_t;
extern MPI_Datatype H5FP_read_t; /* MPI datatype for the H5FP_read obj */
extern MPI_Datatype H5FP_read; /* MPI datatype for the H5FP_read obj */
/*===----------------------------------------------------------------------===
* H5FP_alloc
@ -231,9 +231,9 @@ typedef struct {
H5FP_status_t status; /* Status of the request */
H5FD_mem_t mem_type; /* Type of memory updated, if req'd */
haddr_t addr; /* Address of the metadata */
} H5FP_alloc;
} H5FP_alloc_t;
extern MPI_Datatype H5FP_alloc_t; /* MPI datatype for the H5FP_alloc obj */
extern MPI_Datatype H5FP_alloc; /* MPI datatype for the H5FP_alloc obj */
/* Handy #define for copying OIDs */
#define H5FP_COPY_OID(dst, src) HDmemcpy((dst), (src), H5R_OBJ_REF_BUF_SIZE)

View File

@ -107,7 +107,7 @@ typedef struct {
static H5TB_TREE *file_info_tree;
/* local functions */
static herr_t H5FP_sap_receive(H5FP_request *req, int source, int tag, char **buf);
static herr_t H5FP_sap_receive(H5FP_request_t *req, int source, int tag, char **buf);
/* local functions to generate unique ids for messages */
static unsigned H5FP_gen_sap_file_id(void);
@ -151,16 +151,16 @@ static H5FP_mdata_mod *H5FP_new_file_mod_node(unsigned rank,
static herr_t H5FP_free_mod_node(H5FP_mdata_mod *info);
/* local request handling functions */
static herr_t H5FP_sap_handle_open_request(H5FP_request *req, unsigned md_size);
static herr_t H5FP_sap_handle_lock_request(H5FP_request *req);
static herr_t H5FP_sap_handle_release_lock_request(H5FP_request *req);
static herr_t H5FP_sap_handle_read_request(H5FP_request *req);
static herr_t H5FP_sap_handle_write_request(H5FP_request *req,
static herr_t H5FP_sap_handle_open_request(H5FP_request_t *req, unsigned md_size);
static herr_t H5FP_sap_handle_lock_request(H5FP_request_t *req);
static herr_t H5FP_sap_handle_release_lock_request(H5FP_request_t *req);
static herr_t H5FP_sap_handle_read_request(H5FP_request_t *req);
static herr_t H5FP_sap_handle_write_request(H5FP_request_t *req,
char *mdata,
unsigned md_size);
static herr_t H5FP_sap_handle_flush_request(H5FP_request *req);
static herr_t H5FP_sap_handle_close_request(H5FP_request *req);
static herr_t H5FP_sap_handle_alloc_request(H5FP_request *req);
static herr_t H5FP_sap_handle_flush_request(H5FP_request_t *req);
static herr_t H5FP_sap_handle_close_request(H5FP_request_t *req);
static herr_t H5FP_sap_handle_alloc_request(H5FP_request_t *req);
/*
*===----------------------------------------------------------------------===
@ -185,7 +185,7 @@ H5FP_sap_receive_loop(void)
herr_t ret_value = SUCCEED;
int comm_size;
int stop = 0;
H5FP_request req;
H5FP_request_t req;
FUNC_ENTER_NOAPI(H5FP_sap_receive_loop, FAIL);
@ -266,7 +266,7 @@ done:
* Modifications:
*/
static herr_t
H5FP_sap_receive(H5FP_request *req, int source, int tag, char **buf)
H5FP_sap_receive(H5FP_request_t *req, int source, int tag, char **buf)
{
MPI_Status status;
herr_t ret_value = SUCCEED;
@ -276,7 +276,7 @@ H5FP_sap_receive(H5FP_request *req, int source, int tag, char **buf)
HDmemset(&status, 0, sizeof(status));
if ((mrc = MPI_Recv(req, 1, H5FP_request_t, source, tag,
if ((mrc = MPI_Recv(req, 1, H5FP_request, source, tag,
H5FP_SAP_COMM, &status)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Recv failed", mrc);
@ -752,7 +752,7 @@ H5FP_remove_file_id_from_list(unsigned file_id)
static herr_t
H5FP_send_reply(unsigned to, unsigned req_id, unsigned file_id, H5FP_status_t status)
{
H5FP_reply reply;
H5FP_reply_t reply;
herr_t ret_value = SUCCEED;
int mrc;
@ -762,7 +762,7 @@ H5FP_send_reply(unsigned to, unsigned req_id, unsigned file_id, H5FP_status_t st
reply.file_id = file_id;
reply.status = status;
if ((mrc = MPI_Send(&reply, 1, H5FP_reply_t, (int)to, H5FP_TAG_REPLY,
if ((mrc = MPI_Send(&reply, 1, H5FP_reply, (int)to, H5FP_TAG_REPLY,
H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -782,7 +782,7 @@ done:
static herr_t
H5FP_dump(H5FP_file_info *info, unsigned to, unsigned req_id, unsigned file_id)
{
H5FP_read r;
H5FP_read_t r;
H5TB_NODE *node;
herr_t ret_value = SUCCEED;
int mrc;
@ -809,7 +809,7 @@ H5FP_dump(H5FP_file_info *info, unsigned to, unsigned req_id, unsigned file_id)
r.addr = m->addr;
r.md_size = m->md_size;
if ((mrc = MPI_Send(&r, 1, H5FP_read_t, (int)to, H5FP_TAG_DUMP,
if ((mrc = MPI_Send(&r, 1, H5FP_read, (int)to, H5FP_TAG_DUMP,
H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -826,7 +826,7 @@ H5FP_dump(H5FP_file_info *info, unsigned to, unsigned req_id, unsigned file_id)
r.md_size = 0;
r.status = H5FP_STATUS_DUMPING_FINISHED;
if ((mrc = MPI_Send(&r, 1, H5FP_read_t, (int)to, H5FP_TAG_DUMP,
if ((mrc = MPI_Send(&r, 1, H5FP_read, (int)to, H5FP_TAG_DUMP,
H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -835,9 +835,8 @@ H5FP_dump(H5FP_file_info *info, unsigned to, unsigned req_id, unsigned file_id)
if ((info->mod_tree = H5TB_dmake((H5TB_cmp_t)H5FP_file_mod_cmp,
sizeof(H5FP_mdata_mod), FALSE)) == NULL)
HGOTO_ERROR(H5E_FPHDF5, H5E_CANTMAKETREE, NULL, "cannot make TBBT tree");
HGOTO_ERROR(H5E_FPHDF5, H5E_CANTMAKETREE, FAIL, "cannot make TBBT tree");
info->mod_tree = NULL;
info->num_mods = 0;
done:
@ -875,7 +874,7 @@ H5FP_gen_sap_file_id()
* Modifications:
*/
static herr_t
H5FP_sap_handle_open_request(H5FP_request *req, unsigned UNUSED md_size)
H5FP_sap_handle_open_request(H5FP_request_t *req, unsigned UNUSED md_size)
{
herr_t ret_value = SUCCEED;
int mrc;
@ -918,7 +917,7 @@ done:
* Modifications:
*/
static herr_t
H5FP_sap_handle_lock_request(H5FP_request *req)
H5FP_sap_handle_lock_request(H5FP_request_t *req)
{
struct lock_group {
unsigned char oid[sizeof(req->oid)];
@ -1123,7 +1122,7 @@ HDfprintf(stderr, "%s: locking failure (%d)!!\n", FUNC, ret_value);
* Modifications:
*/
static herr_t
H5FP_sap_handle_release_lock_request(H5FP_request *req)
H5FP_sap_handle_release_lock_request(H5FP_request_t *req)
{
struct release_group {
unsigned char oid[sizeof(req->oid)];
@ -1232,10 +1231,10 @@ done:
* Modifications:
*/
static herr_t
H5FP_sap_handle_read_request(H5FP_request *req)
H5FP_sap_handle_read_request(H5FP_request_t *req)
{
H5FP_file_info *info;
H5FP_read r;
H5FP_read_t r;
herr_t ret_value = SUCCEED;
char *metadata = NULL;
int mrc;
@ -1276,7 +1275,7 @@ H5FP_sap_handle_read_request(H5FP_request *req)
}
}
if ((mrc = MPI_Send(&r, 1, H5FP_read_t, (int)req->proc_rank,
if ((mrc = MPI_Send(&r, 1, H5FP_read, (int)req->proc_rank,
H5FP_TAG_READ, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
@ -1304,7 +1303,7 @@ done:
* Modifications:
*/
static herr_t
H5FP_sap_handle_write_request(H5FP_request *req, char *mdata, unsigned md_size)
H5FP_sap_handle_write_request(H5FP_request_t *req, char *mdata, unsigned md_size)
{
H5FP_file_info *info;
H5FP_status_t exit_state = H5FP_STATUS_OK;
@ -1388,7 +1387,7 @@ done:
* Modifications:
*/
static herr_t
H5FP_sap_handle_flush_request(H5FP_request *req)
H5FP_sap_handle_flush_request(H5FP_request_t *req)
{
H5FP_file_info *info;
H5FP_status_t exit_state = H5FP_STATUS_OK;
@ -1430,7 +1429,7 @@ done:
* Modifications:
*/
static herr_t
H5FP_sap_handle_close_request(H5FP_request *req)
H5FP_sap_handle_close_request(H5FP_request_t *req)
{
H5FP_file_info *info;
H5FP_status_t exit_state = H5FP_STATUS_OK;
@ -1468,9 +1467,9 @@ done:
* Modifications:
*/
static herr_t
H5FP_sap_handle_alloc_request(H5FP_request *req)
H5FP_sap_handle_alloc_request(H5FP_request_t *req)
{
H5FP_alloc sap_alloc;
H5FP_alloc_t sap_alloc;
H5FP_file_info *info;
int mrc;
herr_t ret_value = SUCCEED;
@ -1502,7 +1501,7 @@ H5FP_sap_handle_alloc_request(H5FP_request *req)
HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL,
"SAP unable to allocate file memory");
if ((mrc = MPI_Send(&sap_alloc, 1, H5FP_alloc_t, (int)req->proc_rank,
if ((mrc = MPI_Send(&sap_alloc, 1, H5FP_alloc, (int)req->proc_rank,
H5FP_TAG_ALLOC, H5FP_SAP_COMM)) != MPI_SUCCESS)
HMPI_GOTO_ERROR(FAIL, "MPI_Send failed", mrc);
}