[svn-r29077] Description:

Normalize against the trunk, in preparation for final merge.

Tested on:
    MacOSX/64 10.11.3 (amazon) w/serial & parallel
    (h5committest not required on this branch)
Quincey Koziol 2016-02-10 12:55:55 -05:00
parent f1283d59b0
commit c8a6c6030a
21 changed files with 393 additions and 518 deletions


@ -88,8 +88,6 @@ hid_t H5AC_dxpl_id;
hid_t H5AC_coll_read_dxpl_id = (-1); hid_t H5AC_coll_read_dxpl_id = (-1);
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
/* global flag for collective API sanity checks */
/* DXPL to be used in operations that will not result in I/O calls */ /* DXPL to be used in operations that will not result in I/O calls */
hid_t H5AC_noio_dxpl_id = (-1); hid_t H5AC_noio_dxpl_id = (-1);
@ -216,7 +214,7 @@ H5AC__init_package(void)
/* Get the property list object */ /* Get the property list object */
if (NULL == (xfer_plist = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id))) if (NULL == (xfer_plist = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object") HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
/* set metadata dxpl type */ /* Insert the dxpl type property */
dxpl_type = H5FD_METADATA_DXPL; dxpl_type = H5FD_METADATA_DXPL;
if(H5P_set(xfer_plist, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0) if(H5P_set(xfer_plist, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set dxpl type property") HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "can't set dxpl type property")
@ -551,7 +549,6 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.") HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache); aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
if(aux_ptr) if(aux_ptr)
/* Sanity check */ /* Sanity check */
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC); HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);


@ -2067,7 +2067,7 @@ HDfprintf(stdout, "%d:H5AC_propagate...:%u: (u/uu/i/iu/r/ru) = %zu/%u/%zu/%u/%zu
/* clear collective access flag on half of the entries in the /* clear collective access flag on half of the entries in the
cache and mark them as independent in case they need to be cache and mark them as independent in case they need to be
evicted later. All ranks are guranteed to mark the same entires evicted later. All ranks are guranteed to mark the same entries
since we don't modify the order of the collectively accessed since we don't modify the order of the collectively accessed
entries except through collective access. */ entries except through collective access. */
if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0) if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
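The comment above depends on every rank holding the collective-entry list in the same order, so each rank can independently demote the same half of the entries with no extra communication. A minimal standalone sketch of that idea (plain C with hypothetical types, not the HDF5 cache structures):

#include <stddef.h>

/* Hypothetical stand-ins for cache entries kept on a collective list */
struct entry {
    int           coll_access;   /* entry is currently marked collective */
    struct entry *coll_prev;     /* toward the head of the list */
};

struct coll_list {
    struct entry *tail;
    int           len;
};

/* Demote half of the collective entries, starting from the tail.  Because
 * every rank keeps this list in the same order and applies the same rule,
 * all ranks demote exactly the same entries -- no message exchange needed. */
static void demote_half(struct coll_list *list)
{
    struct entry *ep = list->tail;
    int to_clear = list->len / 2;

    while (ep != NULL && to_clear > 0) {
        struct entry *prev = ep->coll_prev;

        ep->coll_access = 0;   /* entry may now be evicted independently */
        /* (unlinking from the list itself is omitted in this sketch) */
        to_clear--;
        ep = prev;
    }
}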


@ -181,13 +181,6 @@ typedef H5C_cache_entry_t H5AC_info_t;
/* Typedef for metadata cache (defined in H5Cpkg.h) */ /* Typedef for metadata cache (defined in H5Cpkg.h) */
typedef H5C_t H5AC_t; typedef H5C_t H5AC_t;
#ifdef H5_HAVE_PARALLEL
/* Definitions for "collective metadata write" property */
#define H5AC_COLLECTIVE_META_WRITE_NAME "H5AC_collective_metadata_write"
#define H5AC_COLLECTIVE_META_WRITE_SIZE sizeof(hbool_t)
#define H5AC_COLLECTIVE_META_WRITE_DEF 0
#endif /* H5_HAVE_PARALLEL */
#define H5AC_METADATA_TAG_NAME "H5AC_metadata_tag" #define H5AC_METADATA_TAG_NAME "H5AC_metadata_tag"
#define H5AC_METADATA_TAG_SIZE sizeof(haddr_t) #define H5AC_METADATA_TAG_SIZE sizeof(haddr_t)
#define H5AC_METADATA_TAG_DEF H5AC__INVALID_TAG #define H5AC_METADATA_TAG_DEF H5AC__INVALID_TAG
@ -196,17 +189,17 @@ typedef H5C_t H5AC_t;
/* Dataset transfer property list for metadata calls */ /* Dataset transfer property list for metadata calls */
H5_DLLVAR hid_t H5AC_dxpl_id; H5_DLLVAR hid_t H5AC_dxpl_id;
extern hid_t H5AC_ind_read_dxpl_id; H5_DLLVAR hid_t H5AC_ind_read_dxpl_id;
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
extern hid_t H5AC_coll_read_dxpl_id; H5_DLLVAR hid_t H5AC_coll_read_dxpl_id;
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
/* DXPL to be used in operations that will not result in I/O calls */ /* DXPL to be used in operations that will not result in I/O calls */
extern hid_t H5AC_noio_dxpl_id; H5_DLLVAR hid_t H5AC_noio_dxpl_id;
/* DXPL to be used for raw data I/O operations when one is not /* DXPL to be used for raw data I/O operations when one is not
provided by the user (fill values in H5Dcreate) */ provided by the user (fill values in H5Dcreate) */
extern hid_t H5AC_rawdata_dxpl_id; H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
/* Default cache configuration. */ /* Default cache configuration. */
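The extern-to-H5_DLLVAR change matters on platforms where shared-library globals need an explicit import/export storage class. A rough sketch of the general pattern, using made-up macro names (the real H5_DLLVAR definition lives in HDF5's platform headers):

/* mylib_public.h -- hypothetical example of the DLLVAR pattern */
#if defined(_WIN32) && defined(MYLIB_BUILDING_DLL)
#  define MYLIB_DLLVAR extern __declspec(dllexport)  /* building the DLL  */
#elif defined(_WIN32)
#  define MYLIB_DLLVAR extern __declspec(dllimport)  /* consuming the DLL */
#else
#  define MYLIB_DLLVAR extern                        /* ELF/Mach-O        */
#endif

/* A global visible across the shared-library boundary */
MYLIB_DLLVAR int mylib_debug_level;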

src/H5C.c

@ -93,7 +93,6 @@
#include "H5MFprivate.h" /* File memory management */ #include "H5MFprivate.h" /* File memory management */
#include "H5MMprivate.h" /* Memory management */ #include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */ #include "H5Pprivate.h" /* Property lists */
#include "H5SLprivate.h" /* Skip lists */
/****************/ /****************/
@ -182,7 +181,7 @@ static herr_t H5C_mark_tagged_entries(H5C_t * cache_ptr,
static herr_t H5C_flush_marked_entries(H5F_t * f, static herr_t H5C_flush_marked_entries(H5F_t * f,
hid_t dxpl_id); hid_t dxpl_id);
static herr_t H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr, static herr_t H5C__generate_image(const H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
hid_t dxpl_id, int64_t *entry_size_change_ptr); hid_t dxpl_id, int64_t *entry_size_change_ptr);
#if H5C_DO_TAGGING_SANITY_CHECKS #if H5C_DO_TAGGING_SANITY_CHECKS
@ -225,6 +224,9 @@ hbool_t H5_PKG_INIT_VAR = FALSE;
/* Declare a free list to manage the H5C_t struct */ /* Declare a free list to manage the H5C_t struct */
H5FL_DEFINE_STATIC(H5C_t); H5FL_DEFINE_STATIC(H5C_t);
/* Declare extern free list to manage the H5C_collective_write_t struct */
H5FL_EXTERN(H5C_collective_write_t);
/**************************************************************************** /****************************************************************************
@ -1006,7 +1008,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG; flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags, NULL, NULL) < 0) if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_flush_single_entry() failed.") HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")
#if H5C_DO_SANITY_CHECKS #if H5C_DO_SANITY_CHECKS
if ( entry_was_dirty ) if ( entry_was_dirty )
@ -1913,8 +1915,10 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->aux_next = NULL; entry_ptr->aux_next = NULL;
entry_ptr->aux_prev = NULL; entry_ptr->aux_prev = NULL;
#ifdef H5_HAVE_PARALLEL
entry_ptr->coll_next = NULL; entry_ptr->coll_next = NULL;
entry_ptr->coll_prev = NULL; entry_ptr->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
@ -2041,19 +2045,18 @@ H5C_insert_entry(H5F_t * f,
/* Make sure the size of the collective entries in the cache remain in check */ /* Make sure the size of the collective entries in the cache remain in check */
if(H5P_USER_TRUE == f->coll_md_read) { if(H5P_USER_TRUE == f->coll_md_read) {
if(cache_ptr->max_cache_size*80 < cache_ptr->coll_list_size*100) { if(cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) {
if(H5C_clear_coll_entries(cache_ptr, 1) < 0) if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
} /* end if */ } /* end if */
} /* end if */ } /* end if */
else { else {
if(cache_ptr->max_cache_size*40 < cache_ptr->coll_list_size*100) { if(cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) {
if(H5C_clear_coll_entries(cache_ptr, 1) < 0) if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear collective metadata entries")
} /* end if */ } /* end if */
} /* end else */ } /* end else */
} /* end if */ } /* end if */
entry_ptr->ind_access_while_coll = FALSE;
#endif #endif
done: done:
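The comparisons above are integer percentage checks: max_cache_size * 80 < coll_list_size * 100 is simply "the collective entries occupy more than 80% of the cache" written without floating point. A small illustration with hypothetical sizes (not HDF5 code):

#include <stddef.h>
#include <stdio.h>

/* Return nonzero when the collective-entry bytes exceed `percent` percent
 * of the maximum cache size, using only integer arithmetic. */
static int over_threshold(size_t max_cache_size, size_t coll_list_size,
                          unsigned percent)
{
    return max_cache_size * percent < coll_list_size * 100;
}

int main(void)
{
    size_t max_cache_size = 2 * 1024 * 1024;   /* 2 MiB cache              */
    size_t coll_list_size = 1700 * 1024;       /* ~1.66 MiB collective     */

    /* ~83% of the cache is collective, so both checks trip */
    printf("over 80%%: %d\n", over_threshold(max_cache_size, coll_list_size, 80));
    printf("over 40%%: %d\n", over_threshold(max_cache_size, coll_list_size, 40));
    return 0;
}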
@ -2724,52 +2727,52 @@ H5C_protect(H5F_t * f,
the entry in their cache still have to participate in the the entry in their cache still have to participate in the
bcast. */ bcast. */
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && coll_access && if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && coll_access) {
!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) { if(!(entry_ptr->is_dirty) && !(entry_ptr->coll_access)) {
MPI_Comm comm; /* File MPI Communicator */ MPI_Comm comm; /* File MPI Communicator */
int mpi_code; /* MPI error code */ int mpi_code; /* MPI error code */
int buf_size; int buf_size;
if(MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f))) if(MPI_COMM_NULL == (comm = H5F_mpi_get_comm(f)))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed") HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "get_comm request failed")
if(entry_ptr->image_ptr == NULL) { if(entry_ptr->image_ptr == NULL) {
int mpi_rank; int mpi_rank;
size_t image_size; size_t image_size;
if((mpi_rank = H5F_mpi_get_rank(f)) < 0) if((mpi_rank = H5F_mpi_get_rank(f)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank") HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "Can't get MPI rank")
if(entry_ptr->compressed) if(entry_ptr->compressed)
image_size = entry_ptr->compressed_size; image_size = entry_ptr->compressed_size;
else else
image_size = entry_ptr->size; image_size = entry_ptr->size;
HDassert(image_size > 0); HDassert(image_size > 0);
if(NULL == (entry_ptr->image_ptr = H5MM_malloc(image_size + H5C_IMAGE_EXTRA_SPACE))) if(NULL == (entry_ptr->image_ptr = H5MM_malloc(image_size + H5C_IMAGE_EXTRA_SPACE)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer") HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS #if H5C_DO_MEMORY_SANITY_CHECKS
HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + image_size, HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + image_size,
H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE); H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
if(0 == mpi_rank) if(0 == mpi_rank)
if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id, NULL) < 0) if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "Can't get Image") HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "can't generate entry's image")
} /* end if */
HDassert(entry_ptr->image_ptr);
H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
if(MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
/* Mark the entry as collective and insert into the collective list */
entry_ptr->coll_access = TRUE;
H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
} /* end if */ } /* end if */
else if(entry_ptr->coll_access) {
HDassert(entry_ptr->image_ptr); H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
} /* end else-if */
H5_CHECKED_ASSIGN(buf_size, int, entry_ptr->size, size_t);
if(MPI_SUCCESS != (mpi_code = MPI_Bcast(entry_ptr->image_ptr, buf_size, MPI_BYTE, 0, comm)))
HMPI_GOTO_ERROR(NULL, "MPI_Bcast failed", mpi_code)
entry_ptr->coll_access = TRUE;
H5C__INSERT_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
} /* end if */ } /* end if */
else if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && coll_access && entry_ptr->coll_access) {
H5C__MOVE_TO_TOP_IN_COLL_LIST(cache_ptr, entry_ptr, NULL)
} /* end else-if */
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
#if H5C_DO_TAGGING_SANITY_CHECKS #if H5C_DO_TAGGING_SANITY_CHECKS
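The block above has rank 0 generate the entry's on-disk image and broadcast it, so every rank ends up holding identical image bytes without each issuing its own I/O. A self-contained sketch of the serialize-once-and-broadcast pattern (plain MPI with a made-up payload, not the HDF5 cache code):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int   rank;
    int   image_size = 64;             /* image size known on every rank */
    char *image;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Every rank allocates the buffer; only rank 0 fills ("serializes") it */
    image = malloc((size_t)image_size);
    if (rank == 0)
        snprintf(image, (size_t)image_size, "serialized metadata entry");

    /* One broadcast replaces an independent read/serialize on every rank */
    MPI_Bcast(image, image_size, MPI_BYTE, 0, MPI_COMM_WORLD);

    printf("rank %d sees: %s\n", rank, image);

    free(image);
    MPI_Finalize();
    return 0;
}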
@ -3057,16 +3060,16 @@ H5C_protect(H5F_t * f,
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) { if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
/* Make sure the size of the collective entries in the cache remain in check */ /* Make sure the size of the collective entries in the cache remain in check */
if(TRUE == coll_access) { if(coll_access) {
if(H5P_USER_TRUE == f->coll_md_read) { if(H5P_USER_TRUE == f->coll_md_read) {
if(cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100) if(cache_ptr->max_cache_size * 80 < cache_ptr->coll_list_size * 100)
if(H5C_clear_coll_entries(cache_ptr, 1) < 0) if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "H5C_clear_coll_entries() failed.") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
} /* end if */ } /* end if */
else { else {
if(cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100) if(cache_ptr->max_cache_size * 40 < cache_ptr->coll_list_size * 100)
if(H5C_clear_coll_entries(cache_ptr, 1) < 0) if(H5C_clear_coll_entries(cache_ptr, TRUE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "H5C_clear_coll_entries() failed.") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, NULL, "can't clear collective metadata entries")
} /* end else */ } /* end else */
} /* end if */ } /* end if */
} /* end if */ } /* end if */
@ -7707,7 +7710,11 @@ done:
*/ */
herr_t herr_t
H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr, H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
unsigned flags, int64_t *entry_size_change_ptr, H5SL_t *collective_write_list) unsigned flags, int64_t *entry_size_change_ptr, H5SL_t
#ifndef H5_HAVE_PARALLEL
H5_ATTR_UNUSED
#endif /* NDEBUG */
*collective_write_list)
{ {
H5C_t * cache_ptr; /* Cache for file */ H5C_t * cache_ptr; /* Cache for file */
hbool_t destroy; /* external flag */ hbool_t destroy; /* external flag */
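Wrapping the attribute in a preprocessor test is a common way to silence "unused parameter" warnings only in the configurations where the parameter really is unused (here, serial builds that compile out the collective-write path). A generic sketch of the idiom with invented macro and feature names:

#include <stddef.h>

/* Expand to GCC/Clang's unused attribute where available, nothing elsewhere */
#if defined(__GNUC__)
#  define MY_ATTR_UNUSED __attribute__((unused))
#else
#  define MY_ATTR_UNUSED
#endif

/* `aux` is only consumed when FEATURE_X is enabled; in other builds, mark it
 * unused so -Wunused-parameter stays quiet without disabling the warning. */
static int process(int value, void *aux
#ifndef FEATURE_X
                   MY_ATTR_UNUSED
#endif
                   )
{
#ifdef FEATURE_X
    if (aux != NULL)
        value++;            /* stand-in for the feature-specific work */
#endif
    return value * 2;
}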
@ -7718,11 +7725,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
hbool_t write_entry; /* internal flag */ hbool_t write_entry; /* internal flag */
hbool_t destroy_entry; /* internal flag */ hbool_t destroy_entry; /* internal flag */
hbool_t was_dirty; hbool_t was_dirty;
haddr_t new_addr = HADDR_UNDEF;
haddr_t old_addr = HADDR_UNDEF;
haddr_t entry_addr = HADDR_UNDEF; haddr_t entry_addr = HADDR_UNDEF;
size_t new_len = 0;
size_t new_compressed_len = 0;
herr_t ret_value = SUCCEED; /* Return value */ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE FUNC_ENTER_PACKAGE
@ -7807,7 +7810,6 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
/* serialize the entry if necessary, and then write it to disk. */ /* serialize the entry if necessary, and then write it to disk. */
if(write_entry) { if(write_entry) {
unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
/* The entry is dirty, and we are doing either a flush, /* The entry is dirty, and we are doing either a flush,
* or a flush destroy. In either case, serialize the * or a flush destroy. In either case, serialize the
@ -7847,225 +7849,9 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
} /* end if */ } /* end if */
if(!(entry_ptr->image_up_to_date)) { if(!(entry_ptr->image_up_to_date)) {
/* reset cache_ptr->slist_changed so we can detect slist /* Generate the entry's image */
* modifications in the pre_serialize call. if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id, entry_size_change_ptr) < 0)
*/ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
cache_ptr->slist_changed = FALSE;
/* make note of the entry's current address */
old_addr = entry_ptr->addr;
/* Call client's pre-serialize callback, if there's one */
if ( ( entry_ptr->type->pre_serialize != NULL ) &&
( (entry_ptr->type->pre_serialize)(f, dxpl_id,
(void *)entry_ptr,
entry_ptr->addr,
entry_ptr->size,
entry_ptr->compressed_size,
&new_addr, &new_len,
&new_compressed_len,
&serialize_flags) < 0 ) )
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
/* set cache_ptr->slist_change_in_pre_serialize if the
* slist was modified.
*/
if(cache_ptr->slist_changed)
cache_ptr->slist_change_in_pre_serialize = TRUE;
/* Check for any flags set in the pre-serialize callback */
if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
/* Check for unexpected flags from serialize callback */
if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
H5C__SERIALIZE_MOVED_FLAG |
H5C__SERIALIZE_COMPRESSED_FLAG))
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
#ifdef H5_HAVE_PARALLEL
/* In the parallel case, resizes and moves in
* the serialize operation can cause problems.
* If they occur, scream and die.
*
* At present, in the parallel case, the aux_ptr
* will only be set if there is more than one
* process. Thus we can use this to detect
* the parallel case.
*
* This works for now, but if we start using the
* aux_ptr for other purposes, we will have to
* change this test accordingly.
*
* NB: While this test detects entryies that attempt
* to resize or move themselves during a flush
* in the parallel case, it will not detect an
* entry that dirties, resizes, and/or moves
* other entries during its flush.
*
* From what Quincey tells me, this test is
* sufficient for now, as any flush routine that
* does the latter will also do the former.
*
* If that ceases to be the case, further
* tests will be necessary.
*/
if(cache_ptr->aux_ptr != NULL)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case.")
#endif /* H5_HAVE_PARALLEL */
/* Resize the buffer if required */
if ( ( ( ! entry_ptr->compressed ) &&
( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) ) ||
( ( entry_ptr->compressed ) &&
( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) ) )
{
size_t new_image_size;
if(entry_ptr->compressed)
new_image_size = new_compressed_len;
else
new_image_size = new_len;
HDassert(new_image_size > 0);
/* Release the current image */
if(entry_ptr->image_ptr)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
/* Allocate a new image buffer */
if(NULL == (entry_ptr->image_ptr = H5MM_malloc(new_image_size + H5C_IMAGE_EXTRA_SPACE)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_image_size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
} /* end if */
/* If required, update the entry and the cache data structures
* for a resize.
*/
if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, \
entry_ptr, new_len)
/* update the hash table for the size change*/
H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, \
entry_ptr->size, \
new_len, entry_ptr, \
!(entry_ptr->is_dirty));
/* The entry can't be protected since we are
* in the process of flushing it. Thus we must
* update the replacement policy data
* structures for the size change. The macro
* deals with the pinned case.
*/
H5C__UPDATE_RP_FOR_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
/* as we haven't updated the cache data structures for
* for the flush or flush destroy yet, the entry should
* be in the slist. Thus update it for the size change.
*/
HDassert(entry_ptr->in_slist);
H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
new_len)
/* if defined, update *entry_size_change_ptr for the
* change in entry size.
*/
if(entry_size_change_ptr != NULL)
*entry_size_change_ptr = (int64_t)new_len - (int64_t)(entry_ptr->size);
/* finally, update the entry for its new size */
entry_ptr->size = new_len;
} /* end if */
/* If required, udate the entry and the cache data structures
* for a move
*/
if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
#if H5C_DO_SANITY_CHECKS
int64_t saved_slist_len_increase;
int64_t saved_slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
if(entry_ptr->addr == old_addr) {
/* we must update cache data structures for the
* change in address.
*/
/* delete the entry from the hash table and the slist */
H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr)
/* update the entry for its new address */
entry_ptr->addr = new_addr;
/* and then reinsert in the index and slist */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_SANITY_CHECKS
/* save cache_ptr->slist_len_increase and
* cache_ptr->slist_size_increase before the
* reinsertion into the slist, and restore
* them afterwards to avoid skewing our sanity
* checking.
*/
saved_slist_len_increase = cache_ptr->slist_len_increase;
saved_slist_size_increase = cache_ptr->slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_SANITY_CHECKS
cache_ptr->slist_len_increase = saved_slist_len_increase;
cache_ptr->slist_size_increase = saved_slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
}
else /* move is alread done for us -- just do sanity checks */
HDassert(entry_ptr->addr == new_addr);
} /* end if */
if(serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG) {
/* just save the new compressed entry size in
* entry_ptr->compressed_size. We don't need to
* do more, as compressed size is only used for I/O.
*/
HDassert(entry_ptr->compressed);
entry_ptr->compressed_size = new_compressed_len;
}
} /* end if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) */
/* Serialize object into buffer */
{
size_t image_len;
if(entry_ptr->compressed)
image_len = entry_ptr->compressed_size;
else
image_len = entry_ptr->size;
/* reset cache_ptr->slist_changed so we can detect slist
* modifications in the serialize call.
*/
cache_ptr->slist_changed = FALSE;
if(entry_ptr->type->serialize(f, entry_ptr->image_ptr,
image_len, (void *)entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
/* set cache_ptr->slist_change_in_serialize if the
* slist was modified.
*/
if(cache_ptr->slist_changed)
cache_ptr->slist_change_in_pre_serialize = TRUE;
#if H5C_DO_MEMORY_SANITY_CHECKS
HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + image_len,
H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
entry_ptr->image_up_to_date = TRUE;
}
} /* end if ( ! (entry_ptr->image_up_to_date) ) */ } /* end if ( ! (entry_ptr->image_up_to_date) ) */
/* Finally, write the image to disk. /* Finally, write the image to disk.
@ -8091,10 +7877,10 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
if(collective_write_list) { if(collective_write_list) {
H5C_collective_write_t *item = NULL; H5C_collective_write_t *item;
if(NULL == (item = (H5C_collective_write_t *)H5MM_malloc(sizeof(H5C_collective_write_t)))) if(NULL == (item = (H5C_collective_write_t *)H5FL_MALLOC(H5C_collective_write_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate skip list item") HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "unable to allocate skip list item")
item->length = image_size; item->length = image_size;
item->free_buf = FALSE; item->free_buf = FALSE;
@ -8103,7 +7889,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
if(H5SL_insert(collective_write_list, item, &item->offset) < 0) { if(H5SL_insert(collective_write_list, item, &item->offset) < 0) {
H5MM_free(item); H5MM_free(item);
HGOTO_ERROR(H5E_HEAP, H5E_CANTINSERT, FAIL, "unable to insert skip list item") HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
} /* end if */ } /* end if */
} /* end if */ } /* end if */
else else
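The switch from H5MM_malloc to H5FL_MALLOC above takes the skip-list items from a package free list, which recycles fixed-size nodes instead of hitting the heap allocator on every flush. A toy free-list sketch showing only the general idea (not HDF5's H5FL implementation):

#include <stdlib.h>

/* Item type the pool hands out; `next` reuses the item's own storage
 * while it sits on the free list. */
typedef struct write_item {
    struct write_item *next;
    size_t             offset;
    size_t             length;
    void              *buf;
} write_item_t;

static write_item_t *free_list = NULL;   /* recycled items, LIFO */

/* Pop a recycled item if one is available, otherwise fall back to malloc */
static write_item_t *item_alloc(void)
{
    write_item_t *item = free_list;

    if (item != NULL)
        free_list = item->next;
    else
        item = malloc(sizeof(*item));
    return item;
}

/* Push the item back onto the free list instead of calling free() */
static void item_release(write_item_t *item)
{
    item->next = free_list;
    free_list  = item;
}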
@ -8778,7 +8564,6 @@ H5C_load_entry(H5F_t * f,
entry->clear_on_unprotect = FALSE; entry->clear_on_unprotect = FALSE;
entry->flush_immediately = FALSE; entry->flush_immediately = FALSE;
entry->coll_access = coll_access; entry->coll_access = coll_access;
entry->ind_access_while_coll = FALSE;
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
entry->flush_in_progress = FALSE; entry->flush_in_progress = FALSE;
entry->destroy_in_progress = FALSE; entry->destroy_in_progress = FALSE;
@ -8799,8 +8584,10 @@ H5C_load_entry(H5F_t * f,
entry->aux_next = NULL; entry->aux_next = NULL;
entry->aux_prev = NULL; entry->aux_prev = NULL;
#ifdef H5_HAVE_PARALLEL
entry->coll_next = NULL; entry->coll_next = NULL;
entry->coll_prev = NULL; entry->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
H5C__RESET_CACHE_ENTRY_STATS(entry); H5C__RESET_CACHE_ENTRY_STATS(entry);
@ -9018,13 +8805,6 @@ H5C_make_space_in_cache(H5F_t * f,
cache_ptr->entries_scanned_to_make_space++; cache_ptr->entries_scanned_to_make_space++;
#endif /* H5C_COLLECT_CACHE_STATS */ #endif /* H5C_COLLECT_CACHE_STATS */
#ifdef H5_HAVE_PARALLEL
if(TRUE == entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
} /* end if */
#endif
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0) if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
} else { } else {
@ -10207,9 +9987,22 @@ done:
FUNC_LEAVE_NOAPI(ret_value) FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_get_entry_ring() */ } /* H5C_get_entry_ring() */
/*-------------------------------------------------------------------------
* Function: H5C__generate_image
*
* Purpose: Serialize an entry and generate its image.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Mohamad Chaarawi
* 2/10/16
*
*-------------------------------------------------------------------------
*/
static herr_t static herr_t
H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr, H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
hid_t dxpl_id, int64_t *entry_size_change_ptr) hid_t dxpl_id, int64_t *entry_size_change_ptr)
{ {
haddr_t new_addr = HADDR_UNDEF; haddr_t new_addr = HADDR_UNDEF;
haddr_t old_addr = HADDR_UNDEF; haddr_t old_addr = HADDR_UNDEF;
@ -10218,8 +10011,9 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET; unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
herr_t ret_value = SUCCEED; herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT FUNC_ENTER_STATIC
/* Sanity check */
HDassert(!entry_ptr->image_up_to_date); HDassert(!entry_ptr->image_up_to_date);
/* reset cache_ptr->slist_changed so we can detect slist /* reset cache_ptr->slist_changed so we can detect slist
@ -10231,93 +10025,91 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
old_addr = entry_ptr->addr; old_addr = entry_ptr->addr;
/* Call client's pre-serialize callback, if there's one */ /* Call client's pre-serialize callback, if there's one */
if ( ( entry_ptr->type->pre_serialize != NULL ) && if(entry_ptr->type->pre_serialize &&
( (entry_ptr->type->pre_serialize)(f, dxpl_id, (entry_ptr->type->pre_serialize)(f, dxpl_id,
(void *)entry_ptr, (void *)entry_ptr, entry_ptr->addr, entry_ptr->size,
entry_ptr->addr, entry_ptr->compressed_size, &new_addr, &new_len,
entry_ptr->size, &new_compressed_len, &serialize_flags) < 0)
entry_ptr->compressed_size, HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to pre-serialize entry")
&new_addr, &new_len,
&new_compressed_len,
&serialize_flags) < 0 ) ) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"unable to pre-serialize entry");
}
/* set cache_ptr->slist_change_in_pre_serialize if the /* set cache_ptr->slist_change_in_pre_serialize if the
* slist was modified. * slist was modified.
*/ */
if ( cache_ptr->slist_changed ) if(cache_ptr->slist_changed)
cache_ptr->slist_change_in_pre_serialize = TRUE; cache_ptr->slist_change_in_pre_serialize = TRUE;
/* Check for any flags set in the pre-serialize callback */ /* Check for any flags set in the pre-serialize callback */
if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) { if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
/* Check for unexpected flags from serialize callback */ /* Check for unexpected flags from serialize callback */
if ( serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
H5C__SERIALIZE_MOVED_FLAG | H5C__SERIALIZE_MOVED_FLAG |
H5C__SERIALIZE_COMPRESSED_FLAG)) { H5C__SERIALIZE_COMPRESSED_FLAG))
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
"unknown serialize flag(s)");
}
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
if ( cache_ptr->aux_ptr != NULL ) /* In the parallel case, resizes and moves in
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \ * the serialize operation can cause problems.
"resize/move in serialize occured in parallel case."); * If they occur, scream and die.
*
* At present, in the parallel case, the aux_ptr
* will only be set if there is more than one
* process. Thus we can use this to detect
* the parallel case.
*
* This works for now, but if we start using the
* aux_ptr for other purposes, we will have to
* change this test accordingly.
*
* NB: While this test detects entryies that attempt
* to resize or move themselves during a flush
* in the parallel case, it will not detect an
* entry that dirties, resizes, and/or moves
* other entries during its flush.
*
* From what Quincey tells me, this test is
* sufficient for now, as any flush routine that
* does the latter will also do the former.
*
* If that ceases to be the case, further
* tests will be necessary.
*/
if(cache_ptr->aux_ptr != NULL)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case.")
#endif #endif
/* Resize the buffer if required */ /* Resize the buffer if required */
if ( ( ( ! entry_ptr->compressed ) && if(((!entry_ptr->compressed) && (serialize_flags & H5C__SERIALIZE_RESIZED_FLAG)) ||
( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) ) || ((entry_ptr->compressed) && (serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG))) {
( ( entry_ptr->compressed ) &&
( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) ) ) {
size_t new_image_size; size_t new_image_size;
if ( entry_ptr->compressed ) if(entry_ptr->compressed)
new_image_size = new_compressed_len; new_image_size = new_compressed_len;
else else
new_image_size = new_len; new_image_size = new_len;
HDassert(new_image_size > 0); HDassert(new_image_size > 0);
/* Release the current image */ /* Release the current image */
if ( entry_ptr->image_ptr ) { if(entry_ptr->image_ptr)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr); entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
}
/* Allocate a new image buffer */ /* Allocate a new image buffer */
entry_ptr->image_ptr = if(NULL == (entry_ptr->image_ptr = H5MM_malloc(new_image_size + H5C_IMAGE_EXTRA_SPACE)))
H5MM_malloc(new_image_size + H5C_IMAGE_EXTRA_SPACE); HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
if ( NULL == entry_ptr->image_ptr )
{
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, \
"memory allocation failed for on disk image buffer");
}
#if H5C_DO_MEMORY_SANITY_CHECKS #if H5C_DO_MEMORY_SANITY_CHECKS
HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_image_size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + new_image_size,
H5C_IMAGE_SANITY_VALUE,
H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
} /* end if */ } /* end if */
/* If required, update the entry and the cache data structures /* If required, update the entry and the cache data structures
* for a resize. * for a resize.
*/ */
if ( serialize_flags & H5C__SERIALIZE_RESIZED_FLAG ) { if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, \
entry_ptr, new_len);
/* update the hash table for the size change*/ /* update the hash table for the size change*/
H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, \ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
entry_ptr->size, \ new_len, entry_ptr, !(entry_ptr->is_dirty));
new_len, entry_ptr, \
!(entry_ptr->is_dirty));
/* The entry can't be protected since we are /* The entry can't be protected since we are
* in the process of flushing it. Thus we must * in the process of flushing it. Thus we must
@ -10332,17 +10124,13 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
* be in the slist. Thus update it for the size change. * be in the slist. Thus update it for the size change.
*/ */
HDassert(entry_ptr->in_slist); HDassert(entry_ptr->in_slist);
H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \ H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
new_len);
/* if defined, update *entry_size_change_ptr for the /* if defined, update *entry_size_change_ptr for the
* change in entry size. * change in entry size.
*/ */
if ( entry_size_change_ptr != NULL ) if(entry_size_change_ptr != NULL)
{ *entry_size_change_ptr = (int64_t)new_len - (int64_t)(entry_ptr->size);
*entry_size_change_ptr = (int64_t)new_len;
*entry_size_change_ptr -= (int64_t)(entry_ptr->size);
}
/* finally, update the entry for its new size */ /* finally, update the entry for its new size */
entry_ptr->size = new_len; entry_ptr->size = new_len;
@ -10359,7 +10147,7 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr); H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr);
if ( entry_ptr->addr == old_addr ) { if(entry_ptr->addr == old_addr) {
/* we must update cache data structures for the /* we must update cache data structures for the
* change in address. * change in address.
*/ */
@ -10391,27 +10179,26 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
cache_ptr->slist_len_increase = saved_slist_len_increase; cache_ptr->slist_len_increase = saved_slist_len_increase;
cache_ptr->slist_size_increase = saved_slist_size_increase; cache_ptr->slist_size_increase = saved_slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */ #endif /* H5C_DO_SANITY_CHECKS */
} } /* end if */
else { else /* move is already done for us -- just do sanity checks */
HDassert(entry_ptr->addr == new_addr); HDassert(entry_ptr->addr == new_addr);
}
} /* end if */ } /* end if */
if ( serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG ) { if(serialize_flags & H5C__SERIALIZE_COMPRESSED_FLAG) {
/* just save the new compressed entry size in /* just save the new compressed entry size in
* entry_ptr->compressed_size. We don't need to * entry_ptr->compressed_size. We don't need to
* do more, as compressed size is only used for I/O. * do more, as compressed size is only used for I/O.
*/ */
HDassert(entry_ptr->compressed); HDassert(entry_ptr->compressed);
entry_ptr->compressed_size = new_compressed_len; entry_ptr->compressed_size = new_compressed_len;
} } /* end if */
} /* end if ( serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET ) */ } /* end if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) */
/* Serialize object into buffer */ /* Serialize object into buffer */
{ {
size_t image_len; size_t image_len;
if ( entry_ptr->compressed ) if(entry_ptr->compressed)
image_len = entry_ptr->compressed_size; image_len = entry_ptr->compressed_size;
else else
image_len = entry_ptr->size; image_len = entry_ptr->size;
@ -10420,33 +10207,25 @@ H5C__generate_image(H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
* modifications in the serialize call. * modifications in the serialize call.
*/ */
cache_ptr->slist_changed = FALSE; cache_ptr->slist_changed = FALSE;
if ( entry_ptr->type->serialize(f, entry_ptr->image_ptr, if(entry_ptr->type->serialize(f, entry_ptr->image_ptr, image_len, (void *)entry_ptr) < 0)
image_len, HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to serialize entry")
(void *)entry_ptr) < 0) {
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
"unable to serialize entry");
}
/* set cache_ptr->slist_change_in_serialize if the /* set cache_ptr->slist_change_in_serialize if the
* slist was modified. * slist was modified.
*/ */
if ( cache_ptr->slist_changed ) if(cache_ptr->slist_changed)
cache_ptr->slist_change_in_pre_serialize = TRUE; cache_ptr->slist_change_in_pre_serialize = TRUE;
#if H5C_DO_MEMORY_SANITY_CHECKS #if H5C_DO_MEMORY_SANITY_CHECKS
HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + image_len,
HDassert(0 == HDmemcmp(((uint8_t *)entry_ptr->image_ptr) + H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE));
image_len,
H5C_IMAGE_SANITY_VALUE,
H5C_IMAGE_EXTRA_SPACE));
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */ #endif /* H5C_DO_MEMORY_SANITY_CHECKS */
entry_ptr->image_up_to_date = TRUE; entry_ptr->image_up_to_date = TRUE;
} } /* end block */
done: done:
FUNC_LEAVE_NOAPI(ret_value) FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__generate_image */ } /* H5C__generate_image */


@ -43,13 +43,13 @@
#include "H5Eprivate.h" /* Error handling */ #include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* Files */ #include "H5Fpkg.h" /* Files */
#include "H5FDprivate.h" /* File drivers */ #include "H5FDprivate.h" /* File drivers */
#include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */ #include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */ #include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */ #include "H5Pprivate.h" /* Property lists */
#include "H5SLprivate.h" /* Skip lists */
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
/****************/ /****************/
/* Local Macros */ /* Local Macros */
/****************/ /****************/
@ -64,6 +64,9 @@
/********************/ /********************/
/* Local Prototypes */ /* Local Prototypes */
/********************/ /********************/
static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id,
H5SL_t *collective_write_list);
static herr_t H5C__collective_write_free(void *_item, void *key, void *op_data);
/*********************/ /*********************/
@ -80,6 +83,9 @@
/* Local Variables */ /* Local Variables */
/*******************/ /*******************/
/* Declare a free list to manage the H5C_collective_write_t struct */
H5FL_DEFINE(H5C_collective_write_t);
/*------------------------------------------------------------------------- /*-------------------------------------------------------------------------
@ -261,7 +267,7 @@ H5C_apply_candidate_list(H5F_t * f,
/* Create skip list of entries for collective write */ /* Create skip list of entries for collective write */
if(NULL == (collective_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL))) if(NULL == (collective_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for entries") HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for entries")
} } /* end if */
n = num_candidates / mpi_size; n = num_candidates / mpi_size;
m = num_candidates % mpi_size; m = num_candidates % mpi_size;
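The quotient and remainder above split the candidate entries across the MPI ranks. One common way to turn n and m into per-rank slices is floor division with the remainder spread over the first m ranks; a sketch of that generic scheme (not necessarily the exact assignment H5C_apply_candidate_list uses):

#include <stdio.h>

/* Give each of `nranks` ranks a contiguous slice of `ncand` candidates:
 * every rank gets ncand/nranks entries and the first ncand%nranks ranks
 * get one extra. */
static void slice(int ncand, int nranks, int rank, int *first, int *count)
{
    int n = ncand / nranks;
    int m = ncand % nranks;

    *count = n + (rank < m ? 1 : 0);
    *first = rank * n + (rank < m ? rank : m);
}

int main(void)
{
    int first, count, rank;

    for (rank = 0; rank < 4; rank++) {
        slice(10, 4, rank, &first, &count);
        printf("rank %d: candidates [%d, %d)\n", rank, first, first + count);
    }
    return 0;
}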
@ -376,7 +382,7 @@ H5C_apply_candidate_list(H5F_t * f,
if(TRUE == entry_ptr->coll_access) { if(TRUE == entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE; entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL) H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
} } /* end if */
} /* end else */ } /* end else */
} /* end for */ } /* end for */
@ -453,10 +459,8 @@ H5C_apply_candidate_list(H5F_t * f,
* will not call either the pre_serialize or serialize callbacks. * will not call either the pre_serialize or serialize callbacks.
*/ */
if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
} /* end if */ } /* end if */
/* Else, if this process needs to flush this entry. */ /* Else, if this process needs to flush this entry. */
@ -500,9 +504,9 @@ H5C_apply_candidate_list(H5F_t * f,
cache_ptr->entries_removed_counter = 0; cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL; cache_ptr->last_entry_removed_ptr = NULL;
if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, /* Add this entry to the list of entries to collectively write */
NULL, collective_write_list) < 0) if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, collective_write_list) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry.") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
if ( ( cache_ptr->entries_removed_counter > 1 ) || if ( ( cache_ptr->entries_removed_counter > 1 ) ||
( cache_ptr->last_entry_removed_ptr == entry_ptr ) ) ( cache_ptr->last_entry_removed_ptr == entry_ptr ) )
@ -655,14 +659,12 @@ H5C_apply_candidate_list(H5F_t * f,
entries_cleared++; entries_cleared++;
#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 ) #if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank, HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
(long long)clear_ptr->addr); (long long)clear_ptr->addr);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
} /* end else-if */ } /* end else-if */
/* Else, if this process needs to independently flush this entry. */ /* Else, if this process needs to independently flush this entry. */
@ -677,9 +679,9 @@ H5C_apply_candidate_list(H5F_t * f,
(long long)flush_ptr->addr); (long long)flush_ptr->addr);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */ #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, /* Add this entry to the list of entries to collectively write */
NULL, collective_write_list) < 0) if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, collective_write_list) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
} /* end else-if */ } /* end else-if */
} /* end if */ } /* end if */
@ -712,16 +714,15 @@ H5C_apply_candidate_list(H5F_t * f,
if (delayed_ptr) { if (delayed_ptr) {
if (delayed_ptr->clear_on_unprotect) { if (delayed_ptr->clear_on_unprotect) {
if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG, if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG, NULL, NULL) < 0)
NULL, NULL) < 0) HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry collectively.")
entry_ptr->clear_on_unprotect = FALSE; entry_ptr->clear_on_unprotect = FALSE;
entries_cleared++; entries_cleared++;
} else if (delayed_ptr->flush_immediately) { } else if (delayed_ptr->flush_immediately) {
if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, /* Add this entry to the list of entries to collectively write */
NULL, collective_write_list) < 0) if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, collective_write_list) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't flush entry collectively.") HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry collectively.")
entry_ptr->flush_immediately = FALSE; entry_ptr->flush_immediately = FALSE;
entries_flushed++; entries_flushed++;
@ -731,15 +732,15 @@ H5C_apply_candidate_list(H5F_t * f,
entries_flushed_or_cleared_last++; entries_flushed_or_cleared_last++;
} /* end if */ } /* end if */
/* If we've deferred writing to do it collectively, take care of that now */
if(f->coll_md_write) { if(f->coll_md_write) {
HDassert(collective_write_list); HDassert(collective_write_list);
/* Write collective list */ /* Write collective list */
if(H5C_collective_write(f, if(H5C__collective_write(f, dxpl_id, collective_write_list) < 0)
dxpl_id, HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "Can't write metadata collectively")
collective_write_list) < 0) } /* end if */
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't write metadata collectively")
}
/* ====================================================================== * /* ====================================================================== *
* Finished flushing everything. * * Finished flushing everything. *
* ====================================================================== */ * ====================================================================== */
@ -750,9 +751,9 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert((entries_flushed_collectively == entries_to_flush_collectively)); HDassert((entries_flushed_collectively == entries_to_flush_collectively));
if((entries_flushed != entries_to_flush) || if((entries_flushed != entries_to_flush) ||
(entries_cleared != entries_to_clear) || (entries_cleared != entries_to_clear) ||
(entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) || (entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) ||
(entries_flushed_collectively != entries_to_flush_collectively)) (entries_flushed_collectively != entries_to_flush_collectively))
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch.") HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch.")
done: done:
@ -760,7 +761,7 @@ done:
candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table); candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
if(collective_write_list) if(collective_write_list)
if(H5SL_destroy(collective_write_list, H5C_collective_write_free, NULL) < 0) if(H5SL_destroy(collective_write_list, H5C__collective_write_free, NULL) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list") HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list")
FUNC_LEAVE_NOAPI(ret_value) FUNC_LEAVE_NOAPI(ret_value)
@ -1132,7 +1133,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
if(TRUE == entry_ptr->coll_access) { if(TRUE == entry_ptr->coll_access) {
entry_ptr->coll_access = FALSE; entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL) H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
} } /* end if */
entry_ptr->clear_on_unprotect = TRUE; entry_ptr->clear_on_unprotect = TRUE;
#if H5C_DO_SANITY_CHECKS #if H5C_DO_SANITY_CHECKS
@ -1194,10 +1195,8 @@ H5C_mark_entries_as_clean(H5F_t * f,
entry_ptr = entry_ptr->prev; entry_ptr = entry_ptr->prev;
entries_cleared++; entries_cleared++;
if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0)
H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
} else { } else {
entry_ptr = entry_ptr->prev; entry_ptr = entry_ptr->prev;
@ -1224,10 +1223,8 @@ H5C_mark_entries_as_clean(H5F_t * f,
entry_ptr = entry_ptr->next; entry_ptr = entry_ptr->next;
entries_cleared++; entries_cleared++;
if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL, NULL) < 0 )
H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
NULL, NULL) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't clear entry.")
} else { } else {
entry_ptr = entry_ptr->next; entry_ptr = entry_ptr->next;
@ -1288,78 +1285,95 @@ done:
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
herr_t herr_t
H5C_clear_coll_entries(H5C_t * cache_ptr, hbool_t partial) H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
{ {
int32_t list_len, coll_entries_cleared = 0; int32_t clear_cnt;
H5C_cache_entry_t * entry_ptr = NULL; H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * prev_ptr;
herr_t ret_value = SUCCEED; herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT FUNC_ENTER_NOAPI_NOINIT
entry_ptr = cache_ptr->coll_tail_ptr; entry_ptr = cache_ptr->coll_tail_ptr;
list_len = cache_ptr->coll_list_len; clear_cnt = (partial ? cache_ptr->coll_list_len / 2 : cache_ptr->coll_list_len);
while(entry_ptr && clear_cnt > 0) {
while(entry_ptr && (coll_entries_cleared < (partial ? list_len/2 : list_len))) { H5C_cache_entry_t *prev_ptr = entry_ptr->coll_prev;
prev_ptr = entry_ptr->coll_prev;
/* Sanity check */
HDassert(entry_ptr->coll_access); HDassert(entry_ptr->coll_access);
/* Mark entry as independent */
entry_ptr->coll_access = FALSE; entry_ptr->coll_access = FALSE;
H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL) H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
coll_entries_cleared ++;
/* Decrement entry count */
clear_cnt--;
/* Advance to next entry */
entry_ptr = prev_ptr; entry_ptr = prev_ptr;
} } /* end while */
done: done:
FUNC_LEAVE_NOAPI(ret_value) FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_clear_coll_entries */ } /* H5C_clear_coll_entries */
herr_t
H5C_collective_write(H5F_t *f, /*-------------------------------------------------------------------------
hid_t dxpl_id, *
H5SL_t *collective_write_list) * Function: H5C__collective_write
*
* Purpose: Perform a collective write of a list of metadata entries.
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
* Programmer: Mohamad Chaarawi
* February, 2016
*
*-------------------------------------------------------------------------
*/
static herr_t
H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
{ {
H5P_genplist_t *plist = NULL; H5P_genplist_t *plist = NULL;
H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE; H5FD_mpio_xfer_t orig_xfer_mode = H5FD_MPIO_COLLECTIVE;
H5FD_mpio_xfer_t orig_xfer_mode;
H5SL_node_t *node;
H5C_collective_write_t *item;
int count; int count;
void *base_buf;
int *length_array = NULL; int *length_array = NULL;
MPI_Aint *buf_array = NULL; MPI_Aint *buf_array = NULL;
MPI_Aint *offset_array = NULL; MPI_Aint *offset_array = NULL;
MPI_Datatype btype; MPI_Datatype btype;
MPI_Datatype ftype;
hbool_t btype_created = FALSE; hbool_t btype_created = FALSE;
MPI_Datatype ftype;
hbool_t ftype_created = FALSE; hbool_t ftype_created = FALSE;
int mpi_code; int mpi_code;
int i;
herr_t ret_value = SUCCEED; herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT FUNC_ENTER_STATIC
count = (int)H5SL_count(collective_write_list);
/* Get original transfer mode */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id))) if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
if(H5P_get(plist, H5D_XFER_IO_XFER_MODE_NAME, &orig_xfer_mode) < 0) if(H5P_get(plist, H5D_XFER_IO_XFER_MODE_NAME, &orig_xfer_mode) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property") HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
/* Get number of entries in collective write list */
count = (int)H5SL_count(collective_write_list);
if(count > 0) { if(count > 0) {
H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE;
H5SL_node_t *node;
H5C_collective_write_t *item;
void *base_buf;
int i;
if(H5P_set(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0) if(H5P_set(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property") HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
/* Allocate arrays */ /* Allocate arrays */
if(NULL == (length_array = (int *)H5MM_malloc((size_t)count * sizeof(int)))) if(NULL == (length_array = (int *)H5MM_malloc((size_t)count * sizeof(int))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for collective write table length array") HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective write table length array")
if(NULL == (buf_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint)))) if(NULL == (buf_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for collective buf table length array") HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective buf table length array")
if(NULL == (offset_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint)))) if(NULL == (offset_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for collective offset table length array") HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective offset table length array")
/* Fill arrays */ /* Fill arrays */
node = H5SL_first(collective_write_list); node = H5SL_first(collective_write_list);
@ -1367,6 +1381,7 @@ H5C_collective_write(H5F_t *f,
if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node))) if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node)))
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item") HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
/* Set up initial array position & buffer base address */
length_array[0] = (int)item->length; length_array[0] = (int)item->length;
base_buf = item->buf; base_buf = item->buf;
buf_array[0] = (MPI_Aint)0; buf_array[0] = (MPI_Aint)0;
@ -1378,28 +1393,31 @@ H5C_collective_write(H5F_t *f,
if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node))) if(NULL == (item = (H5C_collective_write_t *)H5SL_item(node)))
HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item") HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
/* Set up array position */
length_array[i] = (int)item->length; length_array[i] = (int)item->length;
buf_array[i] = (MPI_Aint)item->buf - (MPI_Aint)base_buf; buf_array[i] = (MPI_Aint)item->buf - (MPI_Aint)base_buf;
offset_array[i] = (MPI_Aint)item->offset; offset_array[i] = (MPI_Aint)item->offset;
/* Advance to next node & array location */
node = H5SL_next(node); node = H5SL_next(node);
i++; i++;
} /* end while */ } /* end while */
/* Create memory mpi type */ /* Create memory MPI type */
if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype))) if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
btype_created = TRUE; btype_created = TRUE;
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&btype))) if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* Create file mpi type */ /* Create file MPI type */
if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype))) if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code) HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
ftype_created = TRUE; ftype_created = TRUE;
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&ftype))) if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code) HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* Pass buf type, file type to the file driver. */ /* Pass buf type, file type to the file driver */
if(H5FD_mpi_setup_collective(dxpl_id, &btype, &ftype) < 0) if(H5FD_mpi_setup_collective(dxpl_id, &btype, &ftype) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties") HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties")
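Note: the two derived datatypes built above follow the standard MPI pattern for scattered I/O, one hindexed type describing memory blocks (displacements relative to base_buf) and one describing file regions (absolute offsets), both sharing the same block lengths. A minimal, self-contained sketch of that pattern with made-up lengths and offsets (illustrative only, the real arrays come from the skip list above):

    int          lengths[2]    = {512, 1024};      /* entry image sizes, bytes */
    MPI_Aint     mem_disps[2]  = {0, 2048};        /* offsets from base_buf    */
    MPI_Aint     file_disps[2] = {4096, 8192};     /* absolute file addresses  */
    MPI_Datatype btype, ftype;

    MPI_Type_create_hindexed(2, lengths, mem_disps, MPI_BYTE, &btype);
    MPI_Type_commit(&btype);
    MPI_Type_create_hindexed(2, lengths, file_disps, MPI_BYTE, &ftype);
    MPI_Type_commit(&ftype);
    /* ... hand btype/ftype to the collective write, then free them ... */
    MPI_Type_free(&btype);
    MPI_Type_free(&ftype);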
@ -1417,19 +1435,16 @@ H5C_collective_write(H5F_t *f,
mpi_fh = *(MPI_File*)mpi_fh_p; mpi_fh = *(MPI_File*)mpi_fh_p;
/* just to match up with the 1st MPI_File_set_view from H5FD_mpio_write() */ /* just to match up with the 1st MPI_File_set_view from H5FD_mpio_write() */
if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)))
MPI_BYTE, "native", MPI_INFO_NULL)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
/* just to match up with MPI_File_write_at_all from H5FD_mpio_write() */ /* just to match up with MPI_File_write_at_all from H5FD_mpio_write() */
HDmemset(&mpi_stat, 0, sizeof(MPI_Status)); HDmemset(&mpi_stat, 0, sizeof(MPI_Status));
if(MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(mpi_fh, (MPI_Offset)0, NULL, 0, if(MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(mpi_fh, (MPI_Offset)0, NULL, 0, MPI_BYTE, &mpi_stat)))
MPI_BYTE, &mpi_stat)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code) HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code)
/* just to match up with the 2nd MPI_File_set_view (reset) in H5FD_mpio_write() */ /* just to match up with the 2nd MPI_File_set_view (reset) in H5FD_mpio_write() */
if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)))
MPI_BYTE, "native", MPI_INFO_NULL)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
} /* end else */ } /* end else */
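Note: the else-branch above exists so that ranks with nothing to write still make the same sequence of collective MPI-IO calls as the writers; a zero-count MPI_File_write_at_all is enough to satisfy the collective requirement. Reduced to its essentials (names assumed from the surrounding code):

    MPI_Status mpi_stat;

    /* match the writers' set_view / write_at_all / set_view sequence */
    MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL);
    MPI_File_write_at_all(mpi_fh, (MPI_Offset)0, NULL, 0, MPI_BYTE, &mpi_stat);   /* zero bytes */
    MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL);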
@ -1453,23 +1468,37 @@ done:
} /* end if */ } /* end if */
FUNC_LEAVE_NOAPI(ret_value); FUNC_LEAVE_NOAPI(ret_value);
} /* end H5C_collective_write() */ } /* end H5C__collective_write() */
herr_t
H5C_collective_write_free(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
/*-------------------------------------------------------------------------
*
* Function: H5C__collective_write_free
*
* Purpose: Release node on collective write skiplist
*
* Return: FAIL if error is detected, SUCCEED otherwise.
*
* Programmer: Mohamad Chaarawi
* February, 2016
*
*-------------------------------------------------------------------------
*/
static herr_t
H5C__collective_write_free(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED *op_data)
{ {
H5C_collective_write_t *item = (H5C_collective_write_t *)_item; H5C_collective_write_t *item = (H5C_collective_write_t *)_item;
FUNC_ENTER_NOAPI_NOINIT_NOERR FUNC_ENTER_STATIC_NOERR
HDassert(item); /* Sanity check */
HDassert(item);
if(item->free_buf) if(item->free_buf)
item->buf = H5MM_xfree(item->buf); item->buf = H5MM_xfree(item->buf);
/*!FIXME change to use free list for items */
H5MM_free(item); H5FL_FREE(H5C_collective_write_t, item);
FUNC_LEAVE_NOAPI(SUCCEED) FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5C_collective_write_free() */ } /* end H5C__collective_write_free() */
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
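Note: making H5C__collective_write_free() static means the skip list that owns these nodes has to be torn down from inside this file. A plausible call site, assuming the internal skip-list API keeps its usual (list, operator, op_data) shape for H5SL_destroy():

    if(collective_write_list)
        if(H5SL_destroy(collective_write_list, H5C__collective_write_free, NULL) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't destroy collective write skip list")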
@ -3133,11 +3133,11 @@ if ( ( (entry_ptr) == NULL ) || \
(cache_ptr)->coll_list_size, \ (cache_ptr)->coll_list_size, \
(fail_val)) \ (fail_val)) \
\ \
H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \ H5C__COLL_DLL_PREPEND((entry_ptr), (cache_ptr)->coll_head_ptr, \
(cache_ptr)->coll_tail_ptr, \ (cache_ptr)->coll_tail_ptr, \
(cache_ptr)->coll_list_len, \ (cache_ptr)->coll_list_len, \
(cache_ptr)->coll_list_size, \ (cache_ptr)->coll_list_size, \
(fail_val)) \ (fail_val)) \
\ \
} /* H5C__MOVE_TO_TOP_IN_COLL_LIST */ } /* H5C__MOVE_TO_TOP_IN_COLL_LIST */
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
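Note: only the re-indented H5C__COLL_DLL_PREPEND half of the macro is visible in this hunk; presumably a matching H5C__COLL_DLL_REMOVE precedes it in the elided lines, so that moving an entry to the top of the collective list is a remove followed by a prepend. Conceptual expansion (not the macro text; the REMOVE name is an assumption):

    H5C__COLL_DLL_REMOVE(entry_ptr, cache_ptr->coll_head_ptr, cache_ptr->coll_tail_ptr,
                         cache_ptr->coll_list_len, cache_ptr->coll_list_size, fail_val)
    H5C__COLL_DLL_PREPEND(entry_ptr, cache_ptr->coll_head_ptr, cache_ptr->coll_tail_ptr,
                          cache_ptr->coll_list_len, cache_ptr->coll_list_size, fail_val)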
@ -4254,6 +4254,7 @@ typedef struct H5C_collective_write_t {
} H5C_collective_write_t; } H5C_collective_write_t;
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
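Note: the hunk shows only the closing brace of H5C_collective_write_t; from the fields used earlier in this diff (length, offset, buf, free_buf) the node type presumably looks roughly like this (reconstruction, not the actual header text):

    typedef struct H5C_collective_write_t {
        size_t   length;      /* size of the entry image, in bytes        */
        hbool_t  free_buf;    /* whether buf must be freed with the node  */
        void    *buf;         /* entry image to write                     */
        haddr_t  offset;      /* file address of the entry                */
    } H5C_collective_write_t;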
/*****************************/ /*****************************/
/* Package Private Variables */ /* Package Private Variables */
/*****************************/ /*****************************/
@ -4264,10 +4265,6 @@ typedef struct H5C_collective_write_t {
/******************************/ /******************************/
H5_DLL herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5_DLL herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id,
H5C_cache_entry_t *entry_ptr, unsigned flags, int64_t *entry_size_change_ptr, H5SL_t *collective_write_list); H5C_cache_entry_t *entry_ptr, unsigned flags, int64_t *entry_size_change_ptr, H5SL_t *collective_write_list);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5C_collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list);
H5_DLL herr_t H5C_collective_write_free(void *_item, void *key, void *op_data);
#endif /* H5_HAVE_PARALLEL */
#endif /* _H5Cpkg_H */ #endif /* _H5Cpkg_H */
@ -1609,7 +1609,6 @@ typedef struct H5C_cache_entry_t {
hbool_t clear_on_unprotect; hbool_t clear_on_unprotect;
hbool_t flush_immediately; hbool_t flush_immediately;
hbool_t coll_access; hbool_t coll_access;
hbool_t ind_access_while_coll;
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
hbool_t flush_in_progress; hbool_t flush_in_progress;
hbool_t destroy_in_progress; hbool_t destroy_in_progress;
@ -1633,8 +1632,10 @@ typedef struct H5C_cache_entry_t {
struct H5C_cache_entry_t * prev; struct H5C_cache_entry_t * prev;
struct H5C_cache_entry_t * aux_next; struct H5C_cache_entry_t * aux_next;
struct H5C_cache_entry_t * aux_prev; struct H5C_cache_entry_t * aux_prev;
#ifdef H5_HAVE_PARALLEL
struct H5C_cache_entry_t * coll_next; struct H5C_cache_entry_t * coll_next;
struct H5C_cache_entry_t * coll_prev; struct H5C_cache_entry_t * coll_prev;
#endif /* H5_HAVE_PARALLEL */
#if H5C_COLLECT_CACHE_ENTRY_STATS #if H5C_COLLECT_CACHE_ENTRY_STATS
/* cache entry stats fields */ /* cache entry stats fields */
@ -45,7 +45,7 @@
/****************/ /****************/
#include "H5Dmodule.h" /* This source code file is part of the H5D module */ #include "H5Dmodule.h" /* This source code file is part of the H5D module */
#define H5F_FRIEND /*suppress error about including H5Fpkg */
/***********/ /***********/
/* Headers */ /* Headers */
@ -56,7 +56,7 @@
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
#include "H5Dpkg.h" /* Dataset functions */ #include "H5Dpkg.h" /* Dataset functions */
#include "H5Eprivate.h" /* Error handling */ #include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* File functions */ #include "H5Fprivate.h" /* File functions */
#include "H5FLprivate.h" /* Free Lists */ #include "H5FLprivate.h" /* Free Lists */
#include "H5Iprivate.h" /* IDs */ #include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */ #include "H5MMprivate.h" /* Memory management */
@ -2652,7 +2652,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
if(!H5D__chunk_cinfo_cache_found(&dset->shared->cache.chunk.last, udata)) { if(!H5D__chunk_cinfo_cache_found(&dset->shared->cache.chunk.last, udata)) {
H5D_chk_idx_info_t idx_info; /* Chunked index info */ H5D_chk_idx_info_t idx_info; /* Chunked index info */
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
H5P_coll_md_read_flag_t temp_flag; /* temp flag to hold the coll metadata read setting */ H5P_coll_md_read_flag_t temp_cmr; /* Temp value to hold the coll metadata read setting */
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
/* Compose chunked index info struct */ /* Compose chunked index info struct */
@ -2668,9 +2668,9 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
as it is highly unlikely that users would read the as it is highly unlikely that users would read the
same chunks from all processes. MSC - might turn on same chunks from all processes. MSC - might turn on
for root node? */ for root node? */
temp_flag = idx_info.f->coll_md_read; temp_cmr = H5F_COLL_MD_READ(idx_info.f);
idx_info.f->coll_md_read = H5P_FORCE_FALSE; H5F_set_coll_md_read(idx_info.f, H5P_FORCE_FALSE);
} } /* end if */
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
/* Go get the chunk information */ /* Go get the chunk information */
@ -2678,9 +2678,8 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query chunk address") HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't query chunk address")
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI)) { if(H5F_HAS_FEATURE(idx_info.f, H5FD_FEAT_HAS_MPI))
idx_info.f->coll_md_read = temp_flag; H5F_set_coll_md_read(idx_info.f, temp_cmr);
}
#endif /* H5_HAVE_PARALLEL */ #endif /* H5_HAVE_PARALLEL */
/* Cache the information retrieved */ /* Cache the information retrieved */
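Note: the change above replaces direct pokes at f->coll_md_read with the new accessor pair, but the save / force-independent / restore pattern is unchanged. In outline:

    #ifdef H5_HAVE_PARALLEL
        H5P_coll_md_read_flag_t saved_cmr = H5F_COLL_MD_READ(idx_info.f);

        H5F_set_coll_md_read(idx_info.f, H5P_FORCE_FALSE);   /* per-rank reads only */
    #endif /* H5_HAVE_PARALLEL */

        /* ... per-rank chunk index lookup ... */

    #ifdef H5_HAVE_PARALLEL
        H5F_set_coll_md_read(idx_info.f, saved_cmr);          /* restore caller's setting */
    #endif /* H5_HAVE_PARALLEL */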
@ -141,8 +141,7 @@ H5Dcreate1(hid_t loc_id, const char *name, hid_t type_id, hid_t space_id,
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not dataset create property list ID") HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not dataset create property list ID")
/* Build and open the new dataset */ /* Build and open the new dataset */
if(NULL == (dset = H5D__create_named(&loc, name, type_id, space, H5P_LINK_CREATE_DEFAULT, if(NULL == (dset = H5D__create_named(&loc, name, type_id, space, H5P_LINK_CREATE_DEFAULT, dcpl_id, H5P_DATASET_ACCESS_DEFAULT, H5AC_dxpl_id)))
dcpl_id, H5P_DATASET_ACCESS_DEFAULT, H5AC_dxpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create dataset") HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create dataset")
/* Register the new dataset to get an ID for it */ /* Register the new dataset to get an ID for it */
@ -2110,3 +2110,33 @@ done:
FUNC_LEAVE_NOAPI(ret_value) FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__set_eoa() */ } /* end H5F__set_eoa() */
#ifdef H5_HAVE_PARALLEL
/*-------------------------------------------------------------------------
* Function: H5F_set_coll_md_read
*
* Purpose: Set the coll_md_read field with a new value.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: Quincey Koziol
* 2/10/16
*
*-------------------------------------------------------------------------
*/
void
H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t cmr)
{
/* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
FUNC_ENTER_NOAPI_NOINIT_NOERR
/* Sanity check */
HDassert(f);
f->coll_md_read = cmr;
FUNC_LEAVE_NOAPI_VOID
} /* H5F_set_coll_md_read() */
#endif /* H5_HAVE_PARALLEL */
@ -27,6 +27,9 @@
#include "H5FDpublic.h" /* File drivers */ #include "H5FDpublic.h" /* File drivers */
/* Private headers needed by this file */ /* Private headers needed by this file */
#ifdef H5_HAVE_PARALLEL
#include "H5Pprivate.h" /* Property lists */
#endif /* H5_HAVE_PARALLEL */
#include "H5VMprivate.h" /* Vectors and arrays */ #include "H5VMprivate.h" /* Vectors and arrays */
@ -312,6 +315,9 @@
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (((F)->shared->grp_btree_shared = (RC)) ? SUCCEED : FAIL) #define H5F_SET_GRP_BTREE_SHARED(F, RC) (((F)->shared->grp_btree_shared = (RC)) ? SUCCEED : FAIL)
#define H5F_USE_TMP_SPACE(F) ((F)->shared->use_tmp_space) #define H5F_USE_TMP_SPACE(F) ((F)->shared->use_tmp_space)
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_addr_le((F)->shared->tmp_addr, (ADDR))) #define H5F_IS_TMP_ADDR(F, ADDR) (H5F_addr_le((F)->shared->tmp_addr, (ADDR)))
#ifdef H5_HAVE_PARALLEL
#define H5F_COLL_MD_READ(F) ((F)->coll_md_read)
#endif /* H5_HAVE_PARALLEL */
#else /* H5F_MODULE */ #else /* H5F_MODULE */
#define H5F_INTENT(F) (H5F_get_intent(F)) #define H5F_INTENT(F) (H5F_get_intent(F))
#define H5F_OPEN_NAME(F) (H5F_get_open_name(F)) #define H5F_OPEN_NAME(F) (H5F_get_open_name(F))
@ -354,6 +360,9 @@
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (H5F_set_grp_btree_shared((F), (RC))) #define H5F_SET_GRP_BTREE_SHARED(F, RC) (H5F_set_grp_btree_shared((F), (RC)))
#define H5F_USE_TMP_SPACE(F) (H5F_use_tmp_space(F)) #define H5F_USE_TMP_SPACE(F) (H5F_use_tmp_space(F))
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_is_tmp_addr((F), (ADDR))) #define H5F_IS_TMP_ADDR(F, ADDR) (H5F_is_tmp_addr((F), (ADDR)))
#ifdef H5_HAVE_PARALLEL
#define H5F_COLL_MD_READ(F) (H5F_coll_md_read(F))
#endif /* H5_HAVE_PARALLEL */
#endif /* H5F_MODULE */ #endif /* H5F_MODULE */
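Note: H5F_COLL_MD_READ follows the file package's usual dual definition, a direct field access when compiled inside the H5F module and a call to the new H5F_coll_md_read() getter everywhere else. A hypothetical caller outside the package would simply write:

    #ifdef H5_HAVE_PARALLEL
        if(H5P_FORCE_FALSE == H5F_COLL_MD_READ(f))
            /* collective metadata reads are currently suppressed for this file */
            ;
    #endif /* H5_HAVE_PARALLEL */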
@ -639,6 +648,10 @@ H5_DLL struct H5UC_t *H5F_grp_btree_shared(const H5F_t *f);
H5_DLL herr_t H5F_set_grp_btree_shared(H5F_t *f, struct H5UC_t *rc); H5_DLL herr_t H5F_set_grp_btree_shared(H5F_t *f, struct H5UC_t *rc);
H5_DLL hbool_t H5F_use_tmp_space(const H5F_t *f); H5_DLL hbool_t H5F_use_tmp_space(const H5F_t *f);
H5_DLL hbool_t H5F_is_tmp_addr(const H5F_t *f, haddr_t addr); H5_DLL hbool_t H5F_is_tmp_addr(const H5F_t *f, haddr_t addr);
#ifdef H5_HAVE_PARALLEL
H5_DLL H5P_coll_md_read_flag_t H5F_coll_md_read(const H5F_t *f);
H5_DLL void H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t flag);
#endif /* H5_HAVE_PARALLEL */
/* Functions that retrieve values from VFD layer */ /* Functions that retrieve values from VFD layer */
H5_DLL hid_t H5F_get_driver_id(const H5F_t *f); H5_DLL hid_t H5F_get_driver_id(const H5F_t *f);
@ -1073,3 +1073,31 @@ H5F_use_tmp_space(const H5F_t *f)
FUNC_LEAVE_NOAPI(f->shared->use_tmp_space) FUNC_LEAVE_NOAPI(f->shared->use_tmp_space)
} /* end H5F_use_tmp_space() */ } /* end H5F_use_tmp_space() */
#ifdef H5_HAVE_PARALLEL
/*-------------------------------------------------------------------------
* Function: H5F_coll_md_read
*
* Purpose: Retrieve the 'collective metadata reads' flag for the file.
*
* Return: Success: Non-negative, the 'collective metadata reads' flag
* Failure: (can't happen)
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* Feb 10 2016
*
*-------------------------------------------------------------------------
*/
H5P_coll_md_read_flag_t
H5F_coll_md_read(const H5F_t *f)
{
/* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
FUNC_ENTER_NOAPI_NOINIT_NOERR
HDassert(f);
FUNC_LEAVE_NOAPI(f->coll_md_read)
} /* end H5F_coll_md_read() */
#endif /* H5_HAVE_PARALLEL */
@ -179,7 +179,6 @@
#ifdef H5_DEBUG_BUILD #ifdef H5_DEBUG_BUILD
/* dxpl I/O type - private property */ /* dxpl I/O type - private property */
#define H5FD_DXPL_TYPE_SIZE sizeof(H5FD_dxpl_type_t) #define H5FD_DXPL_TYPE_SIZE sizeof(H5FD_dxpl_type_t)
#define H5FD_DXPL_TYPE_DEF H5FD_NOIO_DXPL
#endif /* H5_DEBUG_BUILD */ #endif /* H5_DEBUG_BUILD */
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
/* Definition for reading metadata collectively */ /* Definition for reading metadata collectively */
@ -187,7 +186,9 @@
#define H5D_XFER_COLL_MD_READ_DEF H5P_USER_FALSE #define H5D_XFER_COLL_MD_READ_DEF H5P_USER_FALSE
#define H5D_XFER_COLL_MD_READ_ENC H5P__encode_coll_md_read_flag_t #define H5D_XFER_COLL_MD_READ_ENC H5P__encode_coll_md_read_flag_t
#define H5D_XFER_COLL_MD_READ_DEC H5P__decode_coll_md_read_flag_t #define H5D_XFER_COLL_MD_READ_DEC H5P__decode_coll_md_read_flag_t
#endif H5_HAVE_PARALLEL #endif /* H5_HAVE_PARALLEL */
/******************/ /******************/
/* Local Typedefs */ /* Local Typedefs */
/******************/ /******************/
@ -29,7 +29,9 @@
/* Headers */ /* Headers */
/***********/ /***********/
#include "H5private.h" /* Generic Functions */ #include "H5private.h" /* Generic Functions */
#include "H5ACprivate.h" /* Metadata cache */ #ifdef H5_HAVE_PARALLEL
#include "H5ACprivate.h" /* Metadata cache */
#endif /* H5_HAVE_PARALLEL */
#include "H5Eprivate.h" /* Error handling */ #include "H5Eprivate.h" /* Error handling */
#include "H5Fprivate.h" /* File access */ #include "H5Fprivate.h" /* File access */
#include "H5FLprivate.h" /* Free lists */ #include "H5FLprivate.h" /* Free lists */
@ -5452,7 +5454,15 @@ H5P_get_class(const H5P_genplist_t *plist)
*/ */
herr_t herr_t
H5P_verify_apl_and_dxpl(hid_t *acspl_id, const H5P_libclass_t *libclass, hid_t *dxpl_id, H5P_verify_apl_and_dxpl(hid_t *acspl_id, const H5P_libclass_t *libclass, hid_t *dxpl_id,
hid_t loc_id, hbool_t is_collective) hid_t
#ifndef H5_HAVE_PARALLEL
H5_ATTR_UNUSED
#endif /* H5_HAVE_PARALLEL */
loc_id, hbool_t
#ifndef H5_HAVE_PARALLEL
H5_ATTR_UNUSED
#endif /* H5_HAVE_PARALLEL */
is_collective)
{ {
herr_t ret_value = SUCCEED; /* Return value */ herr_t ret_value = SUCCEED; /* Return value */
@ -5495,8 +5505,6 @@ H5P_verify_apl_and_dxpl(hid_t *acspl_id, const H5P_libclass_t *libclass, hid_t *
if(TRUE != H5P_isa_class(*acspl_id, *libclass->class_id)) if(TRUE != H5P_isa_class(*acspl_id, *libclass->class_id))
HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not the required access property list") HGOTO_ERROR(H5E_PLIST, H5E_BADTYPE, FAIL, "not the required access property list")
*dxpl_id = H5AC_ind_read_dxpl_id;
#ifdef H5_HAVE_PARALLEL #ifdef H5_HAVE_PARALLEL
/* Get the plist structure for the access property list */ /* Get the plist structure for the access property list */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(*acspl_id))) if(NULL == (plist = (H5P_genplist_t *)H5I_object(*acspl_id)))
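Note: splitting the parameter list like this keeps unused-parameter warnings quiet in serial builds, where loc_id and is_collective are never examined. The same idiom in a minimal, hypothetical helper:

    static herr_t
    check_collective(hid_t
    #ifndef H5_HAVE_PARALLEL
        H5_ATTR_UNUSED
    #endif /* H5_HAVE_PARALLEL */
        loc_id, hbool_t
    #ifndef H5_HAVE_PARALLEL
        H5_ATTR_UNUSED
    #endif /* H5_HAVE_PARALLEL */
        is_collective)
    {
    #ifdef H5_HAVE_PARALLEL
        /* loc_id / is_collective drive the collective-metadata decision here */
    #endif /* H5_HAVE_PARALLEL */
        return SUCCEED;
    }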
@ -94,6 +94,7 @@
#define H5L_ACS_COLL_MD_READ_ENC H5P__encode_coll_md_read_flag_t #define H5L_ACS_COLL_MD_READ_ENC H5P__encode_coll_md_read_flag_t
#define H5L_ACS_COLL_MD_READ_DEC H5P__decode_coll_md_read_flag_t #define H5L_ACS_COLL_MD_READ_DEC H5P__decode_coll_md_read_flag_t
/******************/ /******************/
/* Local Typedefs */ /* Local Typedefs */
/******************/ /******************/
@ -222,8 +223,7 @@ H5P__lacc_reg_prop(H5P_genclass_t *pclass)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
/* Register the metadata collective read flag */ /* Register the metadata collective read flag */
if(H5P_register_real(pclass, H5_COLL_MD_READ_FLAG_NAME, H5L_ACS_COLL_MD_READ_SIZE, if(H5P_register_real(pclass, H5_COLL_MD_READ_FLAG_NAME, H5L_ACS_COLL_MD_READ_SIZE, &H5L_def_coll_md_read_g,
&H5L_def_coll_md_read_g,
NULL, NULL, NULL, H5L_ACS_COLL_MD_READ_ENC, H5L_ACS_COLL_MD_READ_DEC, NULL, NULL, NULL, H5L_ACS_COLL_MD_READ_ENC, H5L_ACS_COLL_MD_READ_DEC,
NULL, NULL, NULL, NULL) < 0) NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class") HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
@ -41,13 +41,13 @@
#define H5P_CLASS(P) (H5P_get_class(P)) #define H5P_CLASS(P) (H5P_get_class(P))
#endif /* H5P_MODULE */ #endif /* H5P_MODULE */
#define H5_COLL_MD_READ_FLAG_NAME "collective_metadata_read"
/****************************/ /****************************/
/* Library Private Typedefs */ /* Library Private Typedefs */
/****************************/ /****************************/
#define H5_COLL_MD_READ_FLAG_NAME "collective_metadata_read"
typedef enum H5P_coll_md_read_flag_t { typedef enum H5P_coll_md_read_flag_t {
H5P_FORCE_FALSE = -1, H5P_FORCE_FALSE = -1,
H5P_USER_FALSE = 0, H5P_USER_FALSE = 0,
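Note: the hunk is cut off before the end of the enum; the two values shown, plus (presumably) a user-true value, give a tri-state that lets the library force collective metadata reads off regardless of what the user requested. Reconstructed shape, with the trailing member an assumption:

    typedef enum H5P_coll_md_read_flag_t {
        H5P_FORCE_FALSE = -1,      /* library override: never read collectively  */
        H5P_USER_FALSE  = 0,       /* user did not request collective reads      */
        H5P_USER_TRUE   = 1        /* user requested collective reads (assumed)  */
    } H5P_coll_md_read_flag_t;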
@ -611,10 +611,10 @@ H5Rdereference2(hid_t obj_id, hid_t oapl_id, H5R_type_t ref_type, const void *_r
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid reference type") HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid reference type")
if(_ref == NULL) if(_ref == NULL)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid reference pointer") HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid reference pointer")
/* Verify access property list and get correct dxpl */ /* Verify access property list and get correct dxpl */
if(H5P_verify_apl_and_dxpl(&oapl_id, H5P_CLS_DACC, &dxpl_id, obj_id, FALSE) < 0) if(H5P_verify_apl_and_dxpl(&oapl_id, H5P_CLS_DACC, &dxpl_id, obj_id, FALSE) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTSET, FAIL, "can't set access and transfer property lists") HGOTO_ERROR(H5E_REFERENCE, H5E_CANTSET, FAIL, "can't set access and transfer property lists")
/* Get the file pointer from the entry */ /* Get the file pointer from the entry */
file = loc.oloc->file; file = loc.oloc->file;
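Note: only the error class changes here (H5E_OHDR becomes H5E_REFERENCE); the call itself is untouched. For reference, typical application-level use of the routine being edited, where dset_id and ref are placeholders:

    hobj_ref_t ref;        /* previously read from a reference-typed dataset */
    hid_t      obj_id;

    if((obj_id = H5Rdereference2(dset_id, H5P_DEFAULT, H5R_OBJECT, &ref)) < 0)
        /* handle the error */ ;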
@ -7595,7 +7595,7 @@ main(int argc, char **argv)
#endif #endif
/* enable the collective metadata read property */ /* enable the collective metadata read property */
if ( world_mpi_rank != world_server_mpi_rank ) { if ( world_mpi_rank != world_server_mpi_rank ) {
if ( H5Pset_coll_metadata_read(fapl, 1) < 0 ) { if ( H5Pset_coll_metadata_read(fapl, TRUE) < 0 ) {
nerrors++; nerrors++;
if ( verbose ) { if ( verbose ) {
@ -888,7 +888,7 @@ void independent_group_read(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type); plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
H5Pset_coll_metadata_read(plist, 0); H5Pset_coll_metadata_read(plist, FALSE);
fid = H5Fopen(filename, H5F_ACC_RDONLY, plist); fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
H5Pclose(plist); H5Pclose(plist);
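Note: the test now passes FALSE (an hbool_t) instead of the bare 0. The surrounding pattern, opening the file with per-rank metadata reads while everything else stays parallel, boils down to this sketch (assuming the property routines used elsewhere in this diff):

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    H5Pset_coll_metadata_read(fapl, FALSE);            /* independent metadata reads */
    fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
    H5Pclose(fapl);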
@ -4948,9 +4948,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */ /* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info); ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), ""); VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_read(ret_pl, true); ret = H5Pset_coll_metadata_read(ret_pl, TRUE);
VRFY((ret >= 0), ""); VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, true); ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
VRFY((ret >= 0), ""); VRFY((ret >= 0), "");
return(ret_pl); return(ret_pl);
} }
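Note: same TRUE/FALSE cleanup as above; the helper ends up enabling both collective metadata reads and collective metadata writes on the returned fapl. Equivalent standalone sketch (property routines as used in this diff):

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    H5Pset_fapl_mpio(fapl, comm, info);
    H5Pset_coll_metadata_read(fapl, TRUE);     /* metadata reads are collective  */
    H5Pset_coll_metadata_write(fapl, TRUE);    /* metadata writes are collective */
    /* ... use fapl for H5Fcreate/H5Fopen, then H5Pclose(fapl) ... */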
@ -272,9 +272,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/* set Parallel access with communicator */ /* set Parallel access with communicator */
ret = H5Pset_fapl_mpio(ret_pl, comm, info); ret = H5Pset_fapl_mpio(ret_pl, comm, info);
VRFY((ret >= 0), ""); VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_read(ret_pl, true); ret = H5Pset_coll_metadata_read(ret_pl, TRUE);
VRFY((ret >= 0), ""); VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, true); ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
VRFY((ret >= 0), ""); VRFY((ret >= 0), "");
return(ret_pl); return(ret_pl);
} }
@ -536,6 +536,7 @@ int main(int argc, char **argv)
AddTest("denseattr", test_dense_attr, NULL, AddTest("denseattr", test_dense_attr, NULL,
"Store Dense Attributes", PARATESTFILE); "Store Dense Attributes", PARATESTFILE);
/* Display testing information */ /* Display testing information */
TestInfo(argv[0]); TestInfo(argv[0]);