Mirror of https://github.com/HDFGroup/hdf5.git (synced 2025-04-12 17:31:09 +08:00)
Remove "collective write list" parameter from H5C__flush_single_entry() as it
is only used in a couple of places. The collective write list has been moved to be internal to the cache data structure instead.
parent 460b573a73
commit 2cb2d5a533
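In short, the flush routine keeps only its four core arguments, and the skip list of entries destined for a deferred collective metadata write becomes a field of the cache itself. The following is a condensed before/after sketch assembled from the hunks below; it is not a verbatim excerpt, and surrounding declarations and the non-collective write path are abbreviated.

/* Old interface: every caller had to thread the skip list through; in the
 * hunks below, only H5C_apply_candidate_list() ever supplies a real list,
 * and all other call sites pass a literal NULL. */
herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id,
    H5C_cache_entry_t *entry_ptr, unsigned flags,
    H5SL_t *collective_write_list);

/* New interface: the list lives in the cache as cache_ptr->coll_write_list,
 * created in H5C_apply_candidate_list() and consumed by H5C__collective_write(). */
herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id,
    H5C_cache_entry_t *entry_ptr, unsigned flags);

/* Inside H5C__flush_single_entry(), queuing an entry for the collective
 * write now goes through the cache-resident list: */
#ifdef H5_HAVE_PARALLEL
    if(cache_ptr->coll_write_list) {
        if(H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
    } /* end if */
    else
        /* ... else branch (direct write) elided ... */
#endif /* H5_HAVE_PARALLEL */

The payoff is visible throughout the diff: every call site loses the extra argument (usually a literal NULL), and H5C__collective_write() no longer needs the list passed in because it can reach it through f->shared->cache.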
src/H5C.c (39 changed lines)
@@ -366,6 +366,7 @@ H5C_create(size_t max_cache_size,
     cache_ptr->coll_list_size = (size_t)0;
     cache_ptr->coll_head_ptr = NULL;
     cache_ptr->coll_tail_ptr = NULL;
+    cache_ptr->coll_write_list = NULL;
 #endif /* H5_HAVE_PARALLEL */
 
     cache_ptr->cLRU_list_len = 0;
@@ -811,7 +812,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
         /* Delete the entry from the skip list on destroy */
         flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
 
-    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags, NULL) < 0)
+    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags) < 0)
         HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't flush entry")
 
 done:
@@ -3111,7 +3112,7 @@ H5C_unprotect(H5F_t * f,
             /* Delete the entry from the skip list on destroy */
             flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
 
-            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
 
         }
@@ -3125,7 +3126,7 @@ H5C_unprotect(H5F_t * f,
             else if(test_entry_ptr != entry_ptr)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?.")
 
-            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
         }
 #endif /* H5_HAVE_PARALLEL */
@@ -4318,7 +4319,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
                 cache_ptr->entries_removed_counter = 0;
                 cache_ptr->last_entry_removed_ptr = NULL;
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
 
                 if(cache_ptr->entries_removed_counter > 1 || cache_ptr->last_entry_removed_ptr == prev_ptr)
@@ -4329,7 +4330,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
 
                     bytes_evicted += entry_ptr->size;
 
-                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0 )
+                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
                 }
 
@@ -4412,7 +4413,7 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
                 prev_ptr = entry_ptr->prev;
 
                 if ( ! (entry_ptr->is_dirty) ) {
-                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
                 }
                 /* just skip the entry if it is dirty, as we can't do
@@ -5209,7 +5210,7 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
                      */
                     protected_entries++;
                 } else if(entry_ptr->is_pinned) {
-                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__DURING_FLUSH_FLAG, NULL) < 0)
+                    if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
 
                     if(cache_ptr->slist_changed) {
@@ -5226,8 +5227,7 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
                         } /* end if */
                     } /* end if */
                     else {
-                        if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
-                                (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), NULL) < 0)
+                        if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
                             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
 
                         if(cache_ptr->slist_changed) {
@@ -5324,8 +5324,7 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
                  * or three entries.
                  */
                 cache_ptr->entry_watched_for_removal = next_entry_ptr;
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr,
-                        (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG), NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
 
                 /* Check for the next entry getting removed */
@@ -5613,7 +5612,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
                         protected_entries++;
                     } /* end if */
                     else {
-                        if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG), NULL) < 0)
+                        if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
                             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
 
                         if(cache_ptr->slist_changed) {
@@ -5721,11 +5720,7 @@ done:
  */
 herr_t
 H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
-    unsigned flags, H5SL_t
-#ifndef H5_HAVE_PARALLEL
-    H5_ATTR_UNUSED
-#endif /* NDEBUG */
-    *collective_write_list)
+    unsigned flags)
 {
     H5C_t *     cache_ptr;              /* Cache for file */
     hbool_t     destroy;                /* external flag */
@@ -5855,8 +5850,8 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
 
         if(((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0) {
 #ifdef H5_HAVE_PARALLEL
-            if(collective_write_list) {
-                if(H5SL_insert(collective_write_list, entry_ptr, &entry_ptr->addr) < 0)
+            if(cache_ptr->coll_write_list) {
+                if(H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
             } /* end if */
             else
@@ -6679,7 +6674,7 @@ H5C_make_space_in_cache(H5F_t * f,
                 } /* end if */
 #endif
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
 
                 if ( ( cache_ptr->entries_removed_counter > 1 ) ||
@@ -6696,7 +6691,7 @@ H5C_make_space_in_cache(H5F_t * f,
                 cache_ptr->entries_scanned_to_make_space++;
 #endif /* H5C_COLLECT_CACHE_STATS */
 
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
             } else {
                 /* We have enough space so don't flush clean entry. */
@@ -6837,7 +6832,7 @@ H5C_make_space_in_cache(H5F_t * f,
 #ifdef H5_HAVE_PARALLEL
             if(!(entry_ptr->coll_access)) {
 #endif /* H5_HAVE_PARALLEL */
-                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
 #ifdef H5_HAVE_PARALLEL
             } /* end if */
@@ -64,8 +64,7 @@
 /********************/
 /* Local Prototypes */
 /********************/
-static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id,
-    H5SL_t *collective_write_list);
+static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id);
 
 
 /*********************/
@@ -226,7 +225,6 @@ H5C_apply_candidate_list(H5F_t * f,
     H5C_cache_entry_t * entry_ptr = NULL;
     H5C_cache_entry_t * flush_ptr = NULL;
     H5C_cache_entry_t * delayed_ptr = NULL;
-    H5SL_t            * collective_write_list = NULL;
 #if H5C_DO_SANITY_CHECKS
     haddr_t             last_addr;
 #endif /* H5C_DO_SANITY_CHECKS */
@@ -260,8 +258,11 @@ H5C_apply_candidate_list(H5F_t * f,
 #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
 
     if(f->coll_md_write) {
+        /* Sanity check */
+        HDassert(NULL == cache_ptr->coll_write_list);
+
         /* Create skip list of entries for collective write */
-        if(NULL == (collective_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
+        if(NULL == (cache_ptr->coll_write_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
             HGOTO_ERROR(H5E_DATASET, H5E_CANTCREATE, FAIL, "can't create skip list for entries")
     } /* end if */
 
@@ -475,7 +476,7 @@ H5C_apply_candidate_list(H5F_t * f,
                     cache_ptr->entries_removed_counter = 0;
                     cache_ptr->last_entry_removed_ptr = NULL;
 
-                    if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                    if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
 
                     if((cache_ptr->entries_removed_counter > 1) ||
@@ -525,7 +526,7 @@ H5C_apply_candidate_list(H5F_t * f,
                     cache_ptr->last_entry_removed_ptr = NULL;
 
                     /* Add this entry to the list of entries to collectively write */
-                    if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, collective_write_list) < 0)
+                    if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                         HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
 
                     if((cache_ptr->entries_removed_counter > 1) ||
@@ -681,7 +682,7 @@ H5C_apply_candidate_list(H5F_t * f,
                           (long long)clear_ptr->addr);
 #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
 
-                if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
             } /* end else-if */
 
@@ -698,7 +699,7 @@ H5C_apply_candidate_list(H5F_t * f,
 #endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
 
                 /* Add this entry to the list of entries to collectively write */
-                if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, collective_write_list) < 0)
+                if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                     HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
             } /* end else-if */
         } /* end if */
@@ -732,14 +733,14 @@ H5C_apply_candidate_list(H5F_t * f,
     if (delayed_ptr) {
 
         if (delayed_ptr->clear_on_unprotect) {
-            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
 
             entry_ptr->clear_on_unprotect = FALSE;
             entries_cleared++;
         } else if (delayed_ptr->flush_immediately) {
             /* Add this entry to the list of entries to collectively write */
-            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, collective_write_list) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry collectively.")
 
             entry_ptr->flush_immediately = FALSE;
@@ -752,10 +753,11 @@ H5C_apply_candidate_list(H5F_t * f,
 
     /* If we've deferred writing to do it collectively, take care of that now */
     if(f->coll_md_write) {
-        HDassert(collective_write_list);
+        /* Sanity check */
+        HDassert(cache_ptr->coll_write_list);
 
         /* Write collective list */
-        if(H5C__collective_write(f, dxpl_id, collective_write_list) < 0)
+        if(H5C__collective_write(f, dxpl_id) < 0)
             HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "Can't write metadata collectively")
     } /* end if */
 
@@ -778,9 +780,11 @@ done:
     if(candidate_assignment_table != NULL)
         candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
 
-    if(collective_write_list)
-        if(H5SL_close(collective_write_list) < 0)
+    if(cache_ptr->coll_write_list) {
+        if(H5SL_close(cache_ptr->coll_write_list) < 0)
             HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list")
+        cache_ptr->coll_write_list = NULL;
+    } /* end if */
 
     FUNC_LEAVE_NOAPI(ret_value)
 } /* H5C_apply_candidate_list() */
@@ -1019,7 +1023,7 @@ done:
  *
  *              Note that unlike H5C_apply_candidate_list(),
  *              H5C_mark_entries_as_clean() makes all its calls to
- *              H6C_flush_single_entry() with the
+ *              H5C__flush_single_entry() with the
  *              H5C__FLUSH_CLEAR_ONLY_FLAG set.  As a result,
  *              the pre_serialize() and serialize calls are not made.
 *
@@ -1178,7 +1182,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
          *
          * Note that unlike H5C_apply_candidate_list(),
          * H5C_mark_entries_as_clean() makes all its calls to
-         * H6C_flush_single_entry() with the H5C__FLUSH_CLEAR_ONLY_FLAG
+         * H5C__flush_single_entry() with the H5C__FLUSH_CLEAR_ONLY_FLAG
          * set.  As a result, the pre_serialize() and serialize calls are
          * not made.
          *
@@ -1213,7 +1217,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
             entry_ptr = entry_ptr->prev;
             entries_cleared++;
 
-            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
         } else {
 
@@ -1241,7 +1245,7 @@ H5C_mark_entries_as_clean(H5F_t * f,
             entry_ptr = entry_ptr->next;
             entries_cleared++;
 
-            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0 )
+            if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
         } else {
 
@@ -1349,8 +1353,9 @@ done:
  *-------------------------------------------------------------------------
  */
 static herr_t
-H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
+H5C__collective_write(H5F_t *f, hid_t dxpl_id)
 {
+    H5AC_t              *cache_ptr;
     H5P_genplist_t      *plist = NULL;
     H5FD_mpio_xfer_t    orig_xfer_mode = H5FD_MPIO_COLLECTIVE;
     int                 count;
@@ -1366,6 +1371,12 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
 
     FUNC_ENTER_STATIC
 
+    /* Sanity checks */
+    HDassert(f != NULL);
+    cache_ptr = f->shared->cache;
+    HDassert(cache_ptr != NULL);
+    HDassert(cache_ptr->coll_write_list != NULL);
+
     /* Get original transfer mode */
     if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
         HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
@@ -1373,7 +1384,7 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
         HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
 
     /* Get number of entries in collective write list */
-    count = (int)H5SL_count(collective_write_list);
+    count = (int)H5SL_count(cache_ptr->coll_write_list);
 
     if(count > 0) {
         H5FD_mpio_xfer_t    xfer_mode = H5FD_MPIO_COLLECTIVE;
@@ -1394,7 +1405,7 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id, H5SL_t *collective_write_list)
             HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective offset table length array")
 
         /* Fill arrays */
-        node = H5SL_first(collective_write_list);
+        node = H5SL_first(cache_ptr->coll_write_list);
         HDassert(node);
         if(NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
             HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
src/H5Cpkg.h (16 changed lines)
@@ -3430,7 +3430,7 @@ typedef struct H5C_tag_info_t {
  *              entry is removed from the cache by any means (eviction,
  *              expungement, or take ownership at this point in time).
  *              Functions that perform scans on lists may set this field
- *              to zero prior to calling H5C_flush_single_entry().
+ *              to zero prior to calling H5C__flush_single_entry().
  *              Unexpected changes to the counter indicate that an entry
  *              was removed from the cache as a side effect of the flush.
 *
@@ -3438,7 +3438,7 @@ typedef struct H5C_tag_info_t {
  *              which contained the last entry to be removed from the cache,
  *              or NULL if there either is no such entry, or if a function
  *              performing a scan of a list has set this field to NULL prior
- *              to calling H5C_flush_single_entry().
+ *              to calling H5C__flush_single_entry().
  *
  *              WARNING!!! This field must NEVER be dereferenced.  It is
  *              maintained to allow functions that perform scans of lists
@@ -4069,17 +4069,17 @@ typedef struct H5C_tag_info_t {
  *              obtain estimates of how frequently these restarts occur.
  *
  * slist_scan_restarts: Number of times a scan of the slist (that contains
- *              calls to H5C_flush_single_entry()) has been restarted to
+ *              calls to H5C__flush_single_entry()) has been restarted to
  *              avoid potential issues with change of status of the next
  *              entry in the scan.
  *
  * LRU_scan_restarts: Number of times a scan of the LRU list (that contains
- *              calls to H5C_flush_single_entry()) has been restarted to
+ *              calls to H5C__flush_single_entry()) has been restarted to
  *              avoid potential issues with change of status of the next
  *              entry in the scan.
  *
  * hash_bucket_scan_restarts: Number of times a scan of a hash bucket list
- *              (that contains calls to H5C_flush_single_entry()) has been
+ *              (that contains calls to H5C__flush_single_entry()) has been
  *              restarted to avoid potential issues with change of status
  *              of the next entry in the scan.
 *
@@ -4205,10 +4205,14 @@ struct H5C_t {
     H5C_cache_entry_t *         dLRU_tail_ptr;
 
 #ifdef H5_HAVE_PARALLEL
+    /* Fields for collective metadata reads */
     int32_t                     coll_list_len;
     size_t                      coll_list_size;
     H5C_cache_entry_t *         coll_head_ptr;
     H5C_cache_entry_t *         coll_tail_ptr;
+
+    /* Fields for collective metadata writes */
+    H5SL_t *                    coll_write_list;
 #endif /* H5_HAVE_PARALLEL */
 
     /* Fields for automatic cache size adjustment */
@@ -4328,7 +4332,7 @@ H5_DLLVAR const H5C_class_t H5C__epoch_marker_class;
 
 /* General routines */
 H5_DLL herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id,
-    H5C_cache_entry_t *entry_ptr, unsigned flags, H5SL_t *collective_write_list);
+    H5C_cache_entry_t *entry_ptr, unsigned flags);
 H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f, hid_t dxpl_id);
 H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
     H5C_tag_iter_cb_t cb, void *cb_ctx);
@@ -467,7 +467,7 @@ H5C__evict_tagged_entries_cb(H5C_cache_entry_t *entry, void *_ctx)
         ctx->pinned_entries_need_evicted = TRUE;
     else {
         /* Evict the Entry */
-        if(H5C__flush_single_entry(ctx->f, ctx->dxpl_id, entry, H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG, NULL) < 0)
+        if(H5C__flush_single_entry(ctx->f, ctx->dxpl_id, entry, H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
             HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, H5_ITER_ERROR, "Entry eviction failed.")
         ctx->evicted_entries_last_pass = TRUE;
     } /* end else */