[svn-r27237] Description:

Merge v3 metadata cache changes to trunk.  Yay! :-)

Tested on:
    MacOSX/64 10.10.3 (amazon) w/serial & parallel
    Linux/32 2.6.x (jam) w/serial & parallel
    (daily tested on branch)
Quincey Koziol 2015-06-17 19:58:49 -05:00
parent 6e9e9e0dd2
commit f30ab55f41
89 changed files with 20202 additions and 13688 deletions
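
The most widespread change in this diff is the access-mode argument of H5AC_protect(): the H5AC_protect_t enum (H5AC_READ / H5AC_WRITE) is replaced by an unsigned flags word (H5AC__READ_ONLY_FLAG for read-only access, H5AC__NO_FLAGS_SET for read-write). A minimal before/after sketch of one such call site, taken from the H5B_find() hunk below:

    /* Before the merge: access mode passed as an enum */
    if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
        HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")

    /* After the merge: access mode passed as flags */
    if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
        HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")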


@ -128,8 +128,7 @@ static herr_t H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__receive_candidate_list(const H5AC_t *cache_ptr,
int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t primary_dxpl_id,
hid_t secondary_dxpl_id);
static herr_t H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id);
static herr_t H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
haddr_t *candidates_list_ptr);
static herr_t H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id);
@ -201,6 +200,7 @@ static const char *H5AC_entry_type_names[H5AC_NTYPES] =
"fixed array data block",
"fixed array data block pages",
"superblock",
"driver info",
"test entry" /* for testing only -- not used for actual files */
};
@ -575,7 +575,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#endif /* H5_HAVE_PARALLEL */
/* Destroy the cache */
if(H5C_dest(f, dxpl_id, H5AC_dxpl_id) < 0)
if(H5C_dest(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "can't destroy cache")
f->shared->cache = NULL;
@ -628,8 +628,7 @@ H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
HDassert(type->clear);
HDassert(type->dest);
HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
#if H5AC__TRACE_FILE_ENABLED
@ -645,7 +644,7 @@ H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
}
#endif /* H5AC__TRACE_FILE_ENABLED */
if(H5C_expunge_entry(f, dxpl_id, H5AC_dxpl_id, type, addr, flags) < 0)
if(H5C_expunge_entry(f, dxpl_id, type, addr, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_expunge_entry() failed.")
done:
@ -710,7 +709,7 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
/* Flush the cache */
/* (Again, in parallel - writes out the superblock) */
if(H5C_flush_cache(f, dxpl_id, H5AC_dxpl_id, H5AC__NO_FLAGS_SET) < 0)
if(H5C_flush_cache(f, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache.")
done:
@ -818,8 +817,7 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
HDassert(type->flush);
HDassert(type->size);
HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
HDassert(thing);
@ -841,7 +839,7 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
#endif /* H5AC__TRACE_FILE_ENABLED */
/* Insert entry into metadata cache */
if(H5C_insert_entry(f, dxpl_id, H5AC_dxpl_id, type, addr, thing, flags) < 0)
if(H5C_insert_entry(f, dxpl_id, type, addr, thing, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_insert_entry() failed")
#if H5AC__TRACE_FILE_ENABLED
@ -1138,9 +1136,8 @@ done:
*/
void *
H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
void *udata, H5AC_protect_t rw)
void *udata, unsigned flags)
{
unsigned protect_flags = H5C__NO_FLAGS_SET;
void * thing; /* Pointer to native data structure for entry */
#if H5AC__TRACE_FILE_ENABLED
char trace[128] = "";
@ -1156,47 +1153,36 @@ H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
HDassert(type->flush);
HDassert(type->load);
HDassert(type->serialize);
HDassert(H5F_addr_defined(addr));
/* Check for unexpected flags -- H5C__FLUSH_COLLECTIVELY_FLAG
* only permitted in the parallel case.
*/
#ifdef H5_HAVE_PARALLEL
HDassert(0 == (flags & (unsigned)(~(H5C__READ_ONLY_FLAG | \
H5C__FLUSH_LAST_FLAG | \
H5C__FLUSH_COLLECTIVELY_FLAG))));
#else /* H5_HAVE_PARALLEL */
HDassert(0 == (flags & (unsigned)(~(H5C__READ_ONLY_FLAG | \
H5C__FLUSH_LAST_FLAG))));
#endif /* H5_HAVE_PARALLEL */
/* Check for invalid access request */
if(0 == (H5F_INTENT(f) & H5F_ACC_RDWR) && rw == H5AC_WRITE)
if((0 == (H5F_INTENT(f) & H5F_ACC_RDWR)) && (0 == (flags & H5C__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "no write intent on file")
#if H5AC__TRACE_FILE_ENABLED
/* For the protect call, only the addr and type id is really necessary
* in the trace file. Include the size of the entry protected as a
* sanity check. Also indicate whether the call was successful to
* catch occult errors.
/* For the protect call, only the addr, size, type id, and flags are
* necessary in the trace file. Also indicate whether the call was
* successful to catch occult errors.
*/
if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr))) {
const char * rw_string;
if ( rw == H5AC_WRITE ) {
rw_string = "H5AC_WRITE";
} else if ( rw == H5AC_READ ) {
rw_string = "H5AC_READ";
} else {
rw_string = "???";
}
sprintf(trace, "%s 0x%lx %d %s", FUNC, (unsigned long)addr,
(int)(type->id), rw_string);
}
if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
sprintf(trace, "%s 0x%lx %d 0x%x", FUNC, (unsigned long)addr,
(int)(type->id), flags);
#endif /* H5AC__TRACE_FILE_ENABLED */
if ( rw == H5AC_READ )
protect_flags |= H5C__READ_ONLY_FLAG;
if(NULL == (thing = H5C_protect(f, dxpl_id, H5AC_dxpl_id, type, addr, udata, protect_flags)))
if(NULL == (thing = H5C_protect(f, dxpl_id, type, addr, udata, flags)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed.")
#if H5AC__TRACE_FILE_ENABLED
@ -1438,8 +1424,8 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
HDassert(f->shared);
HDassert(f->shared->cache);
HDassert(type);
HDassert(type->clear);
HDassert(type->flush);
HDassert(type->deserialize);
HDassert(type->image_len);
HDassert(H5F_addr_defined(addr));
HDassert(thing);
HDassert( ((H5AC_info_t *)thing)->addr == addr );
@ -1462,9 +1448,11 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
* the entry.
*/
if(dirtied && !deleted) {
hbool_t curr_compressed = FALSE; /* dummy for call */
size_t curr_size = 0;
size_t curr_compressed_size = 0; /* dummy for call */
if((type->size)(f, thing, &curr_size) < 0)
if((type->image_len)(thing, &curr_size, &curr_compressed, &curr_compressed_size) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGETSIZE, FAIL, "Can't get size of thing")
if(((H5AC_info_t *)thing)->size != curr_size)
@ -1483,7 +1471,7 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
} /* end if */
#endif /* H5_HAVE_PARALLEL */
if(H5C_unprotect(f, dxpl_id, H5AC_dxpl_id, type, addr, thing, flags) < 0)
if(H5C_unprotect(f, dxpl_id, type, addr, thing, flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5C_unprotect() failed.")
#ifdef H5_HAVE_PARALLEL
@ -3419,7 +3407,7 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id)
aux_ptr->write_permitted = TRUE;
/* Apply the candidate list */
result = H5C_apply_candidate_list(f, dxpl_id, dxpl_id, cache_ptr, num_candidates,
result = H5C_apply_candidate_list(f, dxpl_id, cache_ptr, num_candidates,
candidates_list_ptr, aux_ptr->mpi_rank, aux_ptr->mpi_size);
/* Disable writes again */
@ -3563,7 +3551,7 @@ H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f, hid_t dxpl_id)
HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
} /* end if */
else {
if(H5AC__receive_and_apply_clean_list(f, dxpl_id, H5AC_dxpl_id) < 0)
if(H5AC__receive_and_apply_clean_list(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't receive and/or process clean slist broadcast.")
} /* end else */
@ -3667,7 +3655,7 @@ done:
*/
#ifdef H5_HAVE_PARALLEL
static herr_t
H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t primary_dxpl_id, hid_t secondary_dxpl_id)
H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id)
{
H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
@ -3693,7 +3681,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t primary_dxpl_id, hid_t second
if(num_entries > 0)
/* mark the indicated entries as clean */
if(H5C_mark_entries_as_clean(f, primary_dxpl_id, secondary_dxpl_id, (int32_t)num_entries, haddr_buf_ptr) < 0)
if(H5C_mark_entries_as_clean(f, dxpl_id, (int32_t)num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
/* if it is defined, call the sync point done callback. Note
@ -3860,7 +3848,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id)
aux_ptr->write_permitted = TRUE;
/* Apply the candidate list */
result = H5C_apply_candidate_list(f, dxpl_id, dxpl_id, cache_ptr, num_entries,
result = H5C_apply_candidate_list(f, dxpl_id, cache_ptr, num_entries,
haddr_buf_ptr, aux_ptr->mpi_rank, aux_ptr->mpi_size);
/* Disable writes again */
@ -4073,7 +4061,7 @@ H5AC__rsp__p0_only__flush(H5F_t *f, hid_t dxpl_id)
aux_ptr->write_permitted = TRUE;
/* Flush the cache */
result = H5C_flush_cache(f, dxpl_id, dxpl_id, H5AC__NO_FLAGS_SET);
result = H5C_flush_cache(f, dxpl_id, H5AC__NO_FLAGS_SET);
/* Disable writes again */
aux_ptr->write_permitted = FALSE;
@ -4197,7 +4185,7 @@ H5AC__rsp__p0_only__flush_to_min_clean(H5F_t *f, hid_t dxpl_id)
aux_ptr->write_permitted = TRUE;
/* Flush the cache */
result = H5C_flush_to_min_clean(f, dxpl_id, H5AC_dxpl_id);
result = H5C_flush_to_min_clean(f, dxpl_id);
/* Disable writes again */
aux_ptr->write_permitted = FALSE;
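
A second recurring change in the hunks above is that the old primary/secondary data transfer property list pair collapses to a single dxpl_id, so internal H5C calls drop the extra H5AC_dxpl_id argument. For example, excerpted from the H5AC_flush() hunk above:

    /* Before the merge: separate metadata dxpl passed alongside the caller's dxpl */
    if(H5C_flush_cache(f, dxpl_id, H5AC_dxpl_id, H5AC__NO_FLAGS_SET) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache.")

    /* After the merge: a single dxpl_id */
    if(H5C_flush_cache(f, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache.")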


@ -392,6 +392,12 @@ typedef struct H5AC_aux_t
} H5AC_aux_t; /* struct H5AC_aux_t */
/* Package scoped functions */
H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr,
void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void));
#endif /* H5_HAVE_PARALLEL */
#endif /* _H5ACpkg_H */


@ -79,6 +79,7 @@ typedef enum {
H5AC_FARRAY_DBLOCK_ID, /*fixed array data block */
H5AC_FARRAY_DBLK_PAGE_ID, /*fixed array data block page */
H5AC_SUPERBLOCK_ID, /* file superblock */
H5AC_DRVRINFO_ID, /* driver info block (supplements superblock)*/
H5AC_TEST_ID, /*test entry -- not used for actual files */
H5AC_NTYPES /* Number of types, must be last */
} H5AC_type_t;
@ -91,6 +92,17 @@ typedef enum {
* times for debugging purposes.
*
* Hence the following, somewhat odd set of #defines.
*
* NOTE: test/cache plays games with the f->shared->cache, and thus
* setting H5AC_DUMP_STATS_ON_CLOSE will generate constant,
* irrelevant data when run with that test program. See
* comments on setup_cache() / takedown_cache() in test/cache_common.c.
* for details.
*
* If you need to dump stats at file close in test/cache.c,
* use the dump_stats parameter to takedown_cache(), or call
* H5C_stats() directly.
* JRM -- 4/12/15
*/
#if H5C_COLLECT_CACHE_STATS
@ -113,83 +125,50 @@ typedef enum {
/*
* Class methods pertaining to caching. Each type of cached object will
* have a constant variable with permanent life-span that describes how
* to cache the object. That variable will be of type H5AC_class_t and
* have the following required fields...
*
* LOAD: Loads an object from disk to memory. The function
* should allocate some data structure and return it.
*
* FLUSH: Writes some data structure back to disk. It would be
* wise for the data structure to include dirty flags to
* indicate whether it really needs to be written. This
* function is also responsible for freeing memory allocated
* by the LOAD method if the DEST argument is non-zero (by
* calling the DEST method).
*
* DEST: Just frees memory allocated by the LOAD method.
*
* CLEAR: Just marks object as non-dirty.
*
* NOTIFY: Notify client that an action on an entry has taken/will take
* place
*
* SIZE: Report the size (on disk) of the specified cache object.
* Note that the space allocated on disk may not be contiguous.
* to cache the object.
*/
#define H5AC_CALLBACK__NO_FLAGS_SET H5C_CALLBACK__NO_FLAGS_SET
#define H5AC_CALLBACK__SIZE_CHANGED_FLAG H5C_CALLBACK__SIZE_CHANGED_FLAG
#define H5AC_CALLBACK__MOVED_FLAG H5C_CALLBACK__MOVED_FLAG
#define H5AC__SERIALIZE_RESIZED_FLAG H5C__SERIALIZE_RESIZED_FLAG
#define H5AC__SERIALIZE_MOVED_FLAG H5C__SERIALIZE_MOVED_FLAG
#define H5AC__SERIALIZE_COMPRESSED_FLAG H5C__SERIALIZE_COMPRESSED_FLAG
/* Aliases for 'notify action' type & values */
typedef H5C_notify_action_t H5AC_notify_action_t;
#define H5AC_NOTIFY_ACTION_AFTER_INSERT H5C_NOTIFY_ACTION_AFTER_INSERT
#define H5AC_NOTIFY_ACTION_AFTER_LOAD H5C_NOTIFY_ACTION_AFTER_LOAD
#define H5AC_NOTIFY_ACTION_AFTER_FLUSH H5C_NOTIFY_ACTION_AFTER_FLUSH
#define H5AC_NOTIFY_ACTION_BEFORE_EVICT H5C_NOTIFY_ACTION_BEFORE_EVICT
typedef H5C_load_func_t H5AC_load_func_t;
typedef H5C_flush_func_t H5AC_flush_func_t;
typedef H5C_dest_func_t H5AC_dest_func_t;
typedef H5C_clear_func_t H5AC_clear_func_t;
typedef H5C_notify_func_t H5AC_notify_func_t;
typedef H5C_size_func_t H5AC_size_func_t;
#define H5AC__CLASS_NO_FLAGS_SET H5C__CLASS_NO_FLAGS_SET
#define H5AC__CLASS_SPECULATIVE_LOAD_FLAG H5C__CLASS_SPECULATIVE_LOAD_FLAG
#define H5AC__CLASS_COMPRESSED_FLAG H5C__CLASS_COMPRESSED_FLAG
/* The following flags should only appear in test code */
#define H5AC__CLASS_NO_IO_FLAG H5C__CLASS_NO_IO_FLAG
#define H5AC__CLASS_SKIP_READS H5C__CLASS_SKIP_READS
#define H5AC__CLASS_SKIP_WRITES H5C__CLASS_SKIP_WRITES
typedef H5C_get_load_size_func_t H5AC_get_load_size_func_t;
typedef H5C_deserialize_func_t H5AC_deserialize_func_t;
typedef H5C_image_len_func_t H5AC_image_len_func_t;
#define H5AC__SERIALIZE_NO_FLAGS_SET H5C__SERIALIZE_NO_FLAGS_SET
#define H5AC__SERIALIZE_RESIZED_FLAG H5C__SERIALIZE_RESIZED_FLAG
#define H5AC__SERIALIZE_MOVED_FLAG H5C__SERIALIZE_MOVED_FLAG
typedef H5C_pre_serialize_func_t H5AC_pre_serialize_func_t;
typedef H5C_serialize_func_t H5AC_serialize_func_t;
typedef H5C_notify_func_t H5AC_notify_func_t;
typedef H5C_free_icr_func_t H5AC_free_icr_func_t;
typedef H5C_clear_func_t H5AC_clear_func_t;
typedef H5C_get_fsf_size_t H5AC_get_fsf_size_t;
typedef H5C_class_t H5AC_class_t;
/* The H5AC_NSLOTS #define is now obsolete, as the metadata cache no longer
* uses slots. However I am leaving it in for now to avoid modifying the
* interface between the metadata cache and the rest of HDF. It should
* be removed when we get to dealing with the size_hint parameter in
* H5AC_create().
* JRM - 5/20/04
*
* Old comment on H5AC_NSLOTS follows:
*
* A cache has a certain number of entries. Objects are mapped into a
* cache entry by hashing the object's file address. Each file has its
* own cache, an array of slots.
*/
#define H5AC_NSLOTS 10330 /* The library "likes" this number... */
/* Cache entry info */
typedef H5C_cache_entry_t H5AC_info_t;
/*===----------------------------------------------------------------------===
* Protect Types
*===----------------------------------------------------------------------===
*
* These are for the wrapper functions to H5AC_protect. They specify what
* type of operation you're planning on doing to the metadata. The
* Flexible Parallel HDF5 locking can then act accordingly.
*/
typedef enum H5AC_protect_t {
H5AC_WRITE, /* Protect object for writing */
H5AC_READ /* Protect object for reading */
} H5AC_protect_t;
/* Typedef for metadata cache (defined in H5Cpkg.h) */
typedef H5C_t H5AC_t;
@ -317,6 +296,7 @@ H5_DLLVAR hid_t H5AC_ind_dxpl_id;
#define H5AC__FLUSH_CLEAR_ONLY_FLAG H5C__FLUSH_CLEAR_ONLY_FLAG
#define H5AC__FLUSH_MARKED_ENTRIES_FLAG H5C__FLUSH_MARKED_ENTRIES_FLAG
#define H5AC__FLUSH_IGNORE_PROTECTED_FLAG H5C__FLUSH_IGNORE_PROTECTED_FLAG
#define H5AC__READ_ONLY_FLAG H5C__READ_ONLY_FLAG
#define H5AC__FREE_FILE_SPACE_FLAG H5C__FREE_FILE_SPACE_FLAG
#define H5AC__TAKE_OWNERSHIP_FLAG H5C__TAKE_OWNERSHIP_FLAG
#define H5AC__FLUSH_LAST_FLAG H5C__FLUSH_LAST_FLAG
@ -346,7 +326,7 @@ H5_DLL herr_t H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *typ
H5_DLL herr_t H5AC_pin_protected_entry(void *thing);
H5_DLL herr_t H5AC_create_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
haddr_t addr, void *udata, H5AC_protect_t rw);
haddr_t addr, void *udata, unsigned flags);
H5_DLL herr_t H5AC_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5AC_unpin_entry(void *thing);
H5_DLL herr_t H5AC_destroy_flush_dependency(void *parent_thing, void *child_thing);
@ -359,10 +339,6 @@ H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type,
H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id,
const H5AC_class_t *type, haddr_t addr, unsigned flags);
H5_DLL herr_t H5AC_set_sync_point_done_callback(H5C_t *cache_ptr,
void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
H5_DLL herr_t H5AC_set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void));
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr,
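
The typedef aliases in this header swap the v2 client callback types (load / flush / dest / clear / notify / size) for the v3 serialize-based set (get_load_size / deserialize / image_len / pre_serialize / serialize / notify / free_icr / clear / get_fsf_size). As an illustration of how a cache client's prototypes change, the sketch below mirrors the B-link tree cache hunk at the end of this diff:

    /* v2 client callbacks (removed) */
    static H5B_t *H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
    static herr_t H5B__flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B_t *b, unsigned *flags_ptr);
    static herr_t H5B__dest(H5F_t *f, H5B_t *bt);
    static herr_t H5B__clear(H5F_t *f, H5B_t *b, hbool_t destroy);
    static herr_t H5B__compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_ptr);

    /* v3 client callbacks (added) */
    static herr_t H5B__get_load_size(const void *udata, size_t *image_len);
    static void *H5B__deserialize(const void *image, size_t len, void *udata, hbool_t *dirty);
    static herr_t H5B__image_len(const void *thing, size_t *image_len, hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
    static herr_t H5B__serialize(const H5F_t *f, void *image, size_t len, void *thing);
    static herr_t H5B__free_icr(void *thing);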


@ -336,7 +336,7 @@ H5B_find(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void *u
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
rt = bt->nchildren;
@ -485,7 +485,7 @@ H5B__split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
cache_udata.f = f;
cache_udata.type = shared->type;
cache_udata.rc_shared = bt_ud->bt->rc_shared;
if(NULL == (split_bt_ud->bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, split_bt_ud->addr, &cache_udata, H5AC_WRITE)))
if(NULL == (split_bt_ud->bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, split_bt_ud->addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree")
split_bt_ud->bt->level = bt_ud->bt->level;
@ -518,7 +518,7 @@ H5B__split(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud, unsigned idx,
if(H5F_addr_defined(bt_ud->bt->right)) {
H5B_t *tmp_bt;
if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt_ud->bt->right, &cache_udata, H5AC_WRITE)))
if(NULL == (tmp_bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt_ud->bt->right, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load right sibling")
tmp_bt->left = split_bt_ud->addr;
@ -597,7 +597,7 @@ H5B_insert(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
bt_ud.addr = addr;
if(NULL == (bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
if(NULL == (bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to locate root of B-tree")
/* Insert the object */
@ -901,7 +901,7 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
* Follow the minimum branch out of this node to a subtree.
*/
child_bt_ud.addr = bt->child[idx];
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
@ -947,7 +947,7 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
*/
idx = bt->nchildren - 1;
child_bt_ud.addr = bt->child[idx];
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
@ -1002,7 +1002,7 @@ H5B__insert_helper(H5F_t *f, hid_t dxpl_id, H5B_ins_ud_t *bt_ud,
*/
HDassert(idx < bt->nchildren);
child_bt_ud.addr = bt->child[idx];
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC_WRITE)))
if(NULL == (child_bt_ud.bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, child_bt_ud.addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node")
if((int)(my_ins = H5B__insert_helper(f, dxpl_id, &child_bt_ud, type,
@ -1164,7 +1164,7 @@ H5B__iterate_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t ad
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to load B-tree node")
/* Iterate over node's children */
@ -1287,7 +1287,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load B-tree node")
rt = bt->nchildren;
@ -1387,7 +1387,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
* "critical" for any child in its node to maintain this
* consistency (and avoid breaking key/child consistency) */
if(H5F_addr_defined(bt->left)) {
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC_WRITE)))
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to load node from tree")
/* Copy right-most key from deleted node to right-most key
@ -1404,7 +1404,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
sibling = NULL; /* Make certain future references will be caught */
} /* end if */
if(H5F_addr_defined(bt->right)) {
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC_WRITE)))
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to unlink node from tree")
/* Copy left-most key from deleted node to left-most key in
@ -1524,7 +1524,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
HDassert(level > 0);
/* Update the rightmost key in the left sibling */
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC_WRITE)))
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->left, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect node")
HDmemcpy(H5B_NKEY(sibling, shared, sibling->nchildren),
@ -1539,7 +1539,7 @@ H5B__remove_helper(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *typ
HDassert(level > 0);
/* Update the lefttmost key in the right sibling */
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC_WRITE)))
if(NULL == (sibling = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, bt->right, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, H5B_INS_ERROR, "unable to protect node")
HDmemcpy(H5B_NKEY(sibling, shared, 0),
@ -1646,7 +1646,7 @@ H5B_delete(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr, void
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_WRITE)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
/* Iterate over all children in tree, deleting them */
@ -1908,7 +1908,7 @@ H5B__get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t a
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
/* Cache information from this node */
@ -1932,7 +1932,7 @@ H5B__get_info_helper(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t a
while(H5F_addr_defined(next_addr)) {
/* Protect the next node to the right */
addr = next_addr;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "B-tree node")
/* Cache information from this node */
@ -2059,7 +2059,7 @@ H5B_valid(H5F_t *f, hid_t dxpl_id, const H5B_class_t *type, haddr_t addr)
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree node")
done:


@ -155,8 +155,9 @@ H5B2_create(H5F_t *f, hid_t dxpl_id, const H5B2_create_t *cparam, void *ctx_udat
/* Look up the B-tree header */
cache_udata.f = f;
cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = ctx_udata;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_WRITE)))
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load B-tree header")
/* Point v2 B-tree wrapper at header and bump it's ref count */
@ -215,8 +216,9 @@ H5B2_open(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata)
/* Look up the B-tree header */
cache_udata.f = f;
cache_udata.addr = addr;
cache_udata.ctx_udata = ctx_udata;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_READ)))
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to load B-tree header")
/* Check for pending heap deletion */
@ -479,7 +481,7 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
@ -542,7 +544,7 @@ H5B2_find(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_found_t op,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
@ -662,7 +664,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
unsigned u; /* Local index variable */
/* Lock B-tree current node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Search for record with correct index */
@ -734,7 +736,7 @@ H5B2_index(H5B2_t *bt2, hid_t dxpl_id, H5_iter_order_t order, hsize_t idx,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check index */
@ -1070,7 +1072,7 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_WRITE)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
@ -1142,7 +1144,7 @@ H5B2_modify(H5B2_t *bt2, hid_t dxpl_id, void *udata, H5B2_modify_t op,
hbool_t changed = FALSE;/* Whether the 'modify' callback changed the record */
/* Lock B-tree leaf node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_WRITE)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
@ -1276,7 +1278,7 @@ H5B2_close(H5B2_t *bt2, hid_t dxpl_id)
/* Lock the v2 B-tree header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(bt2->f, dxpl_id, H5AC_BT2_HDR, bt2_addr, NULL, H5AC_WRITE)))
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(bt2->f, dxpl_id, H5AC_BT2_HDR, bt2_addr, NULL, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect v2 B-tree header")
/* Set the shared v2 B-tree header's file context for this operation */
@ -1352,8 +1354,9 @@ H5B2_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *ctx_udata,
HDfprintf(stderr, "%s: addr = %a\n", FUNC, addr);
#endif /* QAK */
cache_udata.f = f;
cache_udata.addr = addr;
cache_udata.ctx_udata = ctx_udata;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_WRITE)))
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect v2 B-tree header")
/* Remember the callback & context for later */

File diff suppressed because it is too large.


@ -123,8 +123,9 @@ H5B2__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
* Load the B-tree header.
*/
cache_udata.f = f;
cache_udata.addr = addr;
cache_udata.ctx_udata = dbg_ctx;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC_READ)))
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header")
/* Set file pointer for this B-tree operation */
@ -241,8 +242,9 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
* Load the B-tree header.
*/
cache_udata.f = f;
cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = dbg_ctx;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_READ)))
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree header")
/* Set file pointer for this B-tree operation */
@ -253,7 +255,7 @@ H5B2__int_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
*/
H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
H5_CHECK_OVERFLOW(depth, unsigned, uint16_t);
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, (uint16_t)nrec, (uint16_t)depth, H5AC_READ)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, addr, (uint16_t)nrec, (uint16_t)depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, FAIL, "unable to load B-tree internal node")
/* Print opening message */
@ -375,8 +377,9 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent
* Load the B-tree header.
*/
cache_udata.f = f;
cache_udata.addr = hdr_addr;
cache_udata.ctx_udata = dbg_ctx;
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC_READ)))
if(NULL == (hdr = (H5B2_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_BT2_HDR, hdr_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree header")
/* Set file pointer for this B-tree operation */
@ -386,7 +389,7 @@ H5B2__leaf_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent
* Load the B-tree leaf node
*/
H5_CHECK_OVERFLOW(nrec, unsigned, uint16_t);
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, (uint16_t)nrec, H5AC_READ)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, addr, (uint16_t)nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Print opening message */


@ -215,9 +215,9 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (left_int = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (right_int = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for child nodes */
@ -244,9 +244,9 @@ H5B2__split1(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Protect both leaves */
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@ -403,7 +403,7 @@ H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)
HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "unable to create new internal node")
/* Protect new root node */
if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC_WRITE)))
if(NULL == (new_root = H5B2__protect_internal(hdr, dxpl_id, hdr->root.addr, hdr->root.node_nrec, hdr->depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set first node pointer in root node to old root node pointer info */
@ -467,9 +467,9 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@ -492,9 +492,9 @@ H5B2__redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@ -704,11 +704,11 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for child nodes */
@ -737,11 +737,11 @@ H5B2__redistribute3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for child nodes */
@ -1084,9 +1084,9 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@ -1109,9 +1109,9 @@ H5B2__merge2(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock left & right B-tree child nodes */
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@ -1246,11 +1246,11 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (left_internal = H5B2__protect_internal(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (middle_internal = H5B2__protect_internal(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (right_internal = H5B2__protect_internal(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@ -1279,11 +1279,11 @@ H5B2__merge3(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
right_addr = internal->node_ptrs[idx + 1].addr;
/* Lock B-tree child nodes */
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC_WRITE)))
if(NULL == (left_leaf = H5B2__protect_leaf(hdr, dxpl_id, left_addr, internal->node_ptrs[idx - 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
if(NULL == (middle_leaf = H5B2__protect_leaf(hdr, dxpl_id, middle_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC_WRITE)))
if(NULL == (right_leaf = H5B2__protect_leaf(hdr, dxpl_id, right_addr, internal->node_ptrs[idx + 1].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@ -1460,7 +1460,7 @@ H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child nodes */
if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC_WRITE)))
if(NULL == (child_internal = H5B2__protect_internal(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, (uint16_t)(depth - 1), H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* More setup for accessing child node information */
@ -1475,7 +1475,7 @@ H5B2__swap_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
child_addr = internal->node_ptrs[idx].addr;
/* Lock B-tree child node */
if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC_WRITE)))
if(NULL == (child_leaf = H5B2__protect_leaf(hdr, dxpl_id, child_addr, internal->node_ptrs[idx].node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* More setup for accessing child node information */
@ -1538,7 +1538,7 @@ H5B2__insert_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Must have a leaf node with enough space to insert a record now */
@ -1637,7 +1637,7 @@ H5B2__insert_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(H5F_addr_defined(curr_node_ptr->addr));
/* Lock current B-tree node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Split or redistribute child node pointers, if necessary */
@ -1834,7 +1834,7 @@ done:
*/
H5B2_leaf_t *
H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec,
H5AC_protect_t rw)
unsigned flags)
{
H5B2_leaf_cache_ud_t udata; /* User-data for callback */
H5B2_leaf_t *ret_value; /* Return value */
@ -1845,13 +1845,16 @@ H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, uint16_t nrec,
HDassert(hdr);
HDassert(H5F_addr_defined(addr));
/* only H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
H5_CHECKED_ASSIGN(udata.nrec, uint16_t, nrec, unsigned)
/* Protect the leaf node */
if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, rw)))
if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, flags)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree leaf node")
done:
@ -1953,7 +1956,7 @@ done:
*/
H5B2_internal_t *
H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
uint16_t nrec, uint16_t depth, H5AC_protect_t rw)
uint16_t nrec, uint16_t depth, unsigned flags)
{
H5B2_internal_cache_ud_t udata; /* User data to pass through to cache 'deserialize' callback */
H5B2_internal_t *ret_value; /* Return value */
@ -1965,6 +1968,9 @@ H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
HDassert(H5F_addr_defined(addr));
HDassert(depth > 0);
/* only H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data for callback */
udata.f = hdr->f;
udata.hdr = hdr;
@ -1972,7 +1978,7 @@ H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
udata.depth = depth;
/* Protect the internal node */
if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, rw)))
if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, flags)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, NULL, "unable to protect B-tree internal node")
done:
@ -2021,7 +2027,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_internal_t *internal; /* Pointer to internal node */
/* Lock the current B-tree node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@ -2040,7 +2046,7 @@ H5B2__iterate_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_READ)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@ -2125,7 +2131,7 @@ H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_ptr
/* Lock current B-tree node */
leaf_addr = curr_node_ptr->addr;
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@ -2234,7 +2240,7 @@ H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, hbool_t *depth_decreased,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Determine the correct number of records to merge at */
@ -2443,7 +2449,7 @@ H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock B-tree leaf node */
leaf_addr = curr_node_ptr->addr;
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC_WRITE)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, leaf_addr, curr_node_ptr->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Sanity check number of records */
@ -2551,7 +2557,7 @@ H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Lock current B-tree node */
internal_addr = curr_node_ptr->addr;
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC_WRITE)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, internal_addr, curr_node_ptr->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
HDassert(internal->nrec == curr_node_ptr->node_nrec);
HDassert(depth == hdr->depth || internal->nrec > 1);
@ -2827,7 +2833,7 @@ H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, H5B2_node_ptr_t *curr_node_p
HDassert(op);
/* Lock current B-tree node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC_READ)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate node pointer for child */
@ -2914,7 +2920,7 @@ H5B2__neighbor_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(op);
/* Lock current B-tree node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC_READ)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr->addr, curr_node_ptr->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Locate node pointer for child */
@ -2987,7 +2993,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
unsigned u; /* Local index */
/* Lock the current B-tree node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_WRITE)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Set up information about current node */
@ -3004,7 +3010,7 @@ H5B2__delete_node(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
H5B2_leaf_t *leaf; /* Pointer to leaf node */
/* Lock the current B-tree node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC_WRITE)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Set up information about current node */
@ -3063,7 +3069,7 @@ H5B2__node_size(H5B2_hdr_t *hdr, hid_t dxpl_id, uint16_t depth,
HDassert(depth > 0);
/* Lock the current B-tree node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC_READ)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node->addr, curr_node->node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree internal node")
/* Recursively descend into child nodes, if we are above the "twig" level in the B-tree */


@ -231,6 +231,7 @@ typedef enum H5B2_nodepos_t {
/* Callback info for loading a free space header into the cache */
typedef struct H5B2_hdr_cache_ud_t {
H5F_t *f; /* File that v2 b-tree header is within */
haddr_t addr; /* Address of B-tree header in the file */
void *ctx_udata; /* User-data for protecting */
} H5B2_hdr_cache_ud_t;
@ -305,11 +306,11 @@ H5_DLL herr_t H5B2__hdr_delete(H5B2_hdr_t *hdr, hid_t dxpl_id);
/* Routines for operating on leaf nodes */
H5B2_leaf_t *H5B2__protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
uint16_t nrec, H5AC_protect_t rw);
uint16_t nrec, unsigned flags);
/* Routines for operating on internal nodes */
H5_DLL H5B2_internal_t *H5B2__protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
haddr_t addr, uint16_t nrec, uint16_t depth, H5AC_protect_t rw);
haddr_t addr, uint16_t nrec, uint16_t depth, unsigned flags);
/* Routines for allocating nodes */
H5_DLL herr_t H5B2__split_root(H5B2_hdr_t *hdr, hid_t dxpl_id);
@ -347,7 +348,7 @@ H5_DLL herr_t H5B2__neighbor_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
/* Routines for removing records */
H5_DLL herr_t H5B2__remove_internal(H5B2_hdr_t *hdr, hid_t dxpl_id,
hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
H5AC_info_t *parent_cache_info, hbool_t *parent_cache_info_dirtied_ptr,
H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_nodepos_t curr_pos, H5B2_node_ptr_t *curr_node_ptr, void *udata,
H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
@ -355,8 +356,8 @@ H5_DLL herr_t H5B2__remove_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id,
void *udata, H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_internal_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
hbool_t *depth_decreased, void *swap_loc, uint16_t depth,
H5AC_info_t *parent_cache_info, hbool_t *parent_cache_info_dirtied_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t idx,
H5AC_info_t *parent_cache_info, unsigned *parent_cache_info_flags_ptr,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos, hsize_t n,
H5B2_remove_t op, void *op_data);
H5_DLL herr_t H5B2__remove_leaf_by_idx(H5B2_hdr_t *hdr, hid_t dxpl_id,
H5B2_node_ptr_t *curr_node_ptr, H5B2_nodepos_t curr_pos,

View File

@ -430,7 +430,7 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_node_ptr_t next_node_ptr; /* Node pointer info for next node */
/* Lock B-tree current node */
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC_READ)))
if(NULL == (internal = H5B2__protect_internal(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, depth, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree internal node")
/* Locate node pointer for child */
@ -470,7 +470,7 @@ H5B2_get_node_info_test(H5B2_t *bt2, hid_t dxpl_id, void *udata,
H5B2_leaf_t *leaf; /* Pointer to leaf node in B-tree */
/* Lock B-tree leaf node */
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC_READ)))
if(NULL == (leaf = H5B2__protect_leaf(hdr, dxpl_id, curr_node_ptr.addr, curr_node_ptr.node_nrec, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to protect B-tree leaf node")
/* Locate record */
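Throughout the hunks above, the old H5AC_protect_t access-mode argument (H5AC_READ / H5AC_WRITE) is replaced by a flags bitmask: H5AC__READ_ONLY_FLAG requests read-only access and H5AC__NO_FLAGS_SET requests read/write access. A minimal standalone sketch of that convention follows; the toy_* names and flag values are invented for illustration and are not HDF5 API.

/* Sketch only: models the rw -> flags transition with toy names. */
#include <assert.h>
#include <stdio.h>

#define TOY__NO_FLAGS_SET    0x0000u   /* read/write access (was H5AC_WRITE) */
#define TOY__READ_ONLY_FLAG  0x0001u   /* read-only access  (was H5AC_READ)  */

static void toy_protect(unsigned flags)
{
    /* Mirrors the new HDassert()s: only the read-only flag may be set */
    assert((flags & ~TOY__READ_ONLY_FLAG) == 0);

    if(flags & TOY__READ_ONLY_FLAG)
        printf("protected for read-only access\n");
    else
        printf("protected for read/write access\n");
}

int main(void)
{
    toy_protect(TOY__READ_ONLY_FLAG);   /* e.g. debugging / lookup paths */
    toy_protect(TOY__NO_FLAGS_SET);     /* e.g. insert / modify paths    */
    return 0;
}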

View File

@ -37,7 +37,6 @@
#include "H5private.h" /* Generic Functions */
#include "H5Bpkg.h" /* B-link trees */
#include "H5Eprivate.h" /* Error handling */
#include "H5MFprivate.h" /* File memory management */
/****************/
@ -55,11 +54,14 @@
/********************/
/* Metadata cache callbacks */
static H5B_t *H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
static herr_t H5B__flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B_t *b, unsigned H5_ATTR_UNUSED * flags_ptr);
static herr_t H5B__dest(H5F_t *f, H5B_t *bt);
static herr_t H5B__clear(H5F_t *f, H5B_t *b, hbool_t destroy);
static herr_t H5B__compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_ptr);
static herr_t H5B__get_load_size(const void *udata, size_t *image_len);
static void *H5B__deserialize(const void *image, size_t len, void *udata,
hbool_t *dirty);
static herr_t H5B__image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5B__serialize(const H5F_t *f, void *image, size_t len,
void *thing);
static herr_t H5B__free_icr(void *thing);
/*********************/
@ -68,13 +70,19 @@ static herr_t H5B__compute_size(const H5F_t *f, const H5B_t *bt, size_t *size_pt
/* H5B inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_BT[1] = {{
H5AC_BT_ID,
(H5AC_load_func_t)H5B__load,
(H5AC_flush_func_t)H5B__flush,
(H5AC_dest_func_t)H5B__dest,
(H5AC_clear_func_t)H5B__clear,
(H5AC_notify_func_t)NULL,
(H5AC_size_func_t)H5B__compute_size,
H5AC_BT_ID, /* Metadata client ID */
"v1 B-tree", /* Metadata client name (for debugging) */
H5FD_MEM_BTREE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5B__get_load_size, /* 'get_load_size' callback */
H5B__deserialize, /* 'deserialize' callback */
H5B__image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5B__serialize, /* 'serialize' callback */
NULL, /* 'notify' callback */
H5B__free_icr, /* 'free_icr' callback */
NULL, /* 'clear" callback */
NULL, /* 'fsf_size' callback */
}};
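The class definition above swaps the old load/flush/dest/clear/size callback set for the v3 serialize-style set (get_load_size, deserialize, image_len, pre_serialize, serialize, notify, free_icr, clear, fsf_size). A rough, self-contained model of the shape of such a client follows, covering only a reduced subset of those callbacks; toy_class_t, the toy_* callbacks, and the int-valued client are invented for illustration and do not match the real H5AC_class_t layout.

/* Sketch only: a minimal serialize/deserialize-style metadata client. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct toy_class_t {
    const char *name;
    int   (*get_load_size)(const void *udata, size_t *image_len);
    void *(*deserialize)(const void *image, size_t len, void *udata);
    int   (*image_len)(const void *thing, size_t *image_len);
    int   (*serialize)(void *image, size_t len, void *thing);
    int   (*free_icr)(void *thing);
} toy_class_t;

/* Client whose on-disk image is a single int */
static int toy_get_load_size(const void *udata, size_t *image_len)
{ (void)udata; *image_len = sizeof(int); return 0; }

static void *toy_deserialize(const void *image, size_t len, void *udata)
{ int *v = malloc(sizeof(int)); (void)udata; (void)len;
  memcpy(v, image, sizeof(int)); return v; }

static int toy_image_len(const void *thing, size_t *image_len)
{ (void)thing; *image_len = sizeof(int); return 0; }

static int toy_serialize(void *image, size_t len, void *thing)
{ (void)len; memcpy(image, thing, sizeof(int)); return 0; }

static int toy_free_icr(void *thing)
{ free(thing); return 0; }

static const toy_class_t TOY_CLIENT = {
    "toy client", toy_get_load_size, toy_deserialize,
    toy_image_len, toy_serialize, toy_free_icr
};

int main(void)
{
    int    raw = 42;                 /* pretend this was read from disk */
    size_t need, out_len;
    unsigned char out[sizeof(int)];
    void  *thing;

    TOY_CLIENT.get_load_size(NULL, &need);            /* how much to read  */
    thing = TOY_CLIENT.deserialize(&raw, need, NULL); /* build in-core rep */
    TOY_CLIENT.image_len(thing, &out_len);            /* size flush buffer */
    TOY_CLIENT.serialize(out, out_len, thing);        /* write image       */
    TOY_CLIENT.free_icr(thing);                       /* evict             */
    printf("round-tripped image of %zu bytes (%s)\n", out_len, TOY_CLIENT.name);
    return 0;
}

The design change this models: the cache, not the client, owns the I/O, and clients only translate between an in-memory representation and a byte image.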
/*******************/
@ -84,35 +92,71 @@ const H5AC_class_t H5AC_BT[1] = {{
/*-------------------------------------------------------------------------
* Function: H5B__load
* Function: H5B__get_load_size
*
* Purpose: Loads a B-tree node from the disk.
* Purpose: Compute the size of the data structure on disk.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* May 18, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
H5B__get_load_size(const void *_udata, size_t *image_len)
{
const H5B_cache_ud_t *udata = (const H5B_cache_ud_t *)_udata; /* User data for callback */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
FUNC_ENTER_STATIC_NOERR
/* Check arguments */
HDassert(udata);
HDassert(image_len);
/* Get shared info for B-tree */
shared = (H5B_shared_t *)H5UC_GET_OBJ(udata->rc_shared);
HDassert(shared);
/* Set the image length size */
*image_len = shared->sizeof_rnode;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5B__get_load_size() */
/*-------------------------------------------------------------------------
* Function: H5B__deserialize
*
* Purpose: Deserialize the data structure from disk.
*
* Return: Success: Pointer to a new B-tree node.
* Failure: NULL
*
* Programmer: Robb Matzke
* matzke@llnl.gov
* Jun 23 1997
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* Mar 24, 2008
*
*-------------------------------------------------------------------------
*/
static H5B_t *
H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
static void *
H5B__deserialize(const void *_image, size_t H5_ATTR_UNUSED len, void *_udata,
hbool_t H5_ATTR_UNUSED *dirty)
{
H5B_t *bt = NULL; /* Pointer to the deserialized B-tree node */
H5B_cache_ud_t *udata = (H5B_cache_ud_t *)_udata; /* User data for callback */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
const uint8_t *p; /* Pointer into raw data buffer */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into image buffer */
uint8_t *native; /* Pointer to native keys */
unsigned u; /* Local index variable */
H5B_t *ret_value; /* Return value */
FUNC_ENTER_STATIC
/* Check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
/* check arguments */
HDassert(image);
HDassert(udata);
/* Allocate the B-tree node in memory */
@ -134,53 +178,50 @@ H5B__load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
if(NULL == (bt->child = H5FL_SEQ_MALLOC(haddr_t, (size_t)shared->two_k)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTALLOC, NULL, "can't allocate buffer for child addresses")
if(H5F_block_read(f, H5FD_MEM_BTREE, addr, shared->sizeof_rnode, dxpl_id, shared->page) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_READERROR, NULL, "can't read B-tree node")
/* Set the pointer into the raw data buffer */
p = shared->page;
/* magic number */
if(HDmemcmp(p, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC))
if(HDmemcmp(image, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "wrong B-tree signature")
p += 4;
image += H5_SIZEOF_MAGIC;
/* node type and level */
if(*p++ != (uint8_t)udata->type->id)
if(*image++ != (uint8_t)udata->type->id)
HGOTO_ERROR(H5E_BTREE, H5E_CANTLOAD, NULL, "incorrect B-tree node type")
bt->level = *p++;
bt->level = *image++;
/* entries used */
UINT16DECODE(p, bt->nchildren);
UINT16DECODE(image, bt->nchildren);
/* Check if bt->nchildren is greater than two_k */
if(bt->nchildren > shared->two_k)
HGOTO_ERROR(H5E_BTREE, H5E_BADVALUE, NULL, "number of children is greater than maximum")
/* sibling pointers */
H5F_addr_decode(udata->f, (const uint8_t **)&p, &(bt->left));
H5F_addr_decode(udata->f, (const uint8_t **)&p, &(bt->right));
H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->left));
H5F_addr_decode(udata->f, (const uint8_t **)&image, &(bt->right));
/* the child/key pairs */
native = bt->native;
for(u = 0; u < bt->nchildren; u++) {
/* Decode native key value */
if((udata->type->decode)(shared, p, native) < 0)
if((udata->type->decode)(shared, image, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key")
p += shared->sizeof_rkey;
image += shared->sizeof_rkey;
native += udata->type->sizeof_nkey;
/* Decode address value */
H5F_addr_decode(udata->f, (const uint8_t **)&p, bt->child + u);
H5F_addr_decode(udata->f, (const uint8_t **)&image, bt->child + u);
} /* end for */
/* Decode final key */
if(bt->nchildren > 0) {
/* Decode native key value */
if((udata->type->decode)(shared, p, native) < 0)
if((udata->type->decode)(shared, image, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTDECODE, NULL, "unable to decode key")
} /* end if */
/* Sanity check */
HDassert((size_t)((const uint8_t *)image - (const uint8_t *)_image) <= len);
/* Set return value */
ret_value = bt;
@ -190,223 +231,151 @@ done:
HDONE_ERROR(H5E_BTREE, H5E_CANTFREE, NULL, "unable to destroy B-tree node")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B__load() */ /*lint !e818 Can't make udata a pointer to const */
} /* end H5B__deserialize() */
/*-------------------------------------------------------------------------
* Function: H5B__flush
* Function: H5B__image_len
*
* Purpose: Flushes a dirty B-tree node to disk.
* Purpose: Compute the size of the data structure on disk.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Robb Matzke
* matzke@llnl.gov
* Jun 23 1997
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* May 20, 2010
*
*-------------------------------------------------------------------------
*/
static herr_t
H5B__flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5B_t *bt, unsigned H5_ATTR_UNUSED * flags_ptr)
H5B__image_len(const void *_thing, size_t *image_len,
hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
const H5B_t *bt = (const H5B_t *)_thing; /* Pointer to the B-tree node */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
FUNC_ENTER_STATIC_NOERR
/* Check arguments */
HDassert(bt);
HDassert(image_len);
/* Get shared info for B-tree */
shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
HDassert(shared);
/* Set the image length size */
*image_len = shared->sizeof_rnode;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5B__image_len() */
/*-------------------------------------------------------------------------
* Function: H5B__serialize
*
* Purpose: Serialize the data structure for writing to disk.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* Mar 24, 2008
*
*-------------------------------------------------------------------------
*/
static herr_t
H5B__serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNUSED len,
void *_thing)
{
H5B_t *bt = (H5B_t *)_thing; /* Pointer to the B-tree node */
H5B_shared_t *shared; /* Pointer to shared B-tree info */
uint8_t *image = (uint8_t *)_image; /* Pointer into image buffer */
uint8_t *native; /* Pointer to native keys */
unsigned u; /* Local index counter */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(image);
HDassert(bt);
HDassert(bt->rc_shared);
shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
HDassert(shared);
HDassert(shared->type);
HDassert(shared->type->encode);
if(bt->cache_info.is_dirty) {
uint8_t *p; /* Pointer into raw data buffer */
uint8_t *native; /* Pointer to native keys */
unsigned u; /* Local index variable */
/* magic number */
HDmemcpy(image, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC);
image += 4;
p = shared->page;
/* node type and level */
*image++ = (uint8_t)shared->type->id;
H5_CHECK_OVERFLOW(bt->level, unsigned, uint8_t);
*image++ = (uint8_t)bt->level;
/* magic number */
HDmemcpy(p, H5B_MAGIC, (size_t)H5_SIZEOF_MAGIC);
p += 4;
/* entries used */
UINT16ENCODE(image, bt->nchildren);
/* node type and level */
*p++ = (uint8_t)shared->type->id;
H5_CHECK_OVERFLOW(bt->level, unsigned, uint8_t);
*p++ = (uint8_t)bt->level;
/* sibling pointers */
H5F_addr_encode(f, &image, bt->left);
H5F_addr_encode(f, &image, bt->right);
/* entries used */
UINT16ENCODE(p, bt->nchildren);
/* child keys and pointers */
native = bt->native;
for(u = 0; u < bt->nchildren; ++u) {
/* encode the key */
if(shared->type->encode(shared, image, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
image += shared->sizeof_rkey;
native += shared->type->sizeof_nkey;
/* sibling pointers */
H5F_addr_encode(f, &p, bt->left);
H5F_addr_encode(f, &p, bt->right);
/* child keys and pointers */
native = bt->native;
for(u = 0; u < bt->nchildren; ++u) {
/* encode the key */
if(shared->type->encode(shared, p, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
p += shared->sizeof_rkey;
native += shared->type->sizeof_nkey;
/* encode the child address */
H5F_addr_encode(f, &p, bt->child[u]);
} /* end for */
if(bt->nchildren > 0) {
/* Encode the final key */
if(shared->type->encode(shared, p, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
} /* end if */
/*
* Write the disk page. We always write the header, but we don't
* bother writing data for the child entries that don't exist or
* for the final unchanged children.
*/
if(H5F_block_write(f, H5FD_MEM_BTREE, addr, shared->sizeof_rnode, dxpl_id, shared->page) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFLUSH, FAIL, "unable to save B-tree node to disk")
bt->cache_info.is_dirty = FALSE;
/* encode the child address */
H5F_addr_encode(f, &image, bt->child[u]);
} /* end for */
if(bt->nchildren > 0) {
/* Encode the final key */
if(shared->type->encode(shared, image, native) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTENCODE, FAIL, "unable to encode B-tree key")
} /* end if */
if(destroy)
if(H5B__dest(f, bt) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
/* Sanity check */
HDassert((size_t)((const uint8_t *)image - (const uint8_t *)_image) <= len);
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B__flush() */
} /* end H5B__serialize() */
/*-------------------------------------------------------------------------
* Function: H5B__dest
* Function: H5B__free_icr
*
* Purpose: Destroys a B-tree node in memory.
* Purpose: Destroy/release an "in core representation" of a data structure
*
* Return: Non-negative on success/Negative on failure
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@ncsa.uiuc.edu
* Jan 15 2003
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* Mar 26, 2008
*
*-------------------------------------------------------------------------
*/
static herr_t
H5B__dest(H5F_t *f, H5B_t *bt)
H5B__free_icr(void *thing)
{
herr_t ret_value = SUCCEED; /* Return value */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/*
* Check arguments.
*/
HDassert(f);
HDassert(bt);
HDassert(bt->rc_shared);
/* If we're going to free the space on disk, the address must be valid */
HDassert(!bt->cache_info.free_file_space_on_destroy || H5F_addr_defined(bt->cache_info.addr));
/* Check for freeing file space for B-tree node */
if(bt->cache_info.free_file_space_on_destroy) {
H5B_shared_t *shared; /* Pointer to shared B-tree info */
/* Get the pointer to the shared B-tree info */
shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
HDassert(shared);
/* Release the space on disk */
/* (XXX: Nasty usage of internal DXPL value! -QAK) */
if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, bt->cache_info.addr, (hsize_t)shared->sizeof_rnode) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free B-tree node")
} /* end if */
/* Check arguments */
HDassert(thing);
/* Destroy B-tree node */
if(H5B__node_dest(bt) < 0)
if(H5B__node_dest((H5B_t *)thing) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B__dest() */
/*-------------------------------------------------------------------------
* Function: H5B__clear
*
* Purpose: Mark a B-tree node in memory as non-dirty.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@ncsa.uiuc.edu
* Mar 20 2003
*
*-------------------------------------------------------------------------
*/
static herr_t
H5B__clear(H5F_t *f, H5B_t *bt, hbool_t destroy)
{
herr_t ret_value = SUCCEED;
FUNC_ENTER_STATIC
/*
* Check arguments.
*/
HDassert(bt);
/* Reset the dirty flag. */
bt->cache_info.is_dirty = FALSE;
if(destroy)
if(H5B__dest(f, bt) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to destroy B-tree node")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5B__clear() */
/*-------------------------------------------------------------------------
* Function: H5B__compute_size
*
* Purpose: Compute the size in bytes of the specified instance of
* H5B_t on disk, and return it in *len_ptr. On failure,
* the value of *len_ptr is undefined.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 5/13/04
*
*-------------------------------------------------------------------------
*/
static herr_t
H5B__compute_size(const H5F_t H5_ATTR_UNUSED *f, const H5B_t *bt, size_t *size_ptr)
{
H5B_shared_t *shared; /* Pointer to shared B-tree info */
FUNC_ENTER_STATIC_NOERR
/* check arguments */
HDassert(f);
HDassert(bt);
HDassert(bt->rc_shared);
shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
HDassert(shared);
HDassert(shared->type);
HDassert(size_ptr);
/* Set size value */
*size_ptr = shared->sizeof_rnode;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5B__compute_size() */
} /* end H5B__free_icr() */


View File

@ -89,7 +89,7 @@ H5B_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ)))
if(NULL == (bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_BTREE, H5E_CANTPROTECT, FAIL, "unable to load B-tree node")
/*
@ -206,7 +206,7 @@ H5B__assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
cache_udata.f = f;
cache_udata.type = type;
cache_udata.rc_shared = rc_shared;
bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC_READ);
bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, addr, &cache_udata, H5AC__READ_ONLY_FLAG);
HDassert(bt);
shared = (H5B_shared_t *)H5UC_GET_OBJ(bt->rc_shared);
HDassert(shared);
@ -227,7 +227,7 @@ H5B__assert(H5F_t *f, hid_t dxpl_id, haddr_t addr, const H5B_class_t *type, void
* test.
*/
for(ncell = 0; cur; ncell++) {
bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, cur->addr, &cache_udata, H5AC_READ);
bt = (H5B_t *)H5AC_protect(f, dxpl_id, H5AC_BT, cur->addr, &cache_udata, H5AC__READ_ONLY_FLAG);
HDassert(bt);
/* Check node header */

3263
src/H5C.c

File diff suppressed because it is too large

View File

@ -60,7 +60,7 @@
/* Cache configuration settings */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
#define H5C__H5C_T_MAGIC 0x005CAC0E
#define H5C__MAX_NUM_TYPE_IDS 27
#define H5C__MAX_NUM_TYPE_IDS 28
#define H5C__PREFIX_LEN 32
/****************************************************************************
@ -573,6 +573,15 @@ if ( ( (entry_ptr) == NULL ) || \
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr) \
((cache_ptr)->unpins)[(entry_ptr)->type->id]++;
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr) \
((cache_ptr)->slist_scan_restarts)++;
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) \
((cache_ptr)->LRU_scan_restarts)++;
#define H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr) \
((cache_ptr)->hash_bucket_scan_restarts)++;
#if H5C_COLLECT_CACHE_ENTRY_STATS
#define H5C__RESET_CACHE_ENTRY_STATS(entry_ptr) \
@ -599,9 +608,12 @@ if ( ( (entry_ptr) == NULL ) || \
((entry_ptr)->flushes)++; \
}
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
{ \
(((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
{ \
if ( take_ownership ) \
(((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \
else \
(((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
if ( (entry_ptr)->accesses > \
((cache_ptr)->max_accesses)[(entry_ptr)->type->id] ) \
((cache_ptr)->max_accesses)[(entry_ptr)->type->id] = \
@ -709,8 +721,13 @@ if ( ( (entry_ptr) == NULL ) || \
(((cache_ptr)->pinned_flushes)[(entry_ptr)->type->id])++; \
}
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr) \
(((cache_ptr)->evictions)[(entry_ptr)->type->id])++;
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership) \
{ \
if ( take_ownership ) \
(((cache_ptr)->take_ownerships)[(entry_ptr)->type->id])++; \
else \
(((cache_ptr)->evictions)[(entry_ptr)->type->id])++; \
}
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr) \
{ \
@ -780,10 +797,13 @@ if ( ( (entry_ptr) == NULL ) || \
#define H5C__UPDATE_STATS_FOR_INSERTION(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_CLEAR(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_FLUSH(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_EVICTION(cache_ptr, entry_ptr, take_ownership)
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit)
#define H5C__UPDATE_STATS_FOR_PIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
#endif /* H5C_COLLECT_CACHE_STATS */
@ -828,11 +848,25 @@ if ( ( (cache_ptr) == NULL ) || \
( H5C__HASH_FCN((entry_ptr)->addr) >= H5C__HASH_TABLE_LEN ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) ) { \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
"Pre HT insert SC failed") \
}
#define H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
"Post HT insert SC failed") \
}
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
@ -853,10 +887,28 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev != NULL ) ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) ) { \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
}
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
( (entry_ptr) == NULL ) || \
( ! H5F_addr_defined((entry_ptr)->addr) ) || \
( (entry_ptr)->size <= 0 ) || \
( (entry_ptr)->ht_prev != NULL ) || \
( (entry_ptr)->ht_next != NULL ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT remove SC failed") \
}
/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
@ -915,6 +967,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( ( !( was_clean ) || \
( (cache_ptr)->clean_index_size < (old_size) ) ) && \
( ( (was_clean) ) || \
@ -933,6 +987,8 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + \
(cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) || \
( ( !((entry_ptr)->is_dirty ) || \
( (cache_ptr)->dirty_index_size < (new_size) ) ) && \
( ( ((entry_ptr)->is_dirty) ) || \
@ -953,7 +1009,9 @@ if ( \
( (cache_ptr)->index_size < (entry_ptr)->size ) || \
( (cache_ptr)->dirty_index_size < (entry_ptr)->size ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT update for entry clean SC failed") \
}
@ -968,21 +1026,27 @@ if ( \
( (cache_ptr)->index_size < (entry_ptr)->size ) || \
( (cache_ptr)->clean_index_size < (entry_ptr)->size ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) ) { \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Pre HT update for entry dirty SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
if ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \
if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Post HT update for entry clean SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
if ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) { \
if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->clean_index_size) ) || \
( (cache_ptr)->index_size < ((cache_ptr)->dirty_index_size) ) ) { \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
"Post HT update for entry dirty SC failed") \
}
@ -990,7 +1054,9 @@ if ( (cache_ptr)->index_size != \
#else /* H5C_DO_SANITY_CHECKS */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__POST_HT_INSERT_SC(cache_ptr, fail_val)
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val)
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
@ -1026,9 +1092,10 @@ if ( (cache_ptr)->index_size != \
(cache_ptr)->clean_index_size += (entry_ptr)->size; \
if ((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries++; \
HDassert((cache_ptr)->num_last_entries == 1); \
HDassert((cache_ptr)->num_last_entries <= 2); \
} \
H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
}
#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \
@ -1052,9 +1119,10 @@ if ( (cache_ptr)->index_size != \
(cache_ptr)->clean_index_size -= (entry_ptr)->size; \
if ((entry_ptr)->flush_me_last) { \
(cache_ptr)->num_last_entries--; \
HDassert((cache_ptr)->num_last_entries == 0); \
HDassert((cache_ptr)->num_last_entries <= 1); \
} \
H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
}
#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
@ -1205,9 +1273,20 @@ if ( (cache_ptr)->index_size != \
* able to dirty, resize and/or move entries during the
* flush.
*
* JRM -- 12/13/14
* Added code to set cache_ptr->slist_changed to TRUE
* when an entry is inserted in the slist.
*
*-------------------------------------------------------------------------
*/
#if H5C_DO_SLIST_SANITY_CHECKS
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) \
H5C_entry_in_skip_list((cache_ptr), (entry_ptr))
#else /* H5C_DO_SLIST_SANITY_CHECKS */
#define ENTRY_IN_SLIST(cache_ptr, entry_ptr) FALSE
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
#if H5C_DO_SANITY_CHECKS
#define H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, fail_val) \
@ -1218,12 +1297,14 @@ if ( (cache_ptr)->index_size != \
HDassert( (entry_ptr)->size > 0 ); \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
"Can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len++; \
(cache_ptr)->slist_size += (entry_ptr)->size; \
(cache_ptr)->slist_len_increase++; \
@ -1244,12 +1325,14 @@ if ( (cache_ptr)->index_size != \
HDassert( (entry_ptr)->size > 0 ); \
HDassert( H5F_addr_defined((entry_ptr)->addr) ); \
HDassert( !((entry_ptr)->in_slist) ); \
HDassert( !ENTRY_IN_SLIST((cache_ptr), (entry_ptr)) ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
"Can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len++; \
(cache_ptr)->slist_size += (entry_ptr)->size; \
\
@ -1290,6 +1373,10 @@ if ( (cache_ptr)->index_size != \
* Updated sanity checks for the new is_read_only and
* ro_ref_count fields in H5C_cache_entry_t.
*
* JRM -- 12/13/14
* Added code to set cache_ptr->slist_changed to TRUE
* when an entry is removed from the slist.
*
*-------------------------------------------------------------------------
*/
@ -1311,6 +1398,7 @@ if ( (cache_ptr)->index_size != \
"Can't delete entry from skip list.") \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
(cache_ptr)->slist_changed = TRUE; \
(cache_ptr)->slist_len--; \
HDassert( (cache_ptr)->slist_size >= (entry_ptr)->size ); \
(cache_ptr)->slist_size -= (entry_ptr)->size; \
@ -1341,6 +1429,11 @@ if ( (cache_ptr)->index_size != \
* able to dirty, resize and/or move entries during the
* flush.
*
* JRM -- 12/13/14
* Note that we do not set cache_ptr->slist_changed to TRUE
* in this case, as the structure of the slist is not
* modified.
*
*-------------------------------------------------------------------------
*/
@ -2706,6 +2799,10 @@ if ( (cache_ptr)->index_size != \
* the max_cache_size limit until the next time the cache
* attempts to load or insert an entry.
*
* d) When the evictions_enabled field is false (see below),
* the cache size will increase without limit until the
* field is set to true.
*
* min_clean_size: Nominal minimum number of clean bytes in the cache.
* The cache attempts to maintain this number of bytes of
* clean data so as to avoid case b) above. Again, this is
@ -2756,7 +2853,7 @@ if ( (cache_ptr)->index_size != \
* This value should not be mistaken for the footprint of the
* cache in memory. The average cache entry is small, and
* the cache has a considerable overhead. Multiplying the
* index_size by two should yield a conservative estimate
* index_size by three should yield a conservative estimate
* of the cache's memory footprint.
*
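As a worked example of that rule of thumb (assuming nothing beyond the text above): an index_size of 8 MiB suggests budgeting roughly 3 x 8 MiB = 24 MiB of process memory for the cache. The actual overhead varies with entry count and build options, so treat the factor of three only as a conservative sizing heuristic.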
* clean_index_size: Number of bytes of clean entries currently stored in
@ -2767,12 +2864,7 @@ if ( (cache_ptr)->index_size != \
*
* WARNING:
*
* 1) The clean_index_size field is not maintained by the
* index macros, as the hash table doesn't care whether
* the entry is clean or dirty. Instead the field is
* maintained in the H5C__UPDATE_RP macros.
*
* 2) The value of the clean_index_size must not be mistaken
* The value of the clean_index_size must not be mistaken
* for the current clean size of the cache. Rather, the
* clean size of the cache is the current value of
* clean_index_size plus the amount of empty space (if any)
@ -2784,13 +2876,6 @@ if ( (cache_ptr)->index_size != \
* Thus we should have the invariant that clean_index_size +
* dirty_index_size == index_size.
*
* WARNING:
*
* 1) The dirty_index_size field is not maintained by the
* index macros, as the hash table doesn't care whether
* the entry is clean or dirty. Instead the field is
* maintained in the H5C__UPDATE_RP macros.
*
* index: Array of pointer to H5C_cache_entry_t of size
* H5C__HASH_TABLE_LEN. At present, this value is a power
* of two, not the usual prime number.
@ -2806,6 +2891,37 @@ if ( (cache_ptr)->index_size != \
* changing the H5C__HASH_FCN macro and the deletion of the
* H5C__HASH_MASK #define. No other changes should be required.
*
* With the addition of the take ownership flag, it is possible that
* an entry may be removed from the cache as the result of the flush of
* a second entry. In general, this causes little trouble, but it is
* possible that the entry removed may be the next entry in the scan of
* a list. In this case, we must be able to detect the fact that the
* entry has been removed, so that the scan doesn't attempt to proceed with
* an entry that is no longer in the cache.
*
* The following fields are maintained to facilitate this.
*
* entries_removed_counter: Counter that is incremented each time an
* entry is removed from the cache by any means (eviction,
* expungement, or take ownership at this point in time).
* Functions that perform scans on lists may set this field
* to zero prior to calling H5C_flush_single_entry().
* Unexpected changes to the counter indicate that an entry
* was removed from the cache as a side effect of the flush.
*
* last_entry_removed_ptr: Pointer to the instance of H5C_cache_entry_t
* which contained the last entry to be removed from the cache,
* or NULL if there either is no such entry, or if a function
* performing a scan of a list has set this field to NULL prior
* to calling H5C_flush_single_entry().
*
* WARNING!!! This field must NEVER be dereferenced. It is
* maintained to allow functions that perform scans of lists
* to compare this pointer with their pointers to next, thus
* allowing them to avoid unnecessary restarts of scans if the
* pointers don't match, and if entries_removed_counter is
* one.
*
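A minimal sketch of how a list scan can use these two fields to decide whether its saved "next" pointer is still safe to follow; the toy_* types and the no-op flush stub are invented for illustration and are not the real cache internals.

/* Sketch only: detect removal of the next entry during a flush call. */
#include <stdint.h>
#include <stddef.h>

typedef struct toy_entry_t { struct toy_entry_t *next; } toy_entry_t;

typedef struct toy_cache_t {
    toy_entry_t *head;
    int64_t      entries_removed_counter;
    toy_entry_t *last_entry_removed_ptr;  /* compared, never dereferenced */
} toy_cache_t;

/* Stand-in for H5C_flush_single_entry(); a real flush may remove other
 * entries as a side effect, bumping the counter and recording the last
 * entry removed.  This stub removes nothing. */
static void toy_flush_single_entry(toy_cache_t *cache, toy_entry_t *entry)
{ (void)cache; (void)entry; }

static void toy_scan_and_flush(toy_cache_t *cache)
{
    toy_entry_t *entry = cache->head;

    while(entry != NULL) {
        toy_entry_t *next = entry->next;

        /* Reset the detection fields before the side-effect-prone call */
        cache->entries_removed_counter = 0;
        cache->last_entry_removed_ptr  = NULL;

        toy_flush_single_entry(cache, entry);

        /* Restart the scan if our saved 'next' may have been removed */
        if(cache->entries_removed_counter > 1 ||
                (cache->entries_removed_counter == 1 &&
                 cache->last_entry_removed_ptr == next))
            entry = cache->head;
        else
            entry = next;
    }
}

int main(void)
{
    toy_entry_t c = { NULL }, b = { &c }, a = { &b };
    toy_cache_t cache = { &a, 0, NULL };
    toy_scan_and_flush(&cache);
    return 0;
}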
*
* With the addition of cache entry tagging, it is possible that
* an entry may be inserted into the cache without a tag during testing
@ -2825,6 +2941,21 @@ if ( (cache_ptr)->index_size != \
* are flushed. (this has been changed -- dirty entries are now removed from
* the skip list as they are flushed. JRM - 10/25/05)
*
* slist_changed: Boolean flag used to indicate whether the contents of
* the slist have changed since the last time this flag was
* reset. This is used in the cache flush code to detect
* conditions in which pre-serialize or serialize callbacks
* have modified the slist -- which obliges us to restart
* the scan of the slist from the beginning.
*
* slist_change_in_pre_serialize: Boolean flag used to indicate that
* a pre_serialize call has modified the slist since the
* last time this flag was reset.
*
* slist_change_in_serialize: Boolean flag used to indicate that
* a serialize call has modified the slist since the
* last time this flag was reset.
*
* slist_len: Number of entries currently in the skip list
* used to maintain a sorted list of dirty entries in the
* cache.
@ -2855,6 +2986,11 @@ if ( (cache_ptr)->index_size != \
* explicit tests for that case should be added when said
* HDasserts are removed.
*
* Update: There are now two possible last entries
* (superblock and file driver info message). This
* number will probably increase as we add superblock
* messages. JRM -- 11/18/14
*
* With the addition of the fractal heap, the cache must now deal with
* the case in which entries may be dirtied, moved, or have their sizes
* changed during a flush. To allow sanity checks in this situation, the
@ -2863,10 +2999,11 @@ if ( (cache_ptr)->index_size != \
*
* slist_len_increase: Number of entries that have been added to the
* slist since the last time this field was set to zero.
* Note that this value can be negative.
*
* slist_size_increase: Total size of all entries that have been added
* to the slist since the last time this field was set to
* zero.
* zero. Note that this value can be negative.
*
*
* When a cache entry is protected, it must be removed from the LRU
@ -2905,9 +3042,9 @@ if ( (cache_ptr)->index_size != \
* replacement policy code).
*
* 2) A pinned entry can be accessed or modified at any time.
* Therefore, the cache must check with the entry owner
* before flushing it. If permission is denied, the
* cache just skips the entry in the flush.
* This places an additional burden on the associated pre-serialize
* and serialize callbacks, which must ensure that the entry is in
* a consistent state before creating an image of it.
*
* 3) A pinned entry can be marked as dirty (and possibly
* change size) while it is unprotected.
@ -2965,7 +3102,8 @@ if ( (cache_ptr)->index_size != \
* be collective and the other processes will not know to participate.
*
* To deal with this issue, I have modified the usual LRU policy by adding
* clean and dirty LRU lists to the usual LRU list.
* clean and dirty LRU lists to the usual LRU list. In general, these
* lists only exist in parallel builds.
*
* The clean LRU list is simply the regular LRU list with all dirty cache
* entries removed.
@ -2982,7 +3120,7 @@ if ( (cache_ptr)->index_size != \
*
* Even if we start with a completely clean cache, a sequence of protects
* without unprotects can empty the clean LRU list. In this case, the
* cache must grow temporarily. At the next write, we will attempt to
* cache must grow temporarily. At the next sync point, we will attempt to
* evict enough entries to reduce index_size to less than max_cache_size.
* While this will usually be possible, all bets are off if enough entries
* are protected.
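A rough sketch of the consequence described above: when a single process needs space during a read in a parallel build, it may only evict clean entries (flushing a dirty one would be an uncoordinated write), so it walks the clean LRU and simply gives up, letting the cache grow, once that list is exhausted. The toy types are illustrative only, not the real eviction code.

/* Sketch only: make space by evicting from the clean LRU, never writing. */
#include <stddef.h>
#include <stdio.h>

typedef struct toy_entry_t {
    struct toy_entry_t *prev;   /* toward the head (most recently used) */
    size_t size;
} toy_entry_t;

/* Walk from the least recently used clean entry toward the head,
 * "evicting" (no I/O needed) until enough bytes are freed or the clean
 * LRU is exhausted.  Returns the bytes actually freed. */
static size_t toy_make_space(const toy_entry_t *clean_lru_tail, size_t needed)
{
    size_t freed = 0;
    const toy_entry_t *entry = clean_lru_tail;

    while(entry != NULL && freed < needed) {
        freed += entry->size;
        entry = entry->prev;
    }
    return freed;   /* may be < needed: the cache grows until the next
                       sync point, when coordinated eviction is possible */
}

int main(void)
{
    toy_entry_t mru = { NULL, 4096 }, lru = { &mru, 8192 };
    size_t freed = toy_make_space(&lru, 16384);
    printf("freed %zu of 16384 requested bytes\n", freed);
    return 0;
}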
@ -2992,14 +3130,14 @@ if ( (cache_ptr)->index_size != \
*
* LRU_list_len: Number of cache entries currently on the LRU list.
*
* Observe that LRU_list_len + pl_len must always equal
* index_len.
* Observe that LRU_list_len + pl_len + pel_len must always
* equal index_len.
*
* LRU_list_size: Number of bytes of cache entries currently residing on the
* LRU list.
*
* Observe that LRU_list_size + pl_size must always equal
* index_size.
* Observe that LRU_list_size + pl_size + pel_size must always
* equal index_size.
*
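The "Observe that ..." invariants above reduce to simple bookkeeping checks. A tiny sketch with a toy struct (not H5C_t); here pl and pel are read as the protected and pinned entry lists, as suggested by the surrounding text.

/* Sketch only: list/index bookkeeping invariants as assertions. */
#include <assert.h>
#include <stddef.h>

typedef struct toy_cache_t {
    int    index_len,  LRU_list_len,  pl_len,  pel_len;
    size_t index_size, LRU_list_size, pl_size, pel_size;
} toy_cache_t;

static void toy_check_list_invariants(const toy_cache_t *c)
{
    /* Every cached entry lives on exactly one of: LRU list, protected
     * list (pl), or pinned entry list (pel). */
    assert(c->LRU_list_len  + c->pl_len  + c->pel_len  == c->index_len);
    assert(c->LRU_list_size + c->pl_size + c->pel_size == c->index_size);
}

int main(void)
{
    toy_cache_t c = { 3, 1, 1, 1, 300, 100, 100, 100 };
    toy_check_list_invariants(&c);
    return 0;
}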
* LRU_head_ptr: Pointer to the head of the doubly linked LRU list. Cache
* entries on this list are linked by their next and prev fields.
@ -3244,6 +3382,11 @@ if ( (cache_ptr)->index_size != \
* equal to the array index has been evicted from the cache in
* the current epoch.
*
* take_ownerships: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The
* cells are used to record the number of times an entry with
* type id equal to the array index has been removed from the
* cache via the H5C__TAKE_OWNERSHIP_FLAG in the current epoch.
*
* moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1. The cells
* are used to record the number of times an entry with type
* id equal to the array index has been moved in the current
@ -3252,7 +3395,7 @@ if ( (cache_ptr)->index_size != \
* entry_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
* The cells are used to record the number of times an entry
* with type id equal to the array index has been moved
* during its flush callback in the current epoch.
* during its pre-serialize callback in the current epoch.
*
* cache_flush_moves: Array of int64 of length H5C__MAX_NUM_TYPE_IDS + 1.
* The cells are used to record the number of times an entry
@ -3297,7 +3440,8 @@ if ( (cache_ptr)->index_size != \
* entry_flush_size_changes: Array of int64 of length
* H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
* the number of times an entry with type id equal to the
* array index has changed size while in its flush callback.
* array index has changed size while in its pre-serialize
* callback.
*
* cache_flush_size_changes: Array of int64 of length
* H5C__MAX_NUM_TYPE_IDS + 1. The cells are used to record
@ -3370,7 +3514,33 @@ if ( (cache_ptr)->index_size != \
*
* entries_scanned_to_make_space: Number of entries scanned only when looking
* for entries to evict in order to make space in cache.
*
*
* As entries are now capable of moving, loading, dirtying, and deleting
* other entries in their pre_serialize and serialize callbacks, it has
* been necessary to insert code to restart scans of lists so as to avoid
* improper behavior if the next entry in the list is the target of one of
* these operations.
*
* The following fields are used to count such occurrences. They are used
* both in tests (to verify that the scan has been restarted), and to
* obtain estimates of how frequently these restarts occur.
*
* slist_scan_restarts: Number of times a scan of the slist (that contains
* calls to H5C_flush_single_entry()) has been restarted to
* avoid potential issues with change of status of the next
* entry in the scan.
*
* LRU_scan_restarts: Number of times a scan of the LRU list (that contains
* calls to H5C_flush_single_entry()) has been restarted to
* avoid potential issues with change of status of the next
* entry in the scan.
*
* hash_bucket_scan_restarts: Number of times a scan of a hash bucket list
* (that contains calls to H5C_flush_single_entry()) has been
* restarted to avoid potential issues with change of status
* of the next entry in the scan.
*
* The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
* and H5C_COLLECT_CACHE_ENTRY_STATS are true.
*
@ -3438,9 +3608,17 @@ struct H5C_t {
size_t dirty_index_size;
H5C_cache_entry_t * (index[H5C__HASH_TABLE_LEN]);
/* Fields to detect entries removed during scans */
int64_t entries_removed_counter;
H5C_cache_entry_t * last_entry_removed_ptr;
/* Field to disable tag validation */
hbool_t ignore_tags;
/* Fields for maintaining list of in-order entries, for flushing */
hbool_t slist_changed;
hbool_t slist_change_in_pre_serialize;
hbool_t slist_change_in_serialize;
int32_t slist_len;
size_t slist_size;
H5SL_t * slist_ptr;
@ -3515,6 +3693,7 @@ struct H5C_t {
int64_t clears[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t flushes[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t evictions[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t take_ownerships[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t moves[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t entry_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
int64_t cache_flush_moves[H5C__MAX_NUM_TYPE_IDS + 1];
@ -3559,6 +3738,11 @@ struct H5C_t {
int32_t max_entries_skipped_in_msic;
int32_t max_entries_scanned_in_msic;
int64_t entries_scanned_to_make_space;
/* Fields for tracking skip list scan restarts */
int64_t slist_scan_restarts;
int64_t LRU_scan_restarts;
int64_t hash_bucket_scan_restarts;
#if H5C_COLLECT_CACHE_ENTRY_STATS
int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];

File diff suppressed because it is too large

View File

@ -253,7 +253,7 @@ H5D__layout_oh_create(H5F_t *file, hid_t dxpl_id, H5O_t *oh, H5D_t *dset,
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create EFL file name heap")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(file, dxpl_id, efl->heap_addr, H5AC_WRITE)))
if(NULL == (heap = H5HL_protect(file, dxpl_id, efl->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTPROTECT, FAIL, "unable to protect EFL file name heap")
/* Insert "empty" name first */

View File

@ -148,7 +148,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for extensible array info")
/* Lock the array header into memory */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_WRITE)))
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Point extensible array wrapper at header and bump it's ref count */
@ -209,7 +209,7 @@ H5EA_open(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
#ifdef QAK
HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
#endif /* QAK */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_READ)))
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header, address = %llu", (unsigned long long)ea_addr)
/* Check for pending array deletion */
@ -333,7 +333,7 @@ END_FUNC(PRIV) /* end H5EA_get_addr() */
*/
BEGIN_FUNC(STATIC, ERR,
herr_t, SUCCEED, FAIL,
H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, H5AC_protect_t thing_acc,
H5EA__lookup_elmt(const H5EA_t *ea, hid_t dxpl_id, hsize_t idx, unsigned thing_acc,
void **thing, uint8_t **thing_elmt_buf, hsize_t *thing_elmt_idx,
H5EA__unprotect_func_t *thing_unprot_func))
@ -362,6 +362,9 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDassert(thing_elmt_buf);
HDassert(thing_unprot_func);
/* only the H5AC__READ_ONLY_FLAG may be set in thing_acc */
HDassert((thing_acc & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set the shared array header's file context for this operation */
hdr->f = ea->f;
@ -377,7 +380,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDfprintf(stderr, "%s: Index block address not defined!\n", FUNC, idx);
#endif /* QAK */
/* Check if we are allowed to create the thing */
if(H5AC_WRITE == thing_acc) {
if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
/* Create the index block */
hdr->idx_blk_addr = H5EA__iblock_create(hdr, dxpl_id, &stats_changed);
if(!H5F_addr_defined(hdr->idx_blk_addr))
@ -435,7 +438,7 @@ HDfprintf(stderr, "%s: dblk_idx = %u, iblock->ndblk_addrs = %Zu\n", FUNC, dblk_i
/* Check if the data block has been allocated on disk yet */
if(!H5F_addr_defined(iblock->dblk_addrs[dblk_idx])) {
/* Check if we are allowed to create the thing */
if(H5AC_WRITE == thing_acc) {
if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
haddr_t dblk_addr; /* Address of data block created */
hsize_t dblk_off; /* Offset of data block in array */
@ -475,7 +478,7 @@ HDfprintf(stderr, "%s: dblk_idx = %u, iblock->ndblk_addrs = %Zu\n", FUNC, dblk_i
/* Check if the super block has been allocated on disk yet */
if(!H5F_addr_defined(iblock->sblk_addrs[sblk_off])) {
/* Check if we are allowed to create the thing */
if(H5AC_WRITE == thing_acc) {
if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
haddr_t sblk_addr; /* Address of data block created */
/* Create super block */
@ -508,7 +511,7 @@ HDfprintf(stderr, "%s: dblk_idx = %u, sblock->ndblks = %Zu\n", FUNC, dblk_idx, s
/* Check if the data block has been allocated on disk yet */
if(!H5F_addr_defined(sblock->dblk_addrs[dblk_idx])) {
/* Check if we are allowed to create the thing */
if(H5AC_WRITE == thing_acc) {
if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
haddr_t dblk_addr; /* Address of data block created */
hsize_t dblk_off; /* Offset of data block in array */
@ -568,7 +571,7 @@ HDfprintf(stderr, "%s: sblock->dblk_page_size = %Zu\n", FUNC, sblock->dblk_page_
/* Check if page has been initialized yet */
if(!H5VM_bit_get(sblock->page_init, page_init_idx)) {
/* Check if we are allowed to create the thing */
if(H5AC_WRITE == thing_acc) {
if(0 == (thing_acc & H5AC__READ_ONLY_FLAG)) { /* i.e. r/w access */
/* Create the data block page */
if(H5EA__dblk_page_create(hdr, dxpl_id, sblock, dblk_page_addr) < 0)
H5E_THROW(H5E_CANTCREATE, "unable to create data block page")
@ -677,7 +680,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
hdr->f = ea->f;
/* Look up the array metadata containing the element we want to set */
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_WRITE, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Sanity check */
@ -762,7 +765,7 @@ HDfprintf(stderr, "%s: Index block address is: %a\n", FUNC, hdr->idx_blk_addr);
hdr->f = ea->f;
/* Look up the array metadata containing the element we want to set */
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_READ, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Check if the thing holding the element has been created yet */
@ -905,7 +908,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDassert(ea);
/* Look up the array metadata containing the element we want to set */
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_WRITE, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__NO_FLAGS_SET, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Sanity check */
@ -960,7 +963,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
HDassert(ea);
/* Look up the array metadata containing the element we want to set */
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC_READ, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
if(H5EA__lookup_elmt(ea, dxpl_id, idx, H5AC__READ_ONLY_FLAG, &thing, &thing_elmt_buf, &thing_elmt_idx, &thing_unprot_func) < 0)
H5E_THROW(H5E_CANTPROTECT, "unable to protect array metadata")
/* Sanity check */
@ -1048,7 +1051,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Lock the array header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
if(NULL == (hdr = H5EA__hdr_protect(ea->f, dxpl_id, ea_addr, NULL, H5AC_WRITE)))
if(NULL == (hdr = H5EA__hdr_protect(ea->f, dxpl_id, ea_addr, NULL, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTLOAD, "unable to load extensible array header")
/* Set the shared array header's file context for this operation */
@ -1112,7 +1115,7 @@ H5EA_delete(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata))
#ifdef QAK
HDfprintf(stderr, "%s: ea_addr = %a\n", FUNC, ea_addr);
#endif /* QAK */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC_WRITE)))
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, ea_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr)
/* Check for files using shared array header */

File diff suppressed because it is too large Load Diff

View File

@ -119,7 +119,7 @@ H5EA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
} /* end if */
/* Load the extensible array header */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC_READ)))
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Print opening message */
@ -218,14 +218,14 @@ H5EA__iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, FILE *s
} /* end if */
/* Load the extensible array header */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Sanity check */
HDassert(H5F_addr_eq(hdr->idx_blk_addr, addr));
/* Protect index block */
if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC_READ)))
if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array index block, address = %llu", (unsigned long long)hdr->idx_blk_addr)
/* Print opening message */
@ -343,12 +343,12 @@ H5EA__sblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the extensible array header */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Protect super block */
/* (Note: setting parent of super block to 'hdr' for this operation should be OK -QAK) */
if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, (H5EA_iblock_t *)hdr, addr, sblk_idx, H5AC_READ)))
if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, (H5EA_iblock_t *)hdr, addr, sblk_idx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array super block, address = %llu", (unsigned long long)addr)
/* Print opening message */
@ -437,12 +437,12 @@ H5EA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the extensible array header */
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
if(NULL == (hdr = H5EA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load extensible array header")
/* Protect data block */
/* (Note: setting parent of data block to 'hdr' for this operation should be OK -QAK) */
if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, hdr, addr, dblk_nelmts, H5AC_READ)))
if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, hdr, addr, dblk_nelmts, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)addr)
/* Print opening message */

View File

@ -210,7 +210,7 @@ END_FUNC(PKG) /* end H5EA__dblk_page_create() */
BEGIN_FUNC(PKG, ERR,
H5EA_dblk_page_t *, NULL, NULL,
H5EA__dblk_page_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_sblock_t *parent,
haddr_t dblk_page_addr, H5AC_protect_t rw))
haddr_t dblk_page_addr, unsigned flags))
/* Local variables */
H5EA_dblk_page_cache_ud_t udata; /* Information needed for loading data block page */
@ -223,12 +223,16 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_page_addr));
/* only the H5AC__READ_ONLY_FLAG may be set */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data */
udata.hdr = hdr;
udata.parent = parent;
udata.dblk_page_addr = dblk_page_addr;
/* Protect the data block page */
if(NULL == (ret_value = (H5EA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page_addr, &udata, rw)))
if(NULL == (ret_value = (H5EA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block page, address = %llu", (unsigned long long)dblk_page_addr)
CATCH

View File

@ -304,7 +304,7 @@ END_FUNC(PKG) /* end H5EA__dblock_sblk_idx() */
BEGIN_FUNC(PKG, ERR,
H5EA_dblock_t *, NULL, NULL,
H5EA__dblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
haddr_t dblk_addr, size_t dblk_nelmts, H5AC_protect_t rw))
haddr_t dblk_addr, size_t dblk_nelmts, unsigned flags))
/* Local variables */
H5EA_dblock_cache_ud_t udata; /* Information needed for loading data block */
@ -318,13 +318,17 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(dblk_addr));
HDassert(dblk_nelmts);
/* only the H5AC__READ_ONLY_FLAG may be set */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data */
udata.hdr = hdr;
udata.parent = parent;
udata.nelmts = dblk_nelmts;
udata.dblk_addr = dblk_addr;
/* Protect the data block */
if(NULL == (ret_value = (H5EA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblk_addr, &udata, rw)))
if(NULL == (ret_value = (H5EA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_DBLOCK, dblk_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)dblk_addr)
CATCH
@ -399,7 +403,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(dblk_nelmts > 0);
/* Protect data block */
if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, parent, dblk_addr, dblk_nelmts, H5AC_WRITE)))
if(NULL == (dblock = H5EA__dblock_protect(hdr, dxpl_id, parent, dblk_addr, dblk_nelmts, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array data block, address = %llu", (unsigned long long)dblk_addr)
/* Check if this is a paged data block */

View File

@ -627,16 +627,25 @@ END_FUNC(PKG) /* end H5EA__hdr_modified() */
BEGIN_FUNC(PKG, ERR,
H5EA_hdr_t *, NULL, NULL,
H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr, void *ctx_udata,
H5AC_protect_t rw))
unsigned flags))
/* Local variables */
H5EA_hdr_cache_ud_t udata; /* User data for cache callbacks */
/* Sanity check */
HDassert(f);
HDassert(H5F_addr_defined(ea_addr));
/* only the H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data for cache callbacks */
udata.f = f;
udata.addr = ea_addr;
udata.ctx_udata = ctx_udata;
/* Protect the header */
if(NULL == (ret_value = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, ctx_udata, rw)))
if(NULL == (ret_value = (H5EA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_EARRAY_HDR, ea_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array header, address = %llu", (unsigned long long)ea_addr)
CATCH

View File

@ -279,7 +279,7 @@ END_FUNC(PKG) /* end H5EA__iblock_create() */
*/
BEGIN_FUNC(PKG, ERR,
H5EA_iblock_t *, NULL, NULL,
H5EA__iblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5AC_protect_t rw))
H5EA__iblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, unsigned flags))
#ifdef QAK
HDfprintf(stderr, "%s: Called\n", FUNC);
@ -288,8 +288,11 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Sanity check */
HDassert(hdr);
/* only the H5AC__READ_ONLY_FLAG may be set */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Protect the index block */
if(NULL == (ret_value = (H5EA_iblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, hdr->idx_blk_addr, hdr, rw)))
if(NULL == (ret_value = (H5EA_iblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_IBLOCK, hdr->idx_blk_addr, hdr, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array index block, address = %llu", (unsigned long long)hdr->idx_blk_addr)
CATCH
@ -361,7 +364,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(hdr->idx_blk_addr));
/* Protect index block */
if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC_WRITE)))
if(NULL == (iblock = H5EA__iblock_protect(hdr, dxpl_id, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array index block, address = %llu", (unsigned long long)hdr->idx_blk_addr)
/* Check for index block having data block pointers */
@ -436,7 +439,6 @@ H5EA__iblock_dest(H5EA_iblock_t *iblock))
/* Sanity check */
HDassert(iblock);
HDassert(iblock->rc == 0);
/* Check if shared header field has been initialized */
if(iblock->hdr) {

View File

@ -229,7 +229,6 @@ typedef struct H5EA_iblock_t {
haddr_t *sblk_addrs; /* Buffer for addresses of super blocks in index block */
/* Internal array information (not stored) */
size_t rc; /* Reference count of objects using this block */
H5EA_hdr_t *hdr; /* Shared array header info */
haddr_t addr; /* Address of this index block on disk */
size_t size; /* Size of index block on disk */
@ -251,7 +250,6 @@ typedef struct H5EA_sblock_t {
uint8_t *page_init; /* Bitmap of whether a data block page is initialized */
/* Internal array information (not stored) */
size_t rc; /* Reference count of objects using this block */
H5EA_hdr_t *hdr; /* Shared array header info */
H5EA_iblock_t *parent; /* Parent object for super block (index block) */
haddr_t addr; /* Address of this index block on disk */
@ -312,11 +310,19 @@ struct H5EA_t {
/* Metadata cache callback user data types */
/* Info needed for loading header */
typedef struct H5EA_hdr_cache_ud_t {
H5F_t *f; /* Pointer to file for extensible array */
haddr_t addr; /* Address of header on disk */
void *ctx_udata; /* User context for class */
} H5EA_hdr_cache_ud_t;
/* Info needed for loading super block */
typedef struct H5EA_sblock_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
H5EA_iblock_t *parent; /* Pointer to parent object for super block (index block) */
unsigned sblk_idx; /* Index of super block */
haddr_t sblk_addr; /* Address of super block */
} H5EA_sblock_cache_ud_t;
/* Info needed for loading data block */
@ -324,12 +330,14 @@ typedef struct H5EA_dblock_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
void *parent; /* Pointer to parent object for data block (index or super block) */
size_t nelmts; /* Number of elements in data block */
haddr_t dblk_addr; /* Address of data block */
} H5EA_dblock_cache_ud_t;
/* Info needed for loading data block page */
typedef struct H5EA_dblk_page_cache_ud_t {
H5EA_hdr_t *hdr; /* Shared extensible array information */
H5EA_sblock_t *parent; /* Pointer to parent object for data block page (super block) */
haddr_t dblk_page_addr; /* Address of data block page */
} H5EA_dblk_page_cache_ud_t;
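
These user-data structs now carry everything a cache client callback needs to rebuild an entry (shared header, parent object, address, element count), because the v3 cache hands the callbacks only the udata pointer. A hedged sketch of how such a callback might consume H5EA_dblock_cache_ud_t; the function name and signature are illustrative assumptions, since the real clients live in H5EAcache.c and are not shown in this excerpt:

    /* illustrative consumer of the data block user data (not from the patch) */
    static void
    example_dblock_udata_check(void *_udata)
    {
        H5EA_dblock_cache_ud_t *udata = (H5EA_dblock_cache_ud_t *)_udata;

        HDassert(udata->hdr);                           /* shared array header      */
        HDassert(udata->parent);                        /* parent index/super block */
        HDassert(udata->nelmts > 0);                    /* element count for sizing */
        HDassert(H5F_addr_defined(udata->dblk_addr));   /* on-disk address          */
    } /* end example_dblock_udata_check() */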
#ifdef H5EA_TESTING
@ -388,7 +396,7 @@ H5_DLL herr_t H5EA__hdr_fuse_incr(H5EA_hdr_t *hdr);
H5_DLL size_t H5EA__hdr_fuse_decr(H5EA_hdr_t *hdr);
H5_DLL herr_t H5EA__hdr_modified(H5EA_hdr_t *hdr);
H5_DLL H5EA_hdr_t *H5EA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t ea_addr,
void *ctx_udata, H5AC_protect_t rw);
void *ctx_udata, unsigned flags);
H5_DLL herr_t H5EA__hdr_unprotect(H5EA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5EA__hdr_delete(H5EA_hdr_t *hdr, hid_t dxpl_id);
H5_DLL herr_t H5EA__hdr_dest(H5EA_hdr_t *hdr);
@ -398,7 +406,7 @@ H5_DLL H5EA_iblock_t *H5EA__iblock_alloc(H5EA_hdr_t *hdr);
H5_DLL haddr_t H5EA__iblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id,
hbool_t *stats_changed);
H5_DLL H5EA_iblock_t *H5EA__iblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
H5AC_protect_t rw);
unsigned flags);
H5_DLL herr_t H5EA__iblock_unprotect(H5EA_iblock_t *iblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5EA__iblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id);
@ -410,7 +418,8 @@ H5_DLL H5EA_sblock_t *H5EA__sblock_alloc(H5EA_hdr_t *hdr, H5EA_iblock_t *parent,
H5_DLL haddr_t H5EA__sblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id,
H5EA_iblock_t *parent, hbool_t *stats_changed, unsigned sblk_idx);
H5_DLL H5EA_sblock_t *H5EA__sblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
H5EA_iblock_t *parent, haddr_t sblk_addr, unsigned sblk_idx, H5AC_protect_t rw);
H5EA_iblock_t *parent, haddr_t sblk_addr, unsigned sblk_idx,
unsigned flags);
H5_DLL herr_t H5EA__sblock_unprotect(H5EA_sblock_t *sblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5EA__sblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id,
@ -424,7 +433,7 @@ H5_DLL haddr_t H5EA__dblock_create(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
hbool_t *stats_changed, hsize_t dblk_off, size_t nelmts);
H5_DLL unsigned H5EA__dblock_sblk_idx(const H5EA_hdr_t *hdr, hsize_t idx);
H5_DLL H5EA_dblock_t *H5EA__dblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
void *parent, haddr_t dblk_addr, size_t dblk_nelmts, H5AC_protect_t rw);
void *parent, haddr_t dblk_addr, size_t dblk_nelmts, unsigned flags);
H5_DLL herr_t H5EA__dblock_unprotect(H5EA_dblock_t *dblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5EA__dblock_delete(H5EA_hdr_t *hdr, hid_t dxpl_id, void *parent,
@ -436,7 +445,7 @@ H5_DLL H5EA_dblk_page_t *H5EA__dblk_page_alloc(H5EA_hdr_t *hdr, H5EA_sblock_t *p
H5_DLL herr_t H5EA__dblk_page_create(H5EA_hdr_t *hdr, hid_t dxpl_id,
H5EA_sblock_t *parent, haddr_t addr);
H5_DLL H5EA_dblk_page_t *H5EA__dblk_page_protect(H5EA_hdr_t *hdr, hid_t dxpl_id,
H5EA_sblock_t *parent, haddr_t dblk_page_addr, H5AC_protect_t rw);
H5EA_sblock_t *parent, haddr_t dblk_page_addr, unsigned flags);
H5_DLL herr_t H5EA__dblk_page_unprotect(H5EA_dblk_page_t *dblk_page,
hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5EA__dblk_page_dest(H5EA_dblk_page_t *dblk_page);

View File

@ -276,7 +276,7 @@ END_FUNC(PKG) /* end H5EA__sblock_create() */
BEGIN_FUNC(PKG, ERR,
H5EA_sblock_t *, NULL, NULL,
H5EA__sblock_protect(H5EA_hdr_t *hdr, hid_t dxpl_id, H5EA_iblock_t *parent,
haddr_t sblk_addr, unsigned sblk_idx, H5AC_protect_t rw))
haddr_t sblk_addr, unsigned sblk_idx, unsigned flags))
/* Local variables */
H5EA_sblock_cache_ud_t udata; /* Information needed for loading super block */
@ -290,13 +290,17 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(sblk_addr));
/* only the H5AC__READ_ONLY_FLAG may be set */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data */
udata.hdr = hdr;
udata.parent = parent;
udata.sblk_idx = sblk_idx;
udata.sblk_addr = sblk_addr;
/* Protect the super block */
if(NULL == (ret_value = (H5EA_sblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblk_addr, &udata, rw)))
if(NULL == (ret_value = (H5EA_sblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_EARRAY_SBLOCK, sblk_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array super block, address = %llu", (unsigned long long)sblk_addr)
CATCH
@ -370,7 +374,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(sblk_addr));
/* Protect super block */
if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, parent, sblk_addr, sblk_idx, H5AC_WRITE)))
if(NULL == (sblock = H5EA__sblock_protect(hdr, dxpl_id, parent, sblk_addr, sblk_idx, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect extensible array super block, address = %llu", (unsigned long long)sblk_addr)
/* Iterate over data blocks */
@ -412,7 +416,6 @@ H5EA__sblock_dest(H5EA_sblock_t *sblock))
/* Sanity check */
HDassert(sblock);
HDassert(sblock->rc == 0);
#ifdef QAK
HDfprintf(stderr, "%s: sblock->hdr->dblk_page_nelmts = %Zu, sblock->ndblks = %Zu, sblock->dblk_nelmts = %Zu\n", FUNC, sblock->hdr->dblk_page_nelmts, sblock->ndblks, sblock->dblk_nelmts);
#endif /* QAK */

View File

@ -141,7 +141,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
H5E_THROW(H5E_CANTALLOC, "memory allocation failed for fixed array info")
/* Lock the array header into memory */
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_WRITE)))
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Point fixed array wrapper at header and bump its ref count */
@ -201,7 +201,7 @@ H5FA_open(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata))
#ifdef H5FA_DEBUG
HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
#endif /* H5FA_DEBUG */
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_READ)))
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header, address = %llu", (unsigned long long)fa_addr)
/* Check for pending array deletion */
@ -359,7 +359,7 @@ HDfprintf(stderr, "%s: fixed array data block address not defined!\n", FUNC, idx
HDassert(idx < hdr->cparam.nelmts);
/* Protect data block */
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC_WRITE)))
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)hdr->dblk_addr)
/* Check for paging data block */
@ -400,7 +400,7 @@ HDfprintf(stderr, "%s: fixed array data block address not defined!\n", FUNC, idx
} /* end if */
/* Protect the data block page */
if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC_WRITE)))
if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
/* Set the element in the data block page */
@ -467,7 +467,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
else {
/* Get the data block */
HDassert(H5F_addr_defined(hdr->dblk_addr));
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC_READ)))
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, hdr->dblk_addr, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)hdr->dblk_addr)
/* Check for paged data block */
@ -507,7 +507,7 @@ HDfprintf(stderr, "%s: Index %Hu\n", FUNC, idx);
dblk_page_nelmts = dblock->dblk_page_nelmts;
/* Protect the data block page */
if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC_READ)))
if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
/* Retrieve element from data block */
@ -592,7 +592,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
/* Lock the array header into memory */
/* (OK to pass in NULL for callback context, since we know the header must be in the cache) */
if(NULL == (hdr = H5FA__hdr_protect(fa->f, dxpl_id, fa_addr, NULL, H5AC_WRITE)))
if(NULL == (hdr = H5FA__hdr_protect(fa->f, dxpl_id, fa_addr, NULL, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTLOAD, "unable to load fixed array header")
/* Set the shared array header's file context for this operation */
@ -655,7 +655,7 @@ H5FA_delete(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata))
#ifdef H5FA_DEBUG
HDfprintf(stderr, "%s: fa_addr = %a\n", FUNC, fa_addr);
#endif /* H5FA_DEBUG */
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC_WRITE)))
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, fa_addr, ctx_udata, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array header, address = %llu", (unsigned long long)fa_addr)
/* Check for files using shared array header */

File diff suppressed because it is too large

View File

@ -117,7 +117,7 @@ H5FA__hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
} /* end if */
/* Load the fixed array header */
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC_READ)))
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Print opening message */
@ -198,11 +198,11 @@ H5FA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
} /* end if */
/* Load the fixed array header */
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC_READ)))
if(NULL == (hdr = H5FA__hdr_protect(f, dxpl_id, hdr_addr, dbg_ctx, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to load fixed array header")
/* Protect data block */
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, addr, H5AC_READ)))
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)addr)
/* Print opening message */
@ -246,7 +246,7 @@ H5FA__dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int inde
if(((page_idx + 1) == dblock->npages) && (nelmts_left = hdr->cparam.nelmts % dblock->dblk_page_nelmts))
dblk_page_nelmts = (size_t)nelmts_left;
if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC_READ)))
if(NULL == (dblk_page = H5FA__dblk_page_protect(hdr, dxpl_id, dblk_page_addr, dblk_page_nelmts, H5AC__READ_ONLY_FLAG)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
HDfprintf(stream, "%*sElements in page %Zu:\n", indent, "", page_idx);

View File

@ -207,7 +207,7 @@ END_FUNC(PKG) /* end H5FA__dblk_page_create() */
BEGIN_FUNC(PKG, ERR,
H5FA_dblk_page_t *, NULL, NULL,
H5FA__dblk_page_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_page_addr,
size_t dblk_page_nelmts, H5AC_protect_t rw))
size_t dblk_page_nelmts, unsigned flags))
/* Local variables */
H5FA_dblk_page_cache_ud_t udata; /* Information needed for loading data block page */
@ -220,12 +220,16 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_page_addr));
/* only the H5AC__READ_ONLY_FLAG is permitted */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data */
udata.hdr = hdr;
udata.nelmts = dblk_page_nelmts;
udata.dblk_page_addr = dblk_page_addr;
/* Protect the data block page */
if(NULL == (ret_value = (H5FA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page_addr, &udata, rw)))
if(NULL == (ret_value = (H5FA_dblk_page_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLK_PAGE, dblk_page_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block page, address = %llu", (unsigned long long)dblk_page_addr)
CATCH

View File

@ -263,7 +263,7 @@ END_FUNC(PKG) /* end H5FA__dblock_create() */
BEGIN_FUNC(PKG, ERR,
H5FA_dblock_t *, NULL, NULL,
H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id, haddr_t dblk_addr,
H5AC_protect_t rw))
unsigned flags))
/* Local variables */
H5FA_dblock_cache_ud_t udata; /* Information needed for loading data block */
@ -276,11 +276,15 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(hdr);
HDassert(H5F_addr_defined(dblk_addr));
/* only the H5AC__READ_ONLY_FLAG is permitted */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data */
udata.hdr = hdr;
udata.dblk_addr = dblk_addr;
/* Protect the data block */
if(NULL == (ret_value = (H5FA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblk_addr, &udata, rw)))
if(NULL == (ret_value = (H5FA_dblock_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FARRAY_DBLOCK, dblk_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)dblk_addr)
CATCH
@ -350,7 +354,7 @@ HDfprintf(stderr, "%s: Called\n", FUNC);
HDassert(H5F_addr_defined(dblk_addr));
/* Protect data block */
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, dblk_addr, H5AC_WRITE)))
if(NULL == (dblock = H5FA__dblock_protect(hdr, dxpl_id, dblk_addr, H5AC__NO_FLAGS_SET)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array data block, address = %llu", (unsigned long long)dblk_addr)
/* Check if data block is paged */

View File

@ -408,16 +408,25 @@ END_FUNC(PKG) /* end H5FA__hdr_modified() */
BEGIN_FUNC(PKG, ERR,
H5FA_hdr_t *, NULL, NULL,
H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr, void *ctx_udata,
H5AC_protect_t rw))
unsigned flags))
/* Local variables */
H5FA_hdr_cache_ud_t udata; /* User data for cache callbacks */
/* Sanity check */
HDassert(f);
HDassert(H5F_addr_defined(fa_addr));
/* only the H5AC__READ_ONLY_FLAG is permitted */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up user data for cache callbacks */
udata.f = f;
udata.addr = fa_addr;
udata.ctx_udata = ctx_udata;
/* Protect the header */
if(NULL == (ret_value = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, ctx_udata, rw)))
if(NULL == (ret_value = (H5FA_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FARRAY_HDR, fa_addr, &udata, flags)))
H5E_THROW(H5E_CANTPROTECT, "unable to protect fixed array header, address = %llu", (unsigned long long)fa_addr)
CATCH

View File

@ -206,15 +206,24 @@ struct H5FA_t {
/* Metadata cache callback user data types */
/* Info needed for loading header */
typedef struct H5FA_hdr_cache_ud_t {
H5F_t *f; /* Pointer to file for fixed array */
haddr_t addr; /* Address of header on disk */
void *ctx_udata; /* User context for class */
} H5FA_hdr_cache_ud_t;
/* Info needed for loading data block */
typedef struct H5FA_dblock_cache_ud_t {
H5FA_hdr_t *hdr; /* Shared fixed array information */
haddr_t dblk_addr; /* Address of data block on disk */
} H5FA_dblock_cache_ud_t;
/* Info needed for loading data block page */
typedef struct H5FA_dblk_page_cache_ud_t {
H5FA_hdr_t *hdr; /* Shared fixed array information */
size_t nelmts; /* Number of elements in data block page */
haddr_t dblk_page_addr; /* Address of data block page on disk */
} H5FA_dblk_page_cache_ud_t;
@ -254,7 +263,7 @@ H5_DLL herr_t H5FA__hdr_fuse_incr(H5FA_hdr_t *hdr);
H5_DLL size_t H5FA__hdr_fuse_decr(H5FA_hdr_t *hdr);
H5_DLL herr_t H5FA__hdr_modified(H5FA_hdr_t *hdr);
H5_DLL H5FA_hdr_t *H5FA__hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t fa_addr,
void *ctx_udata, H5AC_protect_t rw);
void *ctx_udata, unsigned flags);
H5_DLL herr_t H5FA__hdr_unprotect(H5FA_hdr_t *hdr, hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5FA__hdr_delete(H5FA_hdr_t *hdr, hid_t dxpl_id);
H5_DLL herr_t H5FA__hdr_dest(H5FA_hdr_t *hdr);
@ -264,7 +273,7 @@ H5_DLL H5FA_dblock_t *H5FA__dblock_alloc(H5FA_hdr_t *hdr);
H5_DLL haddr_t H5FA__dblock_create(H5FA_hdr_t *hdr, hid_t dxpl_id, hbool_t *hdr_dirty);
H5_DLL unsigned H5FA__dblock_sblk_idx(const H5FA_hdr_t *hdr, hsize_t idx);
H5_DLL H5FA_dblock_t *H5FA__dblock_protect(H5FA_hdr_t *hdr, hid_t dxpl_id,
haddr_t dblk_addr, H5AC_protect_t rw);
haddr_t dblk_addr, unsigned flags);
H5_DLL herr_t H5FA__dblock_unprotect(H5FA_dblock_t *dblock, hid_t dxpl_id,
unsigned cache_flags);
H5_DLL herr_t H5FA__dblock_delete(H5FA_hdr_t *hdr, hid_t dxpl_id,
@ -276,7 +285,7 @@ H5_DLL herr_t H5FA__dblk_page_create(H5FA_hdr_t *hdr, hid_t dxpl_id,
haddr_t addr, size_t nelmts);
H5_DLL H5FA_dblk_page_t *H5FA__dblk_page_alloc(H5FA_hdr_t *hdr, size_t nelmts);
H5_DLL H5FA_dblk_page_t *H5FA__dblk_page_protect(H5FA_hdr_t *hdr, hid_t dxpl_id,
haddr_t dblk_page_addr, size_t dblk_page_nelmts, H5AC_protect_t rw);
haddr_t dblk_page_addr, size_t dblk_page_nelmts, unsigned flags);
H5_DLL herr_t H5FA__dblk_page_unprotect(H5FA_dblk_page_t *dblk_page,
hid_t dxpl_id, unsigned cache_flags);
H5_DLL herr_t H5FA__dblk_page_dest(H5FA_dblk_page_t *dblk_page);

View File

@ -905,7 +905,7 @@ H5FD_family_query(const H5FD_t * _file, unsigned long *flags /* out */)
/* Check for flags that are set by h5repart */
if(file && file->repart_members)
*flags |= H5FD_FEAT_DIRTY_SBLK_LOAD; /* Mark the superblock dirty when it is loaded (so the family member sizes are rewritten) */
*flags |= H5FD_FEAT_DIRTY_DRVRINFO_LOAD; /* Mark the driver info dirty when it is loaded (so the family member sizes are rewritten) */
} /* end if */
FUNC_LEAVE_NOAPI(SUCCEED)

View File

@ -198,12 +198,12 @@ typedef enum H5F_mem_t H5FD_mem_t;
*/
#define H5FD_FEAT_IGNORE_DRVRINFO 0x00000020
/*
* Defining the H5FD_FEAT_DIRTY_SBLK_LOAD for a VFL driver means that
* the library will mark the superblock dirty when the file is opened
* Defining the H5FD_FEAT_DIRTY_DRVRINFO_LOAD for a VFL driver means that
* the library will mark the driver info dirty when the file is opened
* R/W. This will cause the driver info to be re-encoded when the file
* is flushed/closed.
*/
#define H5FD_FEAT_DIRTY_SBLK_LOAD 0x00000040
#define H5FD_FEAT_DIRTY_DRVRINFO_LOAD 0x00000040
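
For context, a sketch of how a virtual file driver could advertise the renamed feature flag from its 'query' callback; the driver name is hypothetical, but the callback shape matches the H5FD_family_query() hunk shown above:

    /* hypothetical VFD 'query' callback (not part of the patch) */
    static herr_t
    example_vfd_query(const H5FD_t H5_ATTR_UNUSED *_file, unsigned long *flags /* out */)
    {
        if(flags) {
            *flags = 0;
            *flags |= H5FD_FEAT_DIRTY_DRVRINFO_LOAD; /* re-encode the driver info block when the file is opened R/W */
        } /* end if */

        return SUCCEED;
    } /* end example_vfd_query() */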
/*
* Defining the H5FD_FEAT_POSIX_COMPAT_HANDLE for a VFL driver means that
* the handle for the VFD (returned with the 'get_handle' callback) is

View File

@ -216,7 +216,7 @@ HDfprintf(stderr, "%s: Opening free space manager, fs_addr = %a, nclasses = %Zu\
cache_udata.addr = fs_addr;
/* Protect the free space header */
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_READ)))
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, NULL, "unable to load free space header")
#ifdef H5FS_DEBUG
HDfprintf(stderr, "%s: fspace->sect_addr = %a\n", FUNC, fspace->sect_addr);
@ -329,7 +329,7 @@ HDfprintf(stderr, "%s: Deleting free space manager, fs_addr = %a\n", FUNC, fs_ad
#endif /* H5FS_DEBUG */
/* Protect the free space header */
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_WRITE)))
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to protect free space header")
/* Sanity check */
@ -361,8 +361,19 @@ HDfprintf(stderr, "%s: Expunging free space section info from cache\n", FUNC);
#endif /* H5FS_DEBUG */
/* Evict the free space section info from the metadata cache */
/* (Free file space) */
if(H5AC_expunge_entry(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, H5AC__FREE_FILE_SPACE_FLAG) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove free space section info from cache")
{
unsigned cache_flags = H5AC__NO_FLAGS_SET;
/* if the section info is in real file space, tell
* the cache to free its file space.
*/
if (!H5F_IS_TMP_ADDR(f, fspace->sect_addr))
cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
if(H5AC_expunge_entry(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, cache_flags) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove free space section info from cache")
}
#ifdef H5FS_DEBUG
HDfprintf(stderr, "%s: Done expunging free space section info from cache\n", FUNC);
#endif /* H5FS_DEBUG */
@ -938,7 +949,7 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
cache_udata.f = f;
cache_udata.dxpl_id = dxpl_id;
cache_udata.fspace = fspace;
if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC_READ)))
if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to protect free space section info")
/* Unload and release ownership of the free-space manager section info */
@ -979,7 +990,7 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
cache_udata.nclasses = 0;
cache_udata.classes = NULL;
cache_udata.cls_init_udata = NULL;
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fspace->addr, &cache_udata, H5AC_READ)))
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fspace->addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to protect free space section info")
/* Unpin the free-space manager header */

File diff suppressed because it is too large

View File

@ -121,7 +121,7 @@ H5FS_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int
/*
* Load the free space header.
*/
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, addr, &cache_udata, H5AC_READ)))
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, FAIL, "unable to load free space header")
/* Print opening message */
@ -263,7 +263,7 @@ H5FS_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t H5_ATTR_UNUSED addr, FILE *str
/*
* Load the free space header.
*/
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC_READ)))
if(NULL == (fspace = (H5FS_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, FAIL, "unable to load free space header")
/* Retrieve the client id */

View File

@ -182,7 +182,9 @@ struct H5FS_t {
unsigned sinfo_lock_count; /* # of times the section info has been locked */
hbool_t sinfo_protected; /* Whether the section info was protected when locked */
hbool_t sinfo_modified; /* Whether the section info has been modified while locked */
H5AC_protect_t sinfo_accmode; /* Access mode for protecting the section info */
unsigned sinfo_accmode; /* Access mode for protecting the section info */
/* must be either H5C__NO_FLAGS_SET (i.e. r/w) */
/* or H5AC__READ_ONLY_FLAG (i.e. r/o). */
size_t max_cls_serial_size; /* Max. additional size of serialized form of section */
hsize_t threshold; /* Threshold for alignment */
hsize_t alignment; /* Alignment */

View File

@ -204,7 +204,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
H5FS_sinfo_lock(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, H5AC_protect_t accmode)
H5FS_sinfo_lock(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, unsigned accmode)
{
H5FS_sinfo_cache_ud_t cache_udata; /* User-data for cache callback */
herr_t ret_value = SUCCEED; /* Return value */
@ -220,14 +220,21 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n"
HDassert(f);
HDassert(fspace);
/* only H5AC__READ_ONLY_FLAG may appear in accmode */
HDassert((accmode & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* If the free space header doesn't already "own" the section info, load
* section info or create it
*/
if(fspace->sinfo) {
/* Check if the section info was protected & we want a different access mode */
/* only H5AC__READ_ONLY_FLAG may appear in fspace->sinfo_accmode */
HDassert(((fspace->sinfo_accmode) & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
if(fspace->sinfo_protected && accmode != fspace->sinfo_accmode) {
/* Check if we need to switch from read-only access to read-write */
if(H5AC_WRITE == accmode) {
if(0 == (accmode & (unsigned)(~H5AC__READ_ONLY_FLAG))) {
/* Unprotect the read-only section info */
if(H5AC_unprotect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, fspace->sinfo, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTUNPROTECT, FAIL, "unable to release free space section info")
@ -236,11 +243,11 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n"
cache_udata.f = f;
cache_udata.dxpl_id = dxpl_id;
cache_udata.fspace = fspace;
if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC_WRITE)))
if(NULL == (fspace->sinfo = (H5FS_sinfo_t *)H5AC_protect(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTPROTECT, FAIL, "unable to load free space sections")
/* Switch the access mode we have */
fspace->sinfo_accmode = H5AC_WRITE;
fspace->sinfo_accmode = H5AC__NO_FLAGS_SET;
} /* end if */
} /* end if */
} /* end if */
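
Callers of H5FS_sinfo_lock() adopt the same flags convention, as the later hunks in this file show; condensed, the two access modes look like this (error handling as in the original):

    /* read-only access (e.g. iterating over sections) */
    if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__READ_ONLY_FLAG) < 0)
        HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")

    /* read/write access (add/remove/merge/shrink) -- may trigger the
     * unprotect-and-reprotect upgrade handled in the hunk above */
    if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
        HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")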
@ -331,7 +338,7 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n"
/* Check if we modified any section */
if(modified) {
/* Check if the section info was protected with a different access mode */
if(fspace->sinfo_protected && fspace->sinfo_accmode != H5AC_WRITE)
if(fspace->sinfo_protected && (0 != ((fspace->sinfo_accmode) & H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTDIRTY, FAIL, "attempt to modify read-only section info")
/* If we modified the section info, mark it dirty */
@ -915,7 +922,7 @@ H5FS_sect_remove(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
HDassert(sect);
/* Get a pointer to the section info */
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@ -1396,7 +1403,7 @@ HDfprintf(stderr, "%s: *sect = {%a, %Hu, %u, %s}\n", FUNC, sect->addr, sect->siz
HDassert(sect->size);
/* Get a pointer to the section info */
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@ -1494,7 +1501,7 @@ HDfprintf(stderr, "%s: fspace->ghost_sect_count = %Hu\n", FUNC, fspace->ghost_se
H5FS_section_info_t *sect; /* Temporary free space section */
/* Get a pointer to the section info */
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@ -1613,7 +1620,7 @@ H5FS_sect_try_merge(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, H5FS_section_info_t
HDassert(sect->size);
/* Get a pointer to the section info */
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
saved_fs_size = sect->size;
@ -1849,7 +1856,7 @@ HDfprintf(stderr, "%s: fspace->ghost_sect_count = %Hu\n", FUNC, fspace->ghost_se
#endif /* QAK */
if(fspace->tot_sect_count > 0) {
/* Get a pointer to the section info */
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@ -1997,7 +2004,7 @@ HDfprintf(stderr, "%s: fspace->tot_sect_count = %Hu\n", FUNC, fspace->tot_sect_c
unsigned bin; /* Current bin we are on */
/* Get a pointer to the section info */
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_READ) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@ -2089,7 +2096,7 @@ H5FS_sect_change_class(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
HDassert(new_class < fspace->nclasses);
/* Get a pointer to the section info */
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;
@ -2403,7 +2410,7 @@ H5FS_sect_try_shrink_eoa(const H5F_t *f, hid_t dxpl_id, const H5FS_t *fspace, vo
/* Check arguments. */
HDassert(fspace);
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC_WRITE) < 0)
if(H5FS_sinfo_lock(f, dxpl_id, fspace, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "can't get section info")
sinfo_valid = TRUE;

View File

@ -803,6 +803,14 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
} /* end if */
/* if it exists, unpin the driver information block cache entry,
* since we're about to destroy the cache
*/
if(f->shared->drvinfo)
if(H5AC_unpin_entry(f->shared->drvinfo) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FSPACE, H5E_CANTUNPIN, FAIL, "unable to unpin drvinfo")
/* Unpin the superblock, since we're about to destroy the cache */
if(H5AC_unpin_entry(f->shared->sblock) < 0)
/* Push error, but keep going*/
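
Together with the H5F__super_read() changes further down, the pinned driver-info cache entry gets a simple open/close lifecycle; a condensed sketch drawn from those hunks, with error handling trimmed:

    /* on open (H5F__super_read): unprotect with the pin flag and remember the entry */
    drvinfo_flags |= H5AC__PIN_ENTRY_FLAG;
    H5AC_unprotect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, drvinfo_flags);
    f->shared->drvinfo = drvinfo;

    /* on close (H5F_dest, above): unpin before the cache is destroyed */
    if(f->shared->drvinfo)
        H5AC_unpin_entry(f->shared->drvinfo);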

View File

@ -81,6 +81,21 @@
#define H5F_SUPERBLOCK_FIXED_SIZE ( H5F_SIGNATURE_LEN \
+ 1) /* superblock version */
/* The H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE is the minimal amount of super block
* variable length data guaranteed to load the sizeof offsets and the sizeof
* lengths fields in all versions of the superblock.
*
* This is necessary in the V3 cache, as on the initial load, we need to
* get enough of the superblock to determine its version and size so that
* the metadata cache can load the correct amount of data from file to
* allow the second deserialization attempt to succeed.
*
* The value selected will have to be revisited for each new version
* of the super block. Note that the current value is one byte larger
* than it needs to be.
*/
#define H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE 7
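
A rough accounting of that value, assuming the v0/v1 layout in which four version/reserved bytes precede the sizeof-offsets and sizeof-lengths bytes (this arithmetic is a reconstruction for illustration, not part of the patch):

    v0/v1: 4 version/reserved bytes + 1 (sizeof offsets) + 1 (sizeof lengths) = 6 bytes
    v2:    1 (sizeof offsets) + 1 (sizeof lengths)                            = 2 bytes

so 7 covers every superblock version and is indeed one byte more than the v0/v1 minimum, as the comment above notes.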
/* Macros for computing variable-size superblock size */
#define H5F_SUPERBLOCK_VARLEN_SIZE_COMMON \
(2 /* freespace, and root group versions */ \
@ -113,20 +128,38 @@
+ (sizeof_addr) /* EOF address */ \
+ (sizeof_addr) /* root group object header address */ \
+ H5F_SIZEOF_CHKSUM) /* superblock checksum (keep this last) */
#define H5F_SUPERBLOCK_VARLEN_SIZE(v, f) ( \
(v == 0 ? H5F_SUPERBLOCK_VARLEN_SIZE_V0(H5F_SIZEOF_ADDR(f), H5F_SIZEOF_SIZE(f)) : 0) \
+ (v == 1 ? H5F_SUPERBLOCK_VARLEN_SIZE_V1(H5F_SIZEOF_ADDR(f), H5F_SIZEOF_SIZE(f)) : 0) \
+ (v == 2 ? H5F_SUPERBLOCK_VARLEN_SIZE_V2(H5F_SIZEOF_ADDR(f)) : 0))
#define H5F_SUPERBLOCK_VARLEN_SIZE(v, sizeof_addr, sizeof_size) ( \
(v == 0 ? H5F_SUPERBLOCK_VARLEN_SIZE_V0(sizeof_addr, sizeof_size) : 0) \
+ (v == 1 ? H5F_SUPERBLOCK_VARLEN_SIZE_V1(sizeof_addr, sizeof_size) : 0) \
+ (v == 2 ? H5F_SUPERBLOCK_VARLEN_SIZE_V2(sizeof_addr) : 0))
/* Total size of superblock, depends on superblock version */
#define H5F_SUPERBLOCK_SIZE(v, f) ( H5F_SUPERBLOCK_FIXED_SIZE \
+ H5F_SUPERBLOCK_VARLEN_SIZE(v, f))
#define H5F_SUPERBLOCK_SIZE(s) ( H5F_SUPERBLOCK_FIXED_SIZE \
+ H5F_SUPERBLOCK_VARLEN_SIZE((s)->super_vers, (s)->sizeof_addr, (s)->sizeof_size))
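
The size macros no longer need the H5F_t, because the cached superblock entry now records its own address and length sizes; a before/after usage sketch matching the H5F__super_init() hunk later in this commit:

    /* old form -- pulled sizeof_addr/sizeof_size out of the file struct:      */
    /*     superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(super_vers, f);      */

    /* new form -- everything comes from the superblock cache entry itself:    */
    sblock->sizeof_addr = f->shared->sizeof_addr;
    sblock->sizeof_size = f->shared->sizeof_size;
    superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(sblock);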
/* Forward declaration external file cache struct used below (defined in
* H5Fefc.c) */
typedef struct H5F_efc_t H5F_efc_t;
/* Structure for passing 'user data' to superblock cache callbacks */
typedef struct H5F_superblock_cache_ud_t {
/* IN: */
H5F_t *f; /* Pointer to file */
hbool_t ignore_drvrinfo; /* Indicate if the driver info should be ignored */
/* OUT: */
unsigned sym_leaf_k; /* Symbol table leaf node's 'K' value */
unsigned btree_k[H5B_NUM_BTREE_ID]; /* B-tree key values for each type */
haddr_t stored_eof; /* End-of-file in file */
hbool_t drvrinfo_removed; /* Indicate if the driver info was removed */
} H5F_superblock_cache_ud_t;
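
The superblock user data follows an IN/OUT convention: the caller fills the IN fields before protecting the entry and reads the OUT fields back once deserialization has run. A condensed sketch of that flow, taken from the H5F__super_read() hunk later in this commit (btree_k setup and error checks omitted):

    H5F_superblock_cache_ud_t udata;

    udata.f = f;                                                            /* IN  */
    udata.ignore_drvrinfo = H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO);  /* IN  */
    udata.sym_leaf_k = 0;                                                   /* OUT */
    udata.stored_eof = HADDR_UNDEF;                                         /* OUT */
    udata.drvrinfo_removed = FALSE;                                         /* OUT */

    sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags);

    /* after a successful protect, udata.stored_eof and udata.sym_leaf_k hold
     * the values decoded from the file */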
/* Structure for passing 'user data' to driver info block cache callbacks */
typedef struct H5F_drvrinfo_cache_ud_t {
H5F_t *f; /* Pointer to file */
haddr_t driver_addr; /* address of driver info block */
} H5F_drvrinfo_cache_ud_t;
/* Structure for metadata & "small [raw] data" block aggregation fields */
struct H5F_blk_aggr_t {
unsigned long feature_flag; /* Feature flag type */
@ -176,6 +209,8 @@ typedef struct H5F_mtab_t {
typedef struct H5F_super_t {
H5AC_info_t cache_info; /* Cache entry information structure */
unsigned super_vers; /* Superblock version */
uint8_t sizeof_addr; /* Size of addresses in file */
uint8_t sizeof_size; /* Size of offsets in file */
uint8_t status_flags; /* File status flags */
unsigned sym_leaf_k; /* Size of leaves in symbol tables */
unsigned btree_k[H5B_NUM_BTREE_ID]; /* B-tree key values for each type */
@ -197,6 +232,13 @@ typedef struct H5F_super_t {
struct H5F_file_t {
H5FD_t *lf; /* Lower level file handle for I/O */
H5F_super_t *sblock; /* Pointer to (pinned) superblock for file */
H5O_drvinfo_t *drvinfo; /* Pointer to the (pinned) driver info
* cache entry. This field is only defined
* for older versions of the super block,
* and then only when a driver information
* block is present. At all other times
* it should be NULL.
*/
unsigned nrefs; /* Ref count for times file is opened */
unsigned flags; /* Access Permissions for file */
H5F_mtab_t mtab; /* File mount table */
@ -283,6 +325,7 @@ H5FL_EXTERN(H5F_t);
H5FL_EXTERN(H5F_file_t);
H5_DLLVAR const H5AC_class_t H5AC_SUPERBLOCK[1];
H5_DLLVAR const H5AC_class_t H5AC_DRVRINFO[1];
/******************************/

View File

@ -258,14 +258,21 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
{
H5P_genplist_t *dxpl; /* DXPL object */
H5F_super_t * sblock = NULL; /* Superblock structure */
H5F_superblock_cache_ud_t udata; /* User data for cache callbacks */
H5P_genplist_t *c_plist; /* File creation property list */
unsigned sblock_flags = H5AC__NO_FLAGS_SET; /* flags used in superblock unprotect call */
haddr_t super_addr; /* Absolute address of superblock */
H5AC_protect_t rw; /* Read/write permissions for file */
hbool_t dirtied = FALSE; /* Bool for sblock protect call */
haddr_t eof; /* End of file address */
unsigned rw_flags; /* Read/write permissions for file */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
/* initialize the drvinfo to NULL -- we will overwrite this if there
* is a driver information block
*/
f->shared->drvinfo = NULL;
/* Get the DXPL plist object for DXPL ID */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
@ -284,23 +291,358 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id)
} /* end if */
/* Determine file intent for superblock protect */
if(H5F_INTENT(f) & H5F_ACC_RDWR)
rw = H5AC_WRITE;
else
rw = H5AC_READ;
/* Must tell cache at protect time that the super block is to be
* flushed last (and collectively in the parallel case).
*/
rw_flags = H5AC__FLUSH_LAST_FLAG;
#ifdef H5_HAVE_PARALLEL
rw_flags |= H5C__FLUSH_COLLECTIVELY_FLAG;
#endif /* H5_HAVE_PARALLEL */
if(!(H5F_INTENT(f) & H5F_ACC_RDWR))
rw_flags |= H5AC__READ_ONLY_FLAG;
/* Get the shared file creation property list */
if(NULL == (c_plist = (H5P_genplist_t *)H5I_object(f->shared->fcpl_id)))
HGOTO_ERROR(H5E_FILE, H5E_BADTYPE, FAIL, "can't get property list")
/* Make certain we can read the fixed-size portion of the superblock */
if(H5F__set_eoa(f, H5FD_MEM_SUPER,
H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "set end of space allocation request failed")
/* Set up the user data for cache callbacks */
udata.f = f;
udata.ignore_drvrinfo = H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO);
udata.sym_leaf_k = 0;
if(H5P_get(c_plist, H5F_CRT_BTREE_RANK_NAME, udata.btree_k) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
udata.stored_eof = HADDR_UNDEF;
udata.drvrinfo_removed = FALSE;
/* Look up the superblock */
if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &dirtied, rw)))
if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
/* Mark the superblock dirty if it was modified during loading or VFD indicated to do so */
if((H5AC_WRITE == rw) && (dirtied || H5F_HAS_FEATURE(f, H5FD_FEAT_DIRTY_SBLK_LOAD)))
sblock_flags |= H5AC__DIRTIED_FLAG;
/* Pin the superblock in the cache */
if(H5AC_pin_protected_entry(sblock) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTPIN, FAIL, "unable to pin superblock")
/* Mark the superblock dirty if it was modified during loading */
if(((rw_flags & H5AC__READ_ONLY_FLAG) == 0) && udata.ignore_drvrinfo && udata.drvrinfo_removed) {
HDassert(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2);
sblock_flags |= H5AC__DIRTIED_FLAG;
} /* end if */
/* The superblock must be flushed last (and collectively in parallel) */
sblock_flags |= H5AC__FLUSH_LAST_FLAG;
#ifdef H5_HAVE_PARALLEL
sblock_flags |= H5AC__FLUSH_COLLECTIVELY_FLAG;
#endif /* H5_HAVE_PARALLEL */
/* Check if superblock address is different from base address and adjust
* base address and "end of address" address if so.
*/
if(!H5F_addr_eq(super_addr, sblock->base_addr)) {
/* Check if the superblock moved earlier in the file */
if(H5F_addr_lt(super_addr, sblock->base_addr))
udata.stored_eof -= (sblock->base_addr - super_addr);
else
/* The superblock moved later in the file */
udata.stored_eof += (super_addr - sblock->base_addr);
/* Adjust base address for offsets of the HDF5 data in the file */
sblock->base_addr = super_addr;
/* Set the base address for the file in the VFD now */
if(H5F__set_base_addr(f, sblock->base_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "failed to set base address for file driver")
/* Indicate that the superblock should be marked dirty */
if((rw_flags & H5AC__READ_ONLY_FLAG) == 0)
sblock_flags |= H5AC__DIRTIED_FLAG;
} /* end if */
/* Set information in the file's creation property list */
if(H5P_set(c_plist, H5F_CRT_SUPER_VERS_NAME, &sblock->super_vers) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set superblock version")
if(H5P_set(c_plist, H5F_CRT_ADDR_BYTE_NUM_NAME, &sblock->sizeof_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set byte number in an address")
if(H5P_set(c_plist, H5F_CRT_OBJ_BYTE_NUM_NAME, &sblock->sizeof_size) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set byte number for object size")
/* Handle the B-tree 'K' values */
if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) {
/* Sanity check */
HDassert(udata.sym_leaf_k != 0);
/* Set the symbol table internal node 'K' value */
if(H5P_set(c_plist, H5F_CRT_SYM_LEAF_NAME, &udata.sym_leaf_k) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for symbol table leaf nodes")
sblock->sym_leaf_k = udata.sym_leaf_k;
/* Set the B-tree internal node values, etc */
if(H5P_set(c_plist, H5F_CRT_BTREE_RANK_NAME, udata.btree_k) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for btree internal nodes")
HDmemcpy(sblock->btree_k, udata.btree_k, sizeof(unsigned) * (size_t)H5B_NUM_BTREE_ID);
} /* end if */
else {
/* Get the (default) B-tree internal node values, etc */
/* (Note: these may be reset in a superblock extension) */
if(H5P_get(c_plist, H5F_CRT_BTREE_RANK_NAME, sblock->btree_k) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
if(H5P_get(c_plist, H5F_CRT_SYM_LEAF_NAME, &sblock->sym_leaf_k) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get rank for symbol table leaf nodes")
} /* end else */
/*
* The user-defined data is the area of the file before the base
* address.
*/
if(H5P_set(c_plist, H5F_CRT_USER_BLOCK_NAME, &sblock->base_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set userblock size")
/*
* Make sure that the data is not truncated. One case where this is
* possible is if the first file of a family of files was opened
* individually.
*/
if(HADDR_UNDEF == (eof = H5FD_get_eof(f->shared->lf, H5FD_MEM_DEFAULT)))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to determine file size")
/* (Account for the stored EOA being absolute offset -QAK) */
if((eof + sblock->base_addr) < udata.stored_eof)
HGOTO_ERROR(H5E_FILE, H5E_TRUNCATED, FAIL, "truncated file: eof = %llu, sblock->base_addr = %llu, stored_eoa = %llu", (unsigned long long)eof, (unsigned long long)sblock->base_addr, (unsigned long long)udata.stored_eof)
/*
* Tell the file driver how much address space has already been
* allocated so that it knows how to allocate additional memory.
*/
/* Decode the optional driver information block */
if(H5F_addr_defined(sblock->driver_addr)) {
H5O_drvinfo_t *drvinfo; /* Driver info */
H5F_drvrinfo_cache_ud_t drvrinfo_udata; /* User data for metadata callbacks */
unsigned drvinfo_flags = H5AC__NO_FLAGS_SET; /* Flags used in driver info block unprotect call */
/* Sanity check - driver info block should only be defined for
* superblock version < 2.
*/
HDassert(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2);
/* Set up user data */
drvrinfo_udata.f = f;
drvrinfo_udata.driver_addr = sblock->driver_addr;
/* extend EOA so we can read at least the fixed sized
* portion of the driver info block
*/
if(H5FD_set_eoa(f->shared->lf, H5FD_MEM_SUPER, sblock->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE) < 0) /* will extend eoa later if required */
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, \
"set end of space allocation request failed")
/* Look up the driver info block */
if(NULL == (drvinfo = (H5O_drvinfo_t *)H5AC_protect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, &drvrinfo_udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load driver info block")
/* Loading the driver info block is enough to set up the right info */
/* Check if we need to rewrite the driver info block info */
if ( ( (rw_flags & H5AC__READ_ONLY_FLAG) == 0 ) &&
( H5F_HAS_FEATURE(f, H5FD_FEAT_DIRTY_DRVRINFO_LOAD) ) ) {
drvinfo_flags |= H5AC__DIRTIED_FLAG;
} /* end if */
/* set the pin entry flag so that the driver information block
* cache entry will be pinned in the cache.
*/
drvinfo_flags |= H5AC__PIN_ENTRY_FLAG;
/* Release the driver info block */
if(H5AC_unprotect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, drvinfo_flags) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to release driver info block")
/* save a pointer to the driver information cache entry */
f->shared->drvinfo = drvinfo;
} /* end if */
/* (Account for the stored EOA being absolute offset -NAF) */
if(H5F__set_eoa(f, H5FD_MEM_SUPER, udata.stored_eof - sblock->base_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set end-of-address marker for file")
/* Decode the optional superblock extension info */
if(H5F_addr_defined(sblock->ext_addr)) {
H5O_loc_t ext_loc; /* "Object location" for superblock extension */
H5O_btreek_t btreek; /* v1 B-tree 'K' value message from superblock extension */
H5O_drvinfo_t drvinfo; /* Driver info message from superblock extension */
size_t u; /* Local index variable */
htri_t status; /* Status for message existing */
/* Sanity check - superblock extension should only be defined for
* superblock version >= 2.
*/
HDassert(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2);
/* Check for superblock extension being located "outside" the stored
* 'eoa' value, which can occur with the split/multi VFD.
*/
if(H5F_addr_gt(sblock->ext_addr, udata.stored_eof)) {
/* Set the 'eoa' for the object header memory type large enough
* to give some room for a reasonably sized superblock extension.
* (This is _rather_ a kludge -QAK)
*/
if(H5F__set_eoa(f, H5FD_MEM_OHDR, (haddr_t)(sblock->ext_addr + 1024)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set end-of-address marker for file")
} /* end if */
/* Open the superblock extension */
if(H5F_super_ext_open(f, sblock->ext_addr, &ext_loc) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open file's superblock extension")
/* Check for the extension having a 'driver info' message */
if((status = H5O_msg_exists(&ext_loc, H5O_DRVINFO_ID, dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
/* Check for ignoring the driver info for this file */
if(!udata.ignore_drvrinfo) {
/* Retrieve the 'driver info' structure */
if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "driver info message not present")
/* Validate and decode driver information */
if(H5FD_sb_load(f->shared->lf, drvinfo.name, drvinfo.buf) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "unable to decode driver information")
/* Reset driver info message */
H5O_msg_reset(H5O_DRVINFO_ID, &drvinfo);
} /* end else */
} /* end if */
/* Read in the shared OH message information if there is any */
if(H5SM_get_info(&ext_loc, c_plist, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to read SOHM table information")
/* Check for the extension having a 'v1 B-tree "K"' message */
if((status = H5O_msg_exists(&ext_loc, H5O_BTREEK_ID, dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
/* Retrieve the 'v1 B-tree "K"' structure */
if(NULL == H5O_msg_read(&ext_loc, H5O_BTREEK_ID, &btreek, dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "v1 B-tree 'K' info message not present")
/* Set non-default v1 B-tree 'K' value info from file */
sblock->btree_k[H5B_CHUNK_ID] = btreek.btree_k[H5B_CHUNK_ID];
sblock->btree_k[H5B_SNODE_ID] = btreek.btree_k[H5B_SNODE_ID];
sblock->sym_leaf_k = btreek.sym_leaf_k;
/* Set non-default v1 B-tree 'K' values in the property list */
if(H5P_set(c_plist, H5F_CRT_BTREE_RANK_NAME, btreek.btree_k) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for btree internal nodes")
if(H5P_set(c_plist, H5F_CRT_SYM_LEAF_NAME, &btreek.sym_leaf_k) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set rank for symbol table leaf nodes")
} /* end if */
/* Check for the extension having a 'free-space manager info' message */
if((status = H5O_msg_exists(&ext_loc, H5O_FSINFO_ID, dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
H5O_fsinfo_t fsinfo; /* Free-space manager info message from superblock extension */
/* Retrieve the 'free-space manager info' structure */
if(NULL == H5O_msg_read(&ext_loc, H5O_FSINFO_ID, &fsinfo, dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get free-space manager info message")
/* Check for non-default info */
if(f->shared->fs_strategy != fsinfo.strategy) {
f->shared->fs_strategy = fsinfo.strategy;
/* Set non-default strategy in the property list */
if(H5P_set(c_plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &fsinfo.strategy) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space strategy")
} /* end if */
if(f->shared->fs_threshold != fsinfo.threshold) {
f->shared->fs_threshold = fsinfo.threshold;
/* Set non-default threshold in the property list */
if(H5P_set(c_plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &fsinfo.threshold) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space threshold")
} /* end if */
/* Set free-space manager addresses */
f->shared->fs_addr[0] = HADDR_UNDEF;
for(u = 1; u < NELMTS(f->shared->fs_addr); u++)
f->shared->fs_addr[u] = fsinfo.fs_addr[u-1];
} /* end if */
/* Close superblock extension */
if(H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
} /* end if */
/* Update the driver info if VFD indicated to do so */
/* (NOTE: only for later versions of superblock, earlier versions are handled
* earlier in this routine.
*/
if(((rw_flags & H5AC__READ_ONLY_FLAG) == 0) &&
sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2 &&
H5F_addr_defined(sblock->ext_addr)) {
/* Check for modifying the driver info when opening the file */
if(H5F_HAS_FEATURE(f, H5FD_FEAT_DIRTY_DRVRINFO_LOAD)) {
size_t driver_size; /* Size of driver info block (bytes) */
/* Check for driver info message */
H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
if(driver_size > 0) {
H5O_drvinfo_t drvinfo; /* Driver info */
uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
/* Sanity check */
HDassert(driver_size <= H5F_MAX_DRVINFOBLOCK_SIZE);
/* Encode driver-specific data */
if(H5FD_sb_encode(f->shared->lf, drvinfo.name, dbuf) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
/* Set the driver info information for the superblock extension */
drvinfo.len = driver_size;
drvinfo.buf = dbuf;
/* Write driver info information to the superblock extension */
#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
/* KLUGE ALERT!!
*
* H5F_super_ext_write_msg() expects f->shared->sblock to
* be set -- verify that it is NULL, and then set it.
* Set it back to NULL when we are done.
*/
HDassert(f->shared->sblock == NULL);
f->shared->sblock = sblock;
#endif /* JRM */
if(H5F_super_ext_write_msg(f, dxpl_id, &drvinfo, H5O_DRVINFO_ID, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
f->shared->sblock = NULL;
#endif /* JRM */
} /* end if */
} /* end if */
/* Check for eliminating the driver info block */
else if(H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
/* Remove the driver info message from the superblock extension */
if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_DRVINFO_ID) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
/* Check if the superblock extension was removed */
if(!H5F_addr_defined(sblock->ext_addr))
sblock_flags |= H5AC__DIRTIED_FLAG;
} /* end if */
} /* end if */
/* Set the pointer to the pinned superblock */
f->shared->sblock = sblock;
@ -309,6 +651,32 @@ done:
if(sblock && H5AC_unprotect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to close superblock")
/* If we have failed, make sure no entries are left in the
* metadata cache, so that it can be shut down and discarded.
*/
if(ret_value < 0) {
/* Unpin and discard drvinfo cache entry */
if(f->shared->drvinfo) {
if(H5AC_unpin_entry(f->shared->drvinfo) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin driver info")
/* Evict the driver info block from the cache */
if(H5AC_expunge_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")
} /* end if */
/* Unpin & discard superblock */
if(sblock) {
/* Unpin superblock in cache */
if(H5AC_unpin_entry(sblock) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin superblock")
/* Evict the superblock from the cache */
if(H5AC_expunge_entry(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge superblock")
} /* end if */
} /* end if */
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5F__super_read() */
@ -334,6 +702,8 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
{
H5F_super_t *sblock = NULL; /* Superblock cache structure */
hbool_t sblock_in_cache = FALSE; /* Whether the superblock has been inserted into the metadata cache */
H5O_drvinfo_t *drvinfo = NULL; /* Driver info */
hbool_t drvinfo_in_cache = FALSE; /* Whether the driver info block has been inserted into the metadata cache */
H5P_genplist_t *plist; /* File creation property list */
hsize_t userblock_size; /* Size of userblock, in bytes */
hsize_t superblock_size; /* Size of superblock, in bytes */
@ -427,11 +797,13 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5F__set_base_addr(f, sblock->base_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "failed to set base address for file driver")
/* Save a local copy of the superblock version number */
/* Save a local copy of the superblock version number, size of addresses & offsets */
sblock->super_vers = super_vers;
sblock->sizeof_addr = f->shared->sizeof_addr;
sblock->sizeof_size = f->shared->sizeof_size;
/* Compute the size of the superblock */
superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(super_vers, f);
superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(sblock);
/* Compute the size of the driver information block */
H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
@ -446,10 +818,10 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
} /* end if */
/*
* Allocate space for the userblock, superblock & driver info blocks.
* We do it with one allocation request because the userblock and
* superblock need to be at the beginning of the file and only the first
* allocation request is required to return memory at format address zero.
* Allocate space for the superblock & driver info block.
* We do it with one allocation request because the superblock needs to be
* at the beginning of the file and only the first allocation request is
* required to return memory at format address zero.
*/
if(super_vers < HDF5_SUPERBLOCK_VERSION_2)
superblock_size += driver_size;
@ -466,6 +838,9 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Keep a copy of the superblock info */
f->shared->sblock = sblock;
/* Set the drvinfo field to NULL -- will overwrite this later if needed */
f->shared->drvinfo = NULL;
/*
* Determine if we will need a superblock extension
*/
@ -572,6 +947,33 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update free-space info header message")
} /* end if */
} /* end if */
else {
/* Check for creating an "old-style" driver info block */
if(driver_size > 0) {
/* Sanity check */
HDassert(H5F_addr_defined(sblock->driver_addr));
/* Allocate space for the driver info */
if(NULL == (drvinfo = (H5O_drvinfo_t *)H5MM_calloc(sizeof(H5O_drvinfo_t))))
HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, FAIL, "memory allocation failed for driver info message")
/* Set up driver info message */
/* NOTE: All the actual information (name & driver info) is based on
 *       the VFD info in the file handle and will be encoded by the
 *       VFD's 'encode' callback, so it doesn't need to be set here.
 *       -QAK, 7/20/2013
 */
H5_CHECKED_ASSIGN(drvinfo->len, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
/* Insert driver info block into cache */
if(H5AC_insert_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, H5AC__PIN_ENTRY_FLAG | H5AC__FLUSH_LAST_FLAG | H5AC__FLUSH_COLLECTIVELY_FLAG) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINS, FAIL, "can't add driver info block to cache")
drvinfo_in_cache = TRUE;
f->shared->drvinfo = drvinfo;
} /* end if */
else
HDassert(!H5F_addr_defined(sblock->driver_addr));
} /* end if */
done:
/* Close superblock extension, if it was created */
@ -580,6 +982,23 @@ done:
/* Cleanup on failure */
if(ret_value < 0) {
/* Check if the driver info block has been allocated yet */
if(drvinfo) {
/* Check if we've cached it already */
if(drvinfo_in_cache) {
/* Unpin drvinfo in cache */
if(H5AC_unpin_entry(drvinfo) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin driver info")
/* Evict the driver info block from the cache */
if(H5AC_expunge_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")
} /* end if */
else
/* Free driver info block */
H5MM_xfree(drvinfo);
} /* end if */
/* Check if the superblock has been allocated yet */
if(sblock) {
/* Check if we've cached it already */
@ -635,6 +1054,14 @@ H5F_super_dirty(H5F_t *f)
if(H5AC_mark_entry_dirty(f->shared->sblock) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
/* If the driver information block exists, mark it dirty as well, so that
 * any change in the EOA is also reflected there, if appropriate.
 */
if(f->shared->drvinfo)
if(H5AC_mark_entry_dirty(f->shared->drvinfo) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark drvinfo as dirty")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5F_super_dirty() */
@ -698,7 +1125,7 @@ H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext
/* Set the superblock size */
if(super_size)
*super_size = (hsize_t)H5F_SUPERBLOCK_SIZE(f->shared->sblock->super_vers, f);
*super_size = (hsize_t)H5F_SUPERBLOCK_SIZE(f->shared->sblock);
/* Set the superblock extension size */
if(super_ext_size) {

File diff suppressed because it is too large


@ -46,7 +46,6 @@
/****************/
#define H5G_NODE_VERS 1 /* Symbol table node version number */
#define H5G_NODE_BUF_SIZE 512 /* Size of stack buffer for serialized nodes */
/******************/
@ -64,12 +63,14 @@
/********************/
/* Metadata cache (H5AC) callbacks */
static H5G_node_t *H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
static herr_t H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr,
H5G_node_t *sym, unsigned *flags_ptr);
static herr_t H5G_node_dest(H5F_t *f, H5G_node_t *sym);
static herr_t H5G_node_clear(H5F_t *f, H5G_node_t *sym, hbool_t destroy);
static herr_t H5G_node_size(const H5F_t *f, const H5G_node_t *sym, size_t *size_ptr);
static herr_t H5G__cache_node_get_load_size(const void *udata, size_t *image_len);
static void *H5G__cache_node_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5G__cache_node_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5G__cache_node_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
static herr_t H5G__cache_node_free_icr(void *thing);
/*********************/
@ -88,13 +89,19 @@ static herr_t H5G_node_size(const H5F_t *f, const H5G_node_t *sym, size_t *size_
/* Symbol table nodes inherit cache-like properties from H5AC */
const H5AC_class_t H5AC_SNODE[1] = {{
H5AC_SNODE_ID,
(H5AC_load_func_t)H5G_node_load,
(H5AC_flush_func_t)H5G_node_flush,
(H5AC_dest_func_t)H5G_node_dest,
(H5AC_clear_func_t)H5G_node_clear,
(H5AC_notify_func_t)NULL,
(H5AC_size_func_t)H5G_node_size,
H5AC_SNODE_ID, /* Metadata client ID */
"Symbol table node", /* Metadata client name (for debugging) */
H5FD_MEM_BTREE, /* File space memory type for client */
H5AC__CLASS_NO_FLAGS_SET, /* Client class behavior flags */
H5G__cache_node_get_load_size, /* 'get_load_size' callback */
H5G__cache_node_deserialize, /* 'deserialize' callback */
H5G__cache_node_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5G__cache_node_serialize, /* 'serialize' callback */
NULL, /* 'notify' callback */
H5G__cache_node_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
}};
@ -106,42 +113,80 @@ H5FL_SEQ_EXTERN(H5G_entry_t);
/*-------------------------------------------------------------------------
* Function: H5G_node_load
* Function: H5G__cache_node_get_load_size()
*
* Purpose: Loads a symbol table node from the file.
* Purpose: Determine the size of the on disk image of the node, and
* return this value in *image_len.
*
* Return: Success: Ptr to the new table.
* Note that this computation requires access to the file pointer,
* which is not provided in the parameter list for this callback.
* Finesse this issue by passing in the file pointer twice to the
* H5AC_protect() call -- once as the file pointer proper, and
* again as the user data.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Failure: NULL
*
* Programmer: Robb Matzke
* matzke@llnl.gov
* Jun 23 1997
* Programmer: John Mainzer
* 7/21/14
*
*-------------------------------------------------------------------------
*/
static H5G_node_t *
H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
static herr_t
H5G__cache_node_get_load_size(const void *_udata, size_t *image_len)
{
H5G_node_t *sym = NULL;
H5WB_t *wb = NULL; /* Wrapped buffer for node data */
uint8_t node_buf[H5G_NODE_BUF_SIZE]; /* Buffer for node */
uint8_t *node; /* Pointer to node buffer */
const uint8_t *p;
H5G_node_t *ret_value; /*for error handling */
const H5F_t *f = (const H5F_t *)_udata; /* User data for callback */
FUNC_ENTER_NOAPI_NOINIT
FUNC_ENTER_STATIC_NOERR
/*
* Check arguments.
*/
/* Sanity checks */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(udata);
HDassert(image_len);
/*
* Initialize variables.
*/
/* report image length */
*image_len = (size_t)(H5G_NODE_SIZE(f));
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5G__cache_node_get_load_size() */
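As an illustrative sketch (not part of this changeset), the "pass the file pointer twice" technique described above matches the protect calls in H5Gnode.c later in this diff: f is supplied both as the file and as the udata seen by this callback and by H5G__cache_node_deserialize():

    if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr,
            f /* file pointer doubles as udata */, H5AC__READ_ONLY_FLAG)))
        HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect symbol table node")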
/*-------------------------------------------------------------------------
* Function: H5G__cache_node_deserialize
*
* Purpose: Given a buffer containing the on disk image of a symbol table
* node, allocate an instance of H5G_node_t, load the contents of the
* image into it, and return a pointer to the instance.
*
* Note that deserializing the image requires access to the file
* pointer, which is not included in the parameter list for this
* callback. Finesse this issue by passing in the file pointer
* twice to the H5AC_protect() call -- once as the file pointer
* proper, and again as the user data
*
* Return: Success: Pointer to in core representation
* Failure: NULL
*
* Programmer: John Mainzer
* 6/21/14
*
*-------------------------------------------------------------------------
*/
static void *
H5G__cache_node_deserialize(const void *_image, size_t len, void *_udata,
hbool_t H5_ATTR_UNUSED *dirty)
{
H5F_t *f = (H5F_t *)_udata; /* User data for callback */
H5G_node_t *sym = NULL; /* Symbol table node created */
const uint8_t *image = (const uint8_t *)_image; /* Pointer to image to deserialize */
void * ret_value; /* Return value */
FUNC_ENTER_STATIC
/* Sanity checks */
HDassert(image);
HDassert(len > 0);
HDassert(f);
HDassert(dirty);
/* Allocate symbol table data structures */
if(NULL == (sym = H5FL_CALLOC(H5G_node_t)))
@ -150,184 +195,169 @@ H5G_node_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
if(NULL == (sym->entry = H5FL_SEQ_CALLOC(H5G_entry_t, (size_t)(2 * H5F_SYM_LEAF_K(f)))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Wrap the local buffer for serialized node info */
if(NULL == (wb = H5WB_wrap(node_buf, sizeof(node_buf))))
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, NULL, "can't wrap buffer")
/* Get a pointer to a buffer that's large enough for node */
if(NULL == (node = (uint8_t *)H5WB_actual(wb, sym->node_size)))
HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, NULL, "can't get actual buffer")
/* Read the serialized symbol table node. */
if(H5F_block_read(f, H5FD_MEM_BTREE, addr, sym->node_size, dxpl_id, node) < 0)
HGOTO_ERROR(H5E_SYM, H5E_READERROR, NULL, "unable to read symbol table node")
/* Get temporary pointer to serialized node */
p = node;
/* magic */
if(HDmemcmp(p, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node signature")
p += H5_SIZEOF_MAGIC;
if(HDmemcmp(image, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_SYM, H5E_BADVALUE, NULL, "bad symbol table node signature")
image += H5_SIZEOF_MAGIC;
/* version */
if(H5G_NODE_VERS != *p++)
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "bad symbol table node version")
if(H5G_NODE_VERS != *image++)
HGOTO_ERROR(H5E_SYM, H5E_VERSION, NULL, "bad symbol table node version")
/* reserved */
p++;
image++;
/* number of symbols */
UINT16DECODE(p, sym->nsyms);
UINT16DECODE(image, sym->nsyms);
/* entries */
if(H5G__ent_decode_vec(f, &p, sym->entry, sym->nsyms) < 0)
if(H5G__ent_decode_vec(f, &image, sym->entry, sym->nsyms) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, NULL, "unable to decode symbol table entries")
/* Set return value */
ret_value = sym;
done:
/* Release resources */
if(wb && H5WB_unwrap(wb) < 0)
HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, NULL, "can't close wrapped buffer")
if(!ret_value)
if(sym && H5G__node_free(sym) < 0)
HDONE_ERROR(H5E_SYM, H5E_CANTFREE, NULL, "unable to destroy symbol table node")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5G_node_load() */
} /* end H5G__cache_node_deserialize() */
/*-------------------------------------------------------------------------
* Function: H5G_node_flush
* Function: H5G__cache_node_image_len
*
* Purpose: Flush a symbol table node to disk.
* Purpose: Compute the size of the data structure on disk and return
* it in *image_len.
*
* Return: Non-negative on success/Negative on failure
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: Robb Matzke
* matzke@llnl.gov
* Jun 23 1997
* Programmer: John Mainzer
* 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
H5G_node_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5G_node_t *sym, unsigned H5_ATTR_UNUSED * flags_ptr)
H5G__cache_node_image_len(const void *_thing, size_t *image_len,
hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
H5WB_t *wb = NULL; /* Wrapped buffer for node data */
uint8_t node_buf[H5G_NODE_BUF_SIZE]; /* Buffer for node */
herr_t ret_value = SUCCEED; /* Return value */
const H5G_node_t *sym = (const H5G_node_t *)_thing; /* Pointer to object */
FUNC_ENTER_NOAPI_NOINIT
FUNC_ENTER_STATIC_NOERR
/*
* Check arguments.
*/
HDassert(f);
HDassert(H5F_addr_defined(addr));
/* Sanity checks */
HDassert(sym);
HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(sym->cache_info.type == H5AC_SNODE);
HDassert(image_len);
/*
* Write the symbol node to disk.
*/
if(sym->cache_info.is_dirty) {
uint8_t *node; /* Pointer to node buffer */
uint8_t *p; /* Pointer into raw data buffer */
*image_len = sym->node_size;
/* Wrap the local buffer for serialized node info */
if(NULL == (wb = H5WB_wrap(node_buf, sizeof(node_buf))))
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't wrap buffer")
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5G__cache_node_image_len() */
/* Get a pointer to a buffer that's large enough for node */
if(NULL == (node = (uint8_t *)H5WB_actual(wb, sym->node_size)))
HGOTO_ERROR(H5E_SYM, H5E_NOSPACE, FAIL, "can't get actual buffer")
/* Get temporary pointer to serialized symbol table node */
p = node;
/*************************************/
/* no H5G__cache_node_pre_serialize() */
/*************************************/
/* magic number */
HDmemcpy(p, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC);
p += H5_SIZEOF_MAGIC;
/*-------------------------------------------------------------------------
* Function: H5G__cache_node_serialize
*
* Purpose: Given a correctly sized buffer and an instance of H5G_node_t,
* serialize the contents of the instance of H5G_node_t, and write
* this data into the supplied buffer. This buffer will be written
* to disk.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 7/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
H5G__cache_node_serialize(const H5F_t *f, void *_image, size_t len,
void *_thing)
{
H5G_node_t *sym = (H5G_node_t *)_thing; /* Pointer to object */
uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */
herr_t ret_value = SUCCEED; /* Return value */
/* version number */
*p++ = H5G_NODE_VERS;
FUNC_ENTER_STATIC
/* reserved */
*p++ = 0;
/* Sanity checks */
HDassert(f);
HDassert(image);
HDassert(sym);
HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(sym->cache_info.type == H5AC_SNODE);
HDassert(len == sym->node_size);
/* number of symbols */
UINT16ENCODE(p, sym->nsyms);
/* magic number */
HDmemcpy(image, H5G_NODE_MAGIC, (size_t)H5_SIZEOF_MAGIC);
image += H5_SIZEOF_MAGIC;
/* entries */
if(H5G__ent_encode_vec(f, &p, sym->entry, sym->nsyms) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTENCODE, FAIL, "can't serialize")
HDmemset(p, 0, sym->node_size - (size_t)(p - node));
/* version number */
*image++ = H5G_NODE_VERS;
/* Write the serialized symbol table node. */
if(H5F_block_write(f, H5FD_MEM_BTREE, addr, sym->node_size, dxpl_id, node) < 0)
HGOTO_ERROR(H5E_SYM, H5E_WRITEERROR, FAIL, "unable to write symbol table node to the file")
/* reserved */
*image++ = 0;
/* Reset the node's dirty flag */
sym->cache_info.is_dirty = FALSE;
} /* end if */
/* number of symbols */
UINT16ENCODE(image, sym->nsyms);
/*
* Destroy the symbol node? This might happen if the node is being
* preempted from the cache.
*/
if(destroy)
if(H5G_node_dest(f, sym) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to destroy symbol table node")
/* entries */
if(H5G__ent_encode_vec(f, &image, sym->entry, sym->nsyms) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTENCODE, FAIL, "can't serialize")
/* Clear rest of symbol table node */
HDmemset(image, 0, sym->node_size - (size_t)(image - (uint8_t *)_image));
done:
/* Release resources */
if(wb && H5WB_unwrap(wb) < 0)
HDONE_ERROR(H5E_SYM, H5E_CLOSEERROR, FAIL, "can't close wrapped buffer")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5G_node_flush() */
} /* end H5G__cache_node_serialize() */
/***************************************/
/* no H5G__cache_node_notify() function */
/***************************************/
/*-------------------------------------------------------------------------
* Function: H5G_node_dest
* Function: H5G__cache_node_free_icr
*
* Purpose: Destroy a symbol table node in memory.
* Purpose: Destroys a symbol table node in memory.
*
* Return: Non-negative on success/Negative on failure
* Note: The metadata cache sets the object's cache_info.magic to
* H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
* callback (checked in assert).
*
* Programmer: Quincey Koziol
* koziol@ncsa.uiuc.edu
* Jan 15 2003
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 6/21/14
*
*-------------------------------------------------------------------------
*/
static herr_t
H5G_node_dest(H5F_t *f, H5G_node_t *sym)
H5G__cache_node_free_icr(void *_thing)
{
herr_t ret_value = SUCCEED; /* Return value */
H5G_node_t *sym = (H5G_node_t *)_thing; /* Pointer to the object */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
FUNC_ENTER_STATIC
/*
* Check arguments.
*/
HDassert(f);
/* Sanity checks */
HDassert(sym);
/* Verify that node is clean */
HDassert(sym->cache_info.is_dirty == FALSE);
/* If we're going to free the space on disk, the address must be valid */
HDassert(!sym->cache_info.free_file_space_on_destroy || H5F_addr_defined(sym->cache_info.addr));
/* Check for freeing file space for symbol table node */
if(sym->cache_info.free_file_space_on_destroy) {
/* Release the space on disk */
/* (XXX: Nasty usage of internal DXPL value! -QAK) */
if(H5MF_xfree(f, H5FD_MEM_BTREE, H5AC_dxpl_id, sym->cache_info.addr, (hsize_t)sym->node_size) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to free symbol table node")
} /* end if */
HDassert(sym->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
HDassert(sym->cache_info.type == H5AC_SNODE);
/* Destroy symbol table node */
if(H5G__node_free(sym) < 0)
@ -335,77 +365,5 @@ H5G_node_dest(H5F_t *f, H5G_node_t *sym)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5G_node_dest() */
/*-------------------------------------------------------------------------
* Function: H5G_node_clear
*
* Purpose: Mark a symbol table node in memory as non-dirty.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@ncsa.uiuc.edu
* Mar 20 2003
*
*-------------------------------------------------------------------------
*/
static herr_t
H5G_node_clear(H5F_t *f, H5G_node_t *sym, hbool_t destroy)
{
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT
/*
* Check arguments.
*/
HDassert(sym);
/* Reset the node's dirty flag */
sym->cache_info.is_dirty = FALSE;
/*
* Destroy the symbol node? This might happen if the node is being
* preempted from the cache.
*/
if(destroy)
if(H5G_node_dest(f, sym) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTFREE, FAIL, "unable to destroy symbol table node")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5G_node_clear() */
/*-------------------------------------------------------------------------
* Function: H5G_node_size
*
* Purpose: Compute the size in bytes of the specified instance of
* H5G_node_t on disk, and return it in *size_ptr. On failure
* the value of size_ptr is undefined.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 5/13/04
*
*-------------------------------------------------------------------------
*/
static herr_t
H5G_node_size(const H5F_t H5_ATTR_UNUSED *f, const H5G_node_t *sym, size_t *size_ptr)
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
/*
* Check arguments.
*/
HDassert(f);
HDassert(size_ptr);
*size_ptr = sym->node_size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5G_node_size() */
} /* end H5G__cache_node_free_icr() */


@ -460,7 +460,7 @@ H5G__ent_convert(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, const char *name,
targ_oloc.addr = lnk->u.hard.addr;
/* Get the object header */
if(NULL == (oh = H5O_protect(&targ_oloc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, FAIL, "unable to protect target object header")
/* Check if a symbol table message exists */


@ -541,7 +541,7 @@ H5G_node_found(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void H5_ATTR_UNUSED
/*
* Load the symbol table node for exclusive access.
*/
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to protect symbol table node")
/* Get base address of heap */
@ -647,7 +647,7 @@ H5G_node_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr,
/*
* Load the symbol node.
*/
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_WRITE)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node")
/* Get base address of heap */
@ -691,7 +691,7 @@ H5G_node_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr,
if(H5G_node_create(f, dxpl_id, H5B_INS_FIRST, NULL, NULL, NULL, new_node_p/*out*/) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_INS_ERROR, "unable to split symbol table node")
if(NULL == (snrt = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, *new_node_p, f, H5AC_WRITE)))
if(NULL == (snrt = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, *new_node_p, f, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to split symbol table node")
HDmemcpy(snrt->entry, sn->entry + H5F_SYM_LEAF_K(f),
@ -808,7 +808,7 @@ H5G_node_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key/*in,out*/,
HDassert(udata && udata->common.heap);
/* Load the symbol table */
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_WRITE)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5B_INS_ERROR, "unable to protect symbol table node")
/* "Normal" removal of a single entry from the symbol table node */
@ -1001,7 +1001,7 @@ H5G__node_iterate(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, h
HDassert(udata && udata->heap);
/* Protect the symbol table node & local heap while we iterate over entries */
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/*
@ -1078,7 +1078,7 @@ H5G__node_sumup(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, had
HDassert(num_objs);
/* Find the object node and add the number of symbol entries. */
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
*num_objs += sn->nsyms;
@ -1123,7 +1123,7 @@ H5G__node_by_idx(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, ha
HDassert(udata);
/* Get a pointer to the symbol table node */
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node");
/* Find the node, locate the object symbol table entry and retrieve the name */
@ -1261,11 +1261,11 @@ H5G__node_copy(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_key, hadd
HDassert(udata);
/* load the symbol table into memory from the source file */
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/* get the base address of the heap */
if(NULL == (heap = H5HL_protect(f, dxpl_id, udata->src_heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(f, dxpl_id, udata->src_heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, H5_ITER_ERROR, "unable to protect symbol name")
/* copy object in this node one by one */
@ -1420,7 +1420,7 @@ H5G__node_build_table(H5F_t *f, hid_t dxpl_id, const void H5_ATTR_UNUSED *_lt_ke
* Save information about the symbol table node since we can't lock it
* because we're about to call an application function.
*/
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/* Check if the link table needs to be extended */
@ -1527,14 +1527,14 @@ H5G_node_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent,
/* Pin the heap down in memory */
if(heap_addr > 0 && H5F_addr_defined(heap_addr))
if(NULL == (heap = H5HL_protect(f, dxpl_id, heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(f, dxpl_id, heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, FAIL, "unable to protect symbol table heap")
/*
* If we couldn't load the symbol table node, then try loading the
* B-tree node.
*/
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ))) {
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG))) {
H5G_bt_common_t udata; /*data to pass through B-tree */
H5E_clear_stack(NULL); /* discard that error */


@ -160,7 +160,7 @@ H5G__stab_create_components(H5F_t *f, H5O_stab_t *stab, size_t size_hint, hid_t
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't create heap")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC_WRITE)))
if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Insert name into the heap */
@ -276,7 +276,7 @@ H5G__stab_insert_real(H5F_t *f, const H5O_stab_t *stab, const char *name,
HDassert(obj_lnk);
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC_WRITE)))
if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Initialize data to pass through B-tree */
@ -373,7 +373,7 @@ H5G__stab_remove(const H5O_loc_t *loc, hid_t dxpl_id, H5RS_str_t *grp_full_path_
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(loc->file, dxpl_id, stab.heap_addr, H5AC_WRITE)))
if(NULL == (heap = H5HL_protect(loc->file, dxpl_id, stab.heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Initialize data to pass through B-tree */
@ -431,7 +431,7 @@ H5G__stab_remove_by_idx(const H5O_loc_t *grp_oloc, hid_t dxpl_id, H5RS_str_t *gr
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "not a symbol table")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_WRITE)))
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Initialize data to pass through B-tree */
@ -485,7 +485,7 @@ H5G__stab_delete(H5F_t *f, hid_t dxpl_id, const H5O_stab_t *stab)
HDassert(H5F_addr_defined(stab->heap_addr));
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC_WRITE)))
if(NULL == (heap = H5HL_protect(f, dxpl_id, stab->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Set up user data for B-tree deletion */
@ -546,7 +546,7 @@ H5G__stab_iterate(const H5O_loc_t *oloc, hid_t dxpl_id, H5_iter_order_t order,
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Check on iteration order */
@ -766,7 +766,7 @@ H5G__stab_get_name_by_idx(const H5O_loc_t *oloc, H5_iter_order_t order, hsize_t
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Remap index for decreasing iteration order */
@ -888,7 +888,7 @@ H5G__stab_lookup(const H5O_loc_t *grp_oloc, const char *name, H5O_link_t *lnk,
HGOTO_ERROR(H5E_SYM, H5E_BADMESG, FAIL, "can't read message")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Set up user data to pass to 'find' operation callback */
@ -989,7 +989,7 @@ H5G__stab_lookup_by_idx(const H5O_loc_t *grp_oloc, H5_iter_order_t order, hsize_
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "unable to determine local heap address")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_PROTECT, FAIL, "unable to protect symbol table heap")
/* Remap index for decreasing iteration order */
@ -1081,10 +1081,10 @@ H5G__stab_valid(H5O_loc_t *grp_oloc, hid_t dxpl_id, H5O_stab_t *alt_stab)
} /* end if */
/* Check if the symbol table message's heap address is valid */
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ))) {
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG))) {
/* Address is invalid, try the heap address in the alternate symbol
* table message */
if(!alt_stab || NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, alt_stab->heap_addr, H5AC_READ)))
if(!alt_stab || NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, alt_stab->heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "unable to locate heap")
else {
/* The alternate symbol table's heap address is valid. Adjust the


@ -637,7 +637,7 @@ H5G__verify_cached_stab_test(H5O_loc_t *grp_oloc, H5G_entry_t *ent)
HGOTO_ERROR(H5E_BTREE, H5E_NOTFOUND, FAIL, "b-tree address is invalid")
/* Verify that the heap address is valid */
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(grp_oloc->file, dxpl_id, stab.heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_NOTFOUND, FAIL, "heap address is invalid")
done:
@ -686,7 +686,7 @@ H5G_verify_cached_stabs_test_cb(H5F_t *f, hid_t dxpl_id,
HDassert(H5F_addr_defined(addr));
/* Load the node */
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC_READ)))
if(NULL == (sn = (H5G_node_t *)H5AC_protect(f, dxpl_id, H5AC_SNODE, addr, f, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTLOAD, H5_ITER_ERROR, "unable to load symbol table node")
/* Check each target object to see if its stab message (if present) matches
@ -701,7 +701,7 @@ H5G_verify_cached_stabs_test_cb(H5F_t *f, hid_t dxpl_id,
targ_oloc.addr = sn->entry[i].header;
/* Load target object header */
if(NULL == (targ_oh = H5O_protect(&targ_oloc, dxpl_id, H5AC_READ)))
if(NULL == (targ_oh = H5O_protect(&targ_oloc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_CANTPROTECT, H5_ITER_ERROR, "unable to protect target object header")
/* Check if a symbol table message exists */


@ -172,7 +172,7 @@ H5HF_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam)
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "memory allocation failed for fractal heap info")
/* Lock the heap header into memory */
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_WRITE)))
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
/* Point fractal heap wrapper at header and bump its ref count */
@ -231,7 +231,7 @@ H5HF_open(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
HDassert(H5F_addr_defined(fh_addr));
/* Load the heap header into memory */
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_READ)))
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
/* Check for pending heap deletion */
@ -821,7 +821,7 @@ H5HF_close(H5HF_t *fh, hid_t dxpl_id)
H5HF_hdr_t *hdr; /* Another pointer to fractal heap header */
/* Lock the heap header into memory */
if(NULL == (hdr = H5HF_hdr_protect(fh->f, dxpl_id, heap_addr, H5AC_WRITE)))
if(NULL == (hdr = H5HF_hdr_protect(fh->f, dxpl_id, heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Delete heap, starting with header (unprotects header) */
@ -865,7 +865,7 @@ H5HF_delete(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr)
HDassert(H5F_addr_defined(fh_addr));
/* Lock the heap header into memory */
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_WRITE)))
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Check for files using shared heap header */

File diff suppressed because it is too large


@ -323,7 +323,7 @@ H5HF_hdr_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
HDassert(fwidth >= 0);
/* Load the fractal heap header */
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, addr, H5AC_READ)))
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Print the information about the heap's header */
@ -459,13 +459,13 @@ H5HF_dblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
HDassert(block_size > 0);
/* Load the fractal heap header */
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC_READ)))
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/*
* Load the heap direct block
*/
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, addr, block_size, NULL, 0, H5AC_READ)))
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, addr, block_size, NULL, 0, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap direct block")
/* Print opening message */
@ -716,13 +716,13 @@ H5HF_iblock_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream,
HDassert(nrows > 0);
/* Load the fractal heap header */
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC_READ)))
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, hdr_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/*
* Load the heap indirect block
*/
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, addr, nrows, NULL, 0, FALSE, H5AC_READ, &did_protect)))
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, addr, nrows, NULL, 0, FALSE, H5AC__READ_ONLY_FLAG, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap indirect block")
/* Print the information about the heap's indirect block */
@ -825,7 +825,7 @@ H5HF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fh_addr,
HDassert(fwidth >= 0);
/* Load the fractal heap header */
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC_READ)))
if(NULL == (hdr = H5HF_hdr_protect(f, dxpl_id, fh_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap header")
/* Initialize the free space information for the heap */


@ -149,6 +149,9 @@ H5HF_man_dblock_create(hid_t dxpl_id, H5HF_hdr_t *hdr, H5HF_indirect_t *par_iblo
HDmemset(dblock->blk, 0, dblock->size);
#endif /* H5_CLEAR_MEMORY */
dblock->write_buf = NULL;
dblock->write_size = 0;
/* Allocate [temporary] space for the direct block on disk */
if(H5F_USE_TMP_SPACE(hdr->f)) {
if(HADDR_UNDEF == (dblock_addr = H5MF_alloc_tmp(hdr->f, (hsize_t)dblock->size)))
@ -308,9 +311,13 @@ H5HF_man_dblock_destroy(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_direct_t *dblock,
} /* end if */
} /* end else */
/* Indicate that the indirect block should be deleted & file space freed */
/* Indicate that the direct block should be deleted */
dblock->file_size = dblock_size;
cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG;
/* If the dblock is in real file space, also tell the cache to free its file space */
if (!H5F_IS_TMP_ADDR(hdr->f, dblock_addr))
cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
done:
/* Unprotect the indirect block, with appropriate flags */
@ -436,7 +443,7 @@ done:
H5HF_direct_t *
H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
size_t dblock_size, H5HF_indirect_t *par_iblock, unsigned par_entry,
H5AC_protect_t rw)
unsigned flags)
{
H5HF_direct_t *dblock; /* Direct block from cache */
H5HF_dblock_cache_ud_t udata; /* parent and other info for deserializing direct block */
@ -451,6 +458,9 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
HDassert(H5F_addr_defined(dblock_addr));
HDassert(dblock_size > 0);
/* only H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up parent info */
udata.par_info.hdr = hdr;
udata.par_info.iblock = par_iblock;
@ -485,7 +495,7 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
} /* end else */
/* Protect the direct block */
if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &udata, rw)))
if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap direct block")
/* Set the return value */
@ -512,7 +522,7 @@ done:
herr_t
H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
H5HF_indirect_t **ret_iblock, unsigned *ret_entry, hbool_t *ret_did_protect,
H5AC_protect_t rw)
unsigned flags)
{
haddr_t iblock_addr; /* Indirect block's address */
H5HF_indirect_t *iblock; /* Pointer to indirect block */
@ -531,6 +541,9 @@ H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
HDassert(ret_iblock);
HDassert(ret_did_protect);
/* only H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (~H5AC__READ_ONLY_FLAG)) == 0);
/* Look up row & column for object */
if(H5HF_dtable_lookup(&hdr->man_dtable, obj_off, &row, &col) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of object")
@ -539,7 +552,7 @@ H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
iblock_addr = hdr->man_dtable.table_addr;
/* Lock root indirect block */
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, hdr->man_dtable.curr_root_rows, NULL, 0, FALSE, rw, &did_protect)))
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, hdr->man_dtable.curr_root_rows, NULL, 0, FALSE, flags, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Check for indirect block row */
@ -569,7 +582,7 @@ H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id, hsize_t obj_off,
} /* end if */
/* Lock child indirect block */
if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, iblock, entry, FALSE, rw, &new_did_protect)))
if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, iblock, entry, FALSE, flags, &new_did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Release the current indirect block */


@ -529,7 +529,7 @@ done:
*-------------------------------------------------------------------------
*/
H5HF_hdr_t *
H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags)
{
H5HF_hdr_cache_ud_t cache_udata; /* User-data for callback */
H5HF_hdr_t *hdr; /* Fractal heap header */
@ -541,12 +541,15 @@ H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
HDassert(f);
HDassert(H5F_addr_defined(addr));
/* only H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Set up userdata for protect call */
cache_udata.f = f;
cache_udata.dxpl_id = dxpl_id;
/* Lock the heap header into memory */
if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, &cache_udata, rw)))
if(NULL == (hdr = (H5HF_hdr_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_HDR, addr, &cache_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap header")
/* Set the header's address */
@ -1109,7 +1112,7 @@ H5HF_hdr_update_iter(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_size)
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "can't allocate fractal heap indirect block")
/* Lock new indirect block */
if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, new_iblock_addr, child_nrows, iblock, next_entry, FALSE, H5AC_WRITE, &did_protect)))
if(NULL == (new_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, new_iblock_addr, child_nrows, iblock, next_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Move iterator down one level (pins indirect block) */
@ -1303,7 +1306,7 @@ H5HF_hdr_reverse_iter(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr)
child_nrows = H5HF_dtable_size_to_rows(&hdr->man_dtable, hdr->man_dtable.row_block_size[row]);
/* Lock child indirect block */
if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock->ents[curr_entry].addr, child_nrows, iblock, curr_entry, FALSE, H5AC_WRITE, &did_protect)))
if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock->ents[curr_entry].addr, child_nrows, iblock, curr_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Set the current location of the iterator */


@ -330,8 +330,15 @@ H5HF_iblock_decr(H5HF_indirect_t *iblock)
/* Check for expunging the indirect block from the metadata cache */
if(expunge_iblock) {
/* Evict the indirect block from the metadata cache */
if(H5AC_expunge_entry(hdr->f, H5AC_dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, H5AC__FREE_FILE_SPACE_FLAG) < 0)
unsigned cache_flags = H5AC__NO_FLAGS_SET;
/* if the indirect block is in real file space, tell
* the cache to free its file space.
*/
if (!H5F_IS_TMP_ADDR(hdr->f, iblock_addr))
cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
if(H5AC_expunge_entry(hdr->f, H5AC_dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, cache_flags) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove indirect block from cache")
} /* end if */
} /* end if */
@ -424,7 +431,7 @@ H5HF_man_iblock_root_create(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si
/* Move current direct block (used as root) into new indirect block */
/* Lock new indirect block */
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, NULL, 0, FALSE, H5AC_WRITE, &did_protect)))
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, NULL, 0, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Check if there's already a direct block as the root */
@ -433,7 +440,7 @@ H5HF_man_iblock_root_create(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t min_dblock_si
H5HF_direct_t *dblock; /* Pointer to direct block to query */
/* Lock first (root) direct block */
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, hdr->man_dtable.table_addr, hdr->man_dtable.cparam.start_block_size, NULL, 0, H5AC_WRITE)))
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, hdr->man_dtable.table_addr, hdr->man_dtable.cparam.start_block_size, NULL, 0, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap direct block")
/* Attach direct block to new root indirect block */
@ -879,7 +886,7 @@ H5HF_man_iblock_root_revert(H5HF_indirect_t *root_iblock, hid_t dxpl_id)
dblock_size = hdr->man_dtable.cparam.start_block_size;
/* Get pointer to last direct block */
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, root_iblock, 0, H5AC_WRITE)))
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, root_iblock, 0, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap direct block")
HDassert(dblock->parent == root_iblock);
HDassert(dblock->par_entry == 0);
@ -1159,7 +1166,7 @@ done:
H5HF_indirect_t *
H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
unsigned iblock_nrows, H5HF_indirect_t *par_iblock, unsigned par_entry,
hbool_t must_protect, H5AC_protect_t rw, hbool_t *did_protect)
hbool_t must_protect, unsigned flags, hbool_t *did_protect)
{
H5HF_parent_t par_info; /* Parent info for loading block */
H5HF_indirect_t *iblock = NULL; /* Indirect block from cache */
@ -1176,6 +1183,9 @@ H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
HDassert(iblock_nrows > 0);
HDassert(did_protect);
/* only H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Check if we are allowed to use existing pinned iblock pointer */
if(!must_protect) {
/* Check for this block already being pinned */
@ -1235,7 +1245,7 @@ H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
cache_udata.nrows = &iblock_nrows;
/* Protect the indirect block */
if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, &cache_udata, rw)))
if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_IBLOCK, iblock_addr, &cache_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap indirect block")
/* Set the indirect block's address */
@ -1579,7 +1589,7 @@ H5HF_man_iblock_delete(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
HDassert(iblock_nrows > 0);
/* Lock indirect block */
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, par_iblock, par_entry, TRUE, H5AC_WRITE, &did_protect)))
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, par_iblock, par_entry, TRUE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
HDassert(iblock->nchildren > 0);
HDassert(did_protect == TRUE);
@ -1637,8 +1647,14 @@ H5HF_man_iblock_delete(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t iblock_addr,
}
#endif /* NDEBUG */
/* Indicate that the indirect block should be deleted & file space freed */
cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG | H5AC__FREE_FILE_SPACE_FLAG;
/* Indicate that the indirect block should be deleted */
cache_flags |= H5AC__DIRTIED_FLAG | H5AC__DELETED_FLAG;
/* If the indirect block is in real file space, tell
* the cache to free its file space as well.
*/
if (!H5F_IS_TMP_ADDR(hdr->f, iblock_addr))
cache_flags |= H5AC__FREE_FILE_SPACE_FLAG;
done:
/* Unprotect the indirect block, with appropriate flags */
@ -1680,7 +1696,7 @@ H5HF_man_iblock_size(H5F_t *f, hid_t dxpl_id, H5HF_hdr_t *hdr, haddr_t iblock_ad
HDassert(heap_size);
/* Protect the indirect block */
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, par_iblock, par_entry, FALSE, H5AC_READ, &did_protect)))
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, nrows, par_iblock, par_entry, FALSE, H5AC__READ_ONLY_FLAG, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load fractal heap indirect block")
/* Accumulate size of this indirect block */

View File

@ -217,7 +217,7 @@ H5HF_man_iter_start_offset(H5HF_hdr_t *hdr, hid_t dxpl_id,
} /* end else */
/* Load indirect block for this context location */
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, iblock_parent, iblock_par_entry, FALSE, H5AC_WRITE, &did_protect)))
if(NULL == (iblock = H5HF_man_iblock_protect(hdr, dxpl_id, iblock_addr, iblock_nrows, iblock_parent, iblock_par_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
/* Make indirect block the context for the current location */


@ -161,7 +161,7 @@ H5HF_man_insert(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t obj_size, const void *obj
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't retrieve direct block information")
/* Lock direct block */
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sec_node->u.single.parent, sec_node->u.single.par_entry, H5AC_WRITE)))
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sec_node->u.single.parent, sec_node->u.single.par_entry, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load fractal heap direct block")
/* Insert object into block */
@ -274,7 +274,11 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
H5HF_operator_t op, void *op_data, unsigned op_flags)
{
H5HF_direct_t *dblock = NULL; /* Pointer to direct block to query */
H5AC_protect_t dblock_access; /* Access method for direct block */
unsigned dblock_access_flags; /* Access method for direct block */
/* must equal either
* H5AC__NO_FLAGS_SET or
* H5AC__READ_ONLY_FLAG
*/
haddr_t dblock_addr; /* Direct block address */
size_t dblock_size; /* Direct block size */
unsigned dblock_cache_flags; /* Flags for unprotecting direct block */
@ -298,11 +302,11 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
/* Check pipeline */
H5HF_MAN_WRITE_CHECK_PLINE(hdr)
dblock_access = H5AC_WRITE;
dblock_access_flags = H5AC__NO_FLAGS_SET;
dblock_cache_flags = H5AC__DIRTIED_FLAG;
} /* end if */
else {
dblock_access = H5AC_READ;
dblock_access_flags = H5AC__READ_ONLY_FLAG;
dblock_cache_flags = H5AC__NO_FLAGS_SET;
} /* end else */
@ -332,7 +336,7 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
dblock_size = hdr->man_dtable.cparam.start_block_size;
/* Lock direct block */
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, NULL, 0, dblock_access)))
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, NULL, 0, dblock_access_flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap direct block")
} /* end if */
else {
@ -341,7 +345,7 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
unsigned entry; /* Entry of block */
/* Look up indirect block containing direct block */
if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &entry, &did_protect, H5AC_READ) < 0)
if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &entry, &did_protect, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Set direct block info */
@ -359,7 +363,7 @@ H5HF_man_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,
} /* end if */
/* Lock direct block */
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, iblock, entry, dblock_access))) {
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, iblock, entry, dblock_access_flags))) {
/* Unlock indirect block */
if(H5HF_man_iblock_unprotect(iblock, dxpl_id, H5AC__NO_FLAGS_SET, did_protect) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "unable to release fractal heap indirect block")
@ -578,7 +582,7 @@ H5HF_man_remove(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id)
} /* end if */
else {
/* Look up indirect block containing direct block */
if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &dblock_entry, &did_protect, H5AC_WRITE) < 0)
if(H5HF_man_dblock_locate(hdr, dxpl_id, obj_off, &iblock, &dblock_entry, &did_protect, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Check for offset of invalid direct block */


@ -341,7 +341,7 @@ typedef struct H5HF_hdr_t {
size_t rc; /* Reference count of heap's components using heap header */
haddr_t heap_addr; /* Address of heap header in the file */
size_t heap_size; /* Size of heap header in the file */
H5AC_protect_t mode; /* Access mode for heap */
unsigned mode; /* Access mode for heap */
H5F_t *f; /* Pointer to file for heap */
size_t file_rc; /* Reference count of files using heap header */
hbool_t pending_delete; /* Heap is pending deletion */
@ -424,6 +424,31 @@ typedef struct H5HF_direct_t {
size_t size; /* Size of direct block */
hsize_t file_size; /* Size of direct block in file (only valid when block's space is being freed) */
uint8_t *blk; /* Pointer to buffer containing block data */
uint8_t *write_buf; /* Pointer to buffer containing the block data */
/* in form ready to copy to the metadata */
/* cache's image buffer. */
/* */
/* This field is used by */
/* H5HF_cache_dblock_pre_serialize() to pass */
/* the serialized image of the direct block to */
/* H5HF_cache_dblock_serialize(). It should */
/* be NULL at all other times. */
/* */
/* If I/O filters are enabled, the */
/* pre-serialize function will allocate */
/* a buffer, copy the filtered version of the */
/* direct block image into it, and place the */
/* base address of the buffer in this field. */
/* The serialize function must discard this */
/* buffer after it copies the contents into */
/* the image buffer provided by the metadata */
/* cache. */
/* */
/* If I/O filters are not enabled, the */
/* write_buf field is simply set equal to the */
/* blk field by the pre-serialize function, */
/* and back to NULL by the serialize function. */
size_t write_size; /* size of the buffer pointed to by write_buf. */
/* Stored values */
hsize_t block_off; /* Offset of the block within the heap's address space */
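The write_buf/write_size handoff documented above is implemented by the direct block 'pre_serialize' and 'serialize' callbacks in H5HFcache.c, whose diff is suppressed above as too large. The following is only a rough sketch of that contract -- the function and parameter names (sketch_*, have_filters, filtered_buf, filtered_size) are hypothetical stand-ins, not the actual callbacks:

static herr_t
sketch_dblock_pre_serialize(H5HF_direct_t *dblock, hbool_t have_filters,
    uint8_t *filtered_buf, size_t filtered_size)
{
    if(have_filters) {
        /* Filtered case: point write_buf at a separately allocated,
         * filtered copy of the block image; serialize must discard it.
         */
        dblock->write_buf = filtered_buf;
        dblock->write_size = filtered_size;
    } /* end if */
    else {
        /* Unfiltered case: simply reuse the in-core block image */
        dblock->write_buf = dblock->blk;
        dblock->write_size = dblock->size;
    } /* end else */

    return SUCCEED;
} /* end sketch_dblock_pre_serialize() */

static herr_t
sketch_dblock_serialize(uint8_t *image, size_t len, H5HF_direct_t *dblock)
{
    HDassert(dblock->write_buf);
    HDassert(dblock->write_size <= len);

    /* Copy the prepared image into the metadata cache's image buffer */
    HDmemcpy(image, dblock->write_buf, dblock->write_size);

    /* Discard the temporary buffer if pre-serialize allocated one */
    if(dblock->write_buf != dblock->blk)
        H5MM_xfree(dblock->write_buf);

    /* Reset the handoff fields, per the comment above */
    dblock->write_buf = NULL;
    dblock->write_size = 0;

    return SUCCEED;
} /* end sketch_dblock_serialize() */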
@ -597,7 +622,7 @@ H5_DLL hsize_t H5HF_dtable_span_size(const H5HF_dtable_t *dtable, unsigned start
H5_DLL H5HF_hdr_t * H5HF_hdr_alloc(H5F_t *f);
H5_DLL haddr_t H5HF_hdr_create(H5F_t *f, hid_t dxpl_id, const H5HF_create_t *cparam);
H5_DLL H5HF_hdr_t *H5HF_hdr_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr,
H5AC_protect_t rw);
unsigned flags);
H5_DLL herr_t H5HF_hdr_finish_init_phase1(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_hdr_finish_init_phase2(H5HF_hdr_t *hdr);
H5_DLL herr_t H5HF_hdr_finish_init(H5HF_hdr_t *hdr);
@ -638,7 +663,7 @@ H5_DLL herr_t H5HF_man_iblock_create(H5HF_hdr_t *hdr, hid_t dxpl_id,
H5_DLL H5HF_indirect_t *H5HF_man_iblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id,
haddr_t iblock_addr, unsigned iblock_nrows,
H5HF_indirect_t *par_iblock, unsigned par_entry, hbool_t must_protect,
H5AC_protect_t rw, hbool_t *did_protect);
unsigned flags, hbool_t *did_protect);
H5_DLL herr_t H5HF_man_iblock_unprotect(H5HF_indirect_t *iblock, hid_t dxpl_id,
unsigned cache_flags, hbool_t did_protect);
H5_DLL herr_t H5HF_man_iblock_attach(H5HF_indirect_t *iblock, unsigned entry,
@ -664,10 +689,10 @@ H5_DLL herr_t H5HF_man_dblock_destroy(H5HF_hdr_t *hdr, hid_t dxpl_id,
H5_DLL H5HF_direct_t *H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id,
haddr_t dblock_addr, size_t dblock_size,
H5HF_indirect_t *par_iblock, unsigned par_entry,
H5AC_protect_t rw);
unsigned flags);
H5_DLL herr_t H5HF_man_dblock_locate(H5HF_hdr_t *hdr, hid_t dxpl_id,
hsize_t obj_off, H5HF_indirect_t **par_iblock,
unsigned *par_entry, hbool_t *par_did_protect, H5AC_protect_t rw);
unsigned *par_entry, hbool_t *par_did_protect, unsigned flags);
H5_DLL herr_t H5HF_man_dblock_delete(H5F_t *f, hid_t dxpl_id, haddr_t dblock_addr,
hsize_t dblock_size);
H5_DLL herr_t H5HF_man_dblock_dest(H5HF_direct_t *dblock);

View File

@ -555,7 +555,7 @@ H5HF_sect_single_locate_parent(H5HF_hdr_t *hdr, hid_t dxpl_id, hbool_t refresh,
HDassert(sect);
/* Look up indirect block containing direct blocks for range */
if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, &sec_entry, &did_protect, H5AC_READ) < 0)
if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, &sec_entry, &did_protect, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Increment reference count on indirect block that free section is in */
@ -776,7 +776,7 @@ H5HF_sect_single_full_dblock(H5HF_hdr_t *hdr, hid_t dxpl_id,
hdr->man_dtable.curr_root_rows > 0) {
H5HF_direct_t *dblock; /* Pointer to direct block for section */
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sect->u.single.parent, sect->u.single.par_entry, H5AC_WRITE)))
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr, dblock_size, sect->u.single.parent, sect->u.single.par_entry, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load fractal heap direct block")
HDassert(H5F_addr_eq(dblock->block_off + dblock_overhead, sect->sect_info.addr));
@ -1094,7 +1094,7 @@ H5HF_sect_single_shrink(H5FS_section_info_t **_sect, void H5_ATTR_UNUSED *_udata
/* (should be a root direct block) */
HDassert(dblock_addr == hdr->man_dtable.table_addr);
if(NULL == (dblock = H5HF_man_dblock_protect(hdr, dxpl_id, dblock_addr,
dblock_size, (*sect)->u.single.parent, (*sect)->u.single.par_entry, H5AC_WRITE)))
dblock_size, (*sect)->u.single.parent, (*sect)->u.single.par_entry, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load fractal heap direct block")
HDassert(H5F_addr_eq(dblock->block_off + dblock_size, (*sect)->sect_info.addr + (*sect)->sect_info.size));
@ -1221,7 +1221,7 @@ H5HF_sect_single_valid(const H5FS_section_class_t H5_ATTR_UNUSED *cls, const H5F
H5HF_direct_t *dblock; /* Direct block for section */
/* Protect the direct block for the section */
dblock = H5HF_man_dblock_protect(iblock->hdr, H5AC_dxpl_id, dblock_addr, dblock_size, iblock, sect->u.single.par_entry, H5AC_READ);
dblock = H5HF_man_dblock_protect(iblock->hdr, H5AC_dxpl_id, dblock_addr, dblock_size, iblock, sect->u.single.par_entry, H5AC__READ_ONLY_FLAG);
HDassert(dblock);
/* Sanity check settings for section */
@ -2536,7 +2536,7 @@ H5HF_sect_indirect_init_rows(H5HF_hdr_t *hdr, hid_t dxpl_id,
/* If the child indirect block's address is defined, protect it */
if(H5F_addr_defined(child_iblock_addr)) {
if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, child_iblock_addr, child_nrows, sect->u.indirect.u.iblock, curr_entry, FALSE, H5AC_WRITE, &did_protect)))
if(NULL == (child_iblock = H5HF_man_iblock_protect(hdr, dxpl_id, child_iblock_addr, child_nrows, sect->u.indirect.u.iblock, curr_entry, FALSE, H5AC__NO_FLAGS_SET, &did_protect)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect fractal heap indirect block")
} /* end if */
else
@ -2771,7 +2771,7 @@ H5HF_sect_indirect_revive_row(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_
HDassert(sect->sect_info.state == H5FS_SECT_SERIALIZED);
/* Look up indirect block containing indirect blocks for section */
if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, NULL, &did_protect, H5AC_READ) < 0)
if(H5HF_man_dblock_locate(hdr, dxpl_id, sect->sect_info.addr, &sec_iblock, NULL, &did_protect, H5AC__READ_ONLY_FLAG) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCOMPUTE, FAIL, "can't compute row & column of section")
/* Increment reference count on indirect block that free section is in */

View File

@ -246,7 +246,7 @@ done:
*-------------------------------------------------------------------------
*/
H5HG_heap_t *
H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags)
{
H5HG_heap_t *heap; /* Global heap */
H5HG_heap_t *ret_value; /* Return value */
@ -257,8 +257,11 @@ H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
HDassert(f);
HDassert(H5F_addr_defined(addr));
/* only H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Lock the heap into memory */
if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, rw)))
if(NULL == (heap = (H5HG_heap_t *)H5AC_protect(f, dxpl_id, H5AC_GHEAP, addr, f, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")
/* Set the heap's address */
@ -440,7 +443,7 @@ H5HG_extend(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t need)
HDassert(H5F_addr_defined(addr));
/* Protect the heap */
if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC_WRITE)))
if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
/* Re-allocate the heap information in memory */
@ -554,7 +557,7 @@ H5HG_insert(H5F_t *f, hid_t dxpl_id, size_t size, void *obj, H5HG_t *hobj/*out*/
} /* end if */
HDassert(H5F_addr_defined(addr));
if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC_WRITE)))
if(NULL == (heap = H5HG_protect(f, dxpl_id, addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
/* Split the free space to make room for the new object */
@ -618,7 +621,7 @@ H5HG_read(H5F_t *f, hid_t dxpl_id, H5HG_t *hobj, void *object/*out*/,
HDassert(hobj);
/* Load the heap */
if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_READ)))
if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect global heap")
HDassert(hobj->idx < heap->nused);
@ -692,7 +695,7 @@ H5HG_link(H5F_t *f, hid_t dxpl_id, const H5HG_t *hobj, int adjust)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file")
/* Load the heap */
if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_WRITE)))
if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
if(adjust != 0) {
@ -755,7 +758,7 @@ H5HG_remove (H5F_t *f, hid_t dxpl_id, H5HG_t *hobj)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "no write intent on file")
/* Load the heap */
if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC_WRITE)))
if(NULL == (heap = H5HG_protect(f, dxpl_id, hobj->addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap")
HDassert(hobj->idx < heap->nused);

View File

@ -62,12 +62,14 @@
/********************/
/* Metadata cache callbacks */
static H5HG_heap_t *H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata);
static herr_t H5HG_flush(H5F_t *f, hid_t dxpl_id, hbool_t dest, haddr_t addr,
H5HG_heap_t *heap, unsigned H5_ATTR_UNUSED * flags_ptr);
static herr_t H5HG_dest(H5F_t *f, H5HG_heap_t *heap);
static herr_t H5HG_clear(H5F_t *f, H5HG_heap_t *heap, hbool_t destroy);
static herr_t H5HG_size(const H5F_t *f, const H5HG_heap_t *heap, size_t *size_ptr);
static herr_t H5HG__cache_heap_get_load_size(const void *udata, size_t *image_len);
static void *H5HG__cache_heap_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HG__cache_heap_image_len(const void *thing, size_t *image_len,
hbool_t *compressed_ptr, size_t *compressed_image_len_ptr);
static herr_t H5HG__cache_heap_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
static herr_t H5HG__cache_heap_free_icr(void *thing);
/*********************/
@ -76,13 +78,19 @@ static herr_t H5HG_size(const H5F_t *f, const H5HG_heap_t *heap, size_t *size_pt
/* H5HG inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_GHEAP[1] = {{
H5AC_GHEAP_ID,
(H5AC_load_func_t)H5HG_load,
(H5AC_flush_func_t)H5HG_flush,
(H5AC_dest_func_t)H5HG_dest,
(H5AC_clear_func_t)H5HG_clear,
(H5AC_notify_func_t)NULL,
(H5AC_size_func_t)H5HG_size,
H5AC_GHEAP_ID, /* Metadata client ID */
"global heap", /* Metadata client name (for debugging) */
H5FD_MEM_GHEAP, /* File space memory type for client */
H5AC__CLASS_SPECULATIVE_LOAD_FLAG, /* Client class behavior flags */
H5HG__cache_heap_get_load_size, /* 'get_load_size' callback */
H5HG__cache_heap_deserialize, /* 'deserialize' callback */
H5HG__cache_heap_image_len, /* 'image_len' callback */
NULL, /* 'pre_serialize' callback */
H5HG__cache_heap_serialize, /* 'serialize' callback */
NULL, /* 'notify' callback */
H5HG__cache_heap_free_icr, /* 'free_icr' callback */
NULL, /* 'clear' callback */
NULL, /* 'fsf_size' callback */
}};
@ -98,162 +106,204 @@ const H5AC_class_t H5AC_GHEAP[1] = {{
/*-------------------------------------------------------------------------
* Function: H5HG_load
* Function: H5HG__cache_heap_get_load_size()
*
* Purpose: Loads a global heap collection from disk.
* Purpose: Return the initial speculative read size to the metadata
* cache. This size will be used in the initial attempt to read
* the global heap. If this read is too small, the cache will
* try again with the correct value obtained from
* H5HG__cache_heap_image_len().
*
* Return: Success: Ptr to a global heap collection.
* Return: Success: SUCCEED
* Failure: FAIL
*
* Failure: NULL
*
* Programmer: Robb Matzke
* Friday, March 27, 1998
* Programmer: John Mainzer
* 7/27/14
*
*-------------------------------------------------------------------------
*/
static H5HG_heap_t *
H5HG_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *udata)
static herr_t
H5HG__cache_heap_get_load_size(const void H5_ATTR_UNUSED *_udata, size_t *image_len)
{
H5HG_heap_t *heap = NULL;
uint8_t *p;
size_t nalloc, need;
size_t max_idx = 0; /* The maximum index seen */
H5HG_heap_t *ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC_NOERR
FUNC_ENTER_NOAPI_NOINIT
HDassert(image_len);
/* check arguments */
*image_len = (size_t)H5HG_MINSIZE;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5HG__cache_heap_get_load_size() */
/*-------------------------------------------------------------------------
* Function: H5HG__cache_heap_deserialize
*
* Purpose: Given a buffer containing the on disk image of the global
* heap, deserialize it, load its contents into a newly allocated
* instance of H5HG_heap_t, and return a pointer to the new instance.
*
* Note that this heap client uses speculative reads. If the supplied
* buffer is too small, we simply make note of the correct size, and
* wait for the metadata cache to try again.
*
* Return: Success: Pointer to in core representation
* Failure: NULL
*
* Programmer: John Mainzer
* 7/27/14
*
*-------------------------------------------------------------------------
*/
static void *
H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata,
hbool_t H5_ATTR_UNUSED *dirty)
{
H5F_t *f = (H5F_t *)_udata; /* File pointer -- obtained from user data */
H5HG_heap_t *heap = NULL; /* New global heap */
uint8_t *image; /* Pointer to image to decode */
void *ret_value; /* Return value */
FUNC_ENTER_STATIC
/* Sanity checks */
HDassert(_image);
HDassert(len >= (size_t)H5HG_MINSIZE);
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(udata);
HDassert(dirty);
/* Read the initial 4k page */
/* Allocate a new global heap */
if(NULL == (heap = H5FL_CALLOC(H5HG_heap_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
heap->shared = H5F_SHARED(f);
if(NULL == (heap->chunk = H5FL_BLK_MALLOC(gheap_chunk, (size_t)H5HG_MINSIZE)))
if(NULL == (heap->chunk = H5FL_BLK_MALLOC(gheap_chunk, len)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
if(H5F_block_read(f, H5FD_MEM_GHEAP, addr, (size_t)H5HG_MINSIZE, dxpl_id, heap->chunk) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read global heap collection")
p = heap->chunk;
/* copy the image buffer into the newly allocated chunk */
HDmemcpy(heap->chunk, _image, len);
image = heap->chunk;
/* Magic number */
if(HDmemcmp(p, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "bad global heap collection signature")
p += H5_SIZEOF_MAGIC;
if(HDmemcmp(image, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "bad global heap collection signature")
image += H5_SIZEOF_MAGIC;
/* Version */
if(H5HG_VERSION != *p++)
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong version number in global heap")
if(H5HG_VERSION != *image++)
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong version number in global heap")
/* Reserved */
p += 3;
image += 3;
/* Size */
H5F_DECODE_LENGTH(f, p, heap->size);
H5F_DECODE_LENGTH(f, image, heap->size);
HDassert(heap->size >= H5HG_MINSIZE);
HDassert((len == H5HG_MINSIZE) /* first try */ ||
((len == heap->size) && (len > H5HG_MINSIZE))); /* second try */
if(len == heap->size) { /* proceed with the deserialize */
size_t max_idx = 0;
size_t nalloc;
/*
* If we didn't read enough in the first try, then read the rest of the
* collection now.
*/
if(heap->size > H5HG_MINSIZE) {
haddr_t next_addr = addr + (hsize_t)H5HG_MINSIZE;
/* Decode each object */
image = heap->chunk + H5HG_SIZEOF_HDR(f);
nalloc = H5HG_NOBJS(f, heap->size);
if(NULL == (heap->chunk = H5FL_BLK_REALLOC(gheap_chunk, heap->chunk, heap->size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
if(H5F_block_read(f, H5FD_MEM_GHEAP, next_addr, (heap->size - H5HG_MINSIZE), dxpl_id, heap->chunk + H5HG_MINSIZE) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_READERROR, NULL, "unable to read global heap collection")
} /* end if */
/* Calloc the obj array because the file format spec makes no guarantee
* about the order of the objects, and unused slots must be set to zero.
*/
if(NULL == (heap->obj = H5FL_SEQ_CALLOC(H5HG_obj_t, nalloc)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
heap->nalloc = nalloc;
/* Decode each object */
p = heap->chunk + H5HG_SIZEOF_HDR(f);
nalloc = H5HG_NOBJS(f, heap->size);
/* Calloc the obj array because the file format spec makes no guarantee
* about the order of the objects, and unused slots must be set to zero.
*/
if(NULL == (heap->obj = H5FL_SEQ_CALLOC(H5HG_obj_t, nalloc)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
heap->nalloc = nalloc;
while(p < (heap->chunk + heap->size)) {
if((p + H5HG_SIZEOF_OBJHDR(f)) > (heap->chunk + heap->size)) {
/*
* The last bit of space is too tiny for an object header, so we
* assume that it's free space.
*/
HDassert(NULL == heap->obj[0].begin);
heap->obj[0].size = (size_t)(((const uint8_t *)heap->chunk + heap->size) - p);
heap->obj[0].begin = p;
p += heap->obj[0].size;
} /* end if */
else {
unsigned idx;
uint8_t *begin = p;
UINT16DECODE(p, idx);
/* Check if we need more room to store heap objects */
if(idx >= heap->nalloc) {
size_t new_alloc; /* New allocation number */
H5HG_obj_t *new_obj; /* New array of object descriptions */
/* Determine the new number of objects to index */
new_alloc = MAX(heap->nalloc * 2, (idx + 1));
HDassert(idx < new_alloc);
/* Reallocate array of objects */
if(NULL == (new_obj = H5FL_SEQ_REALLOC(H5HG_obj_t, heap->obj, new_alloc)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Clear newly allocated space */
HDmemset(&new_obj[heap->nalloc], 0, (new_alloc - heap->nalloc) * sizeof(heap->obj[0]));
/* Update heap information */
heap->nalloc = new_alloc;
heap->obj = new_obj;
HDassert(heap->nalloc > heap->nused);
while(image < (heap->chunk + heap->size)) {
if((image + H5HG_SIZEOF_OBJHDR(f)) > (heap->chunk + heap->size)) {
/*
* The last bit of space is too tiny for an object header, so
* we assume that it's free space.
*/
HDassert(NULL == heap->obj[0].begin);
heap->obj[0].size = (size_t)(((const uint8_t *)heap->chunk + heap->size) - image);
heap->obj[0].begin = image;
image += heap->obj[0].size;
} /* end if */
else {
size_t need;
unsigned idx;
uint8_t *begin = image;
UINT16DECODE(p, heap->obj[idx].nrefs);
p += 4; /*reserved*/
H5F_DECODE_LENGTH(f, p, heap->obj[idx].size);
heap->obj[idx].begin = begin;
UINT16DECODE(image, idx);
/*
* The total storage size includes the size of the object header
* and is zero padded so the next object header is properly
* aligned. The entire obj array was calloc'ed, so no need to zero
* the space here. The last bit of space is the free space object
* whose size is never padded and already includes the object
* header.
*/
if(idx > 0) {
need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(heap->obj[idx].size);
/* Check if we need more room to store heap objects */
if(idx >= heap->nalloc) {
size_t new_alloc; /* New allocation number */
H5HG_obj_t *new_obj; /* New array of object descriptions */
if(idx > max_idx)
max_idx = idx;
} /* end if */
else
need = heap->obj[idx].size;
p = begin + need;
} /* end else */
} /* end while */
HDassert(p == heap->chunk + heap->size);
HDassert(H5HG_ISALIGNED(heap->obj[0].size));
/* Determine the new number of objects to index */
new_alloc = MAX(heap->nalloc * 2, (idx + 1));
HDassert(idx < new_alloc);
/* Set the next index value to use */
if(max_idx > 0)
heap->nused = max_idx + 1;
/* Reallocate array of objects */
if(NULL == (new_obj = H5FL_SEQ_REALLOC(H5HG_obj_t, heap->obj, new_alloc)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
/* Clear newly allocated space */
HDmemset(&new_obj[heap->nalloc], 0, (new_alloc - heap->nalloc) * sizeof(heap->obj[0]));
/* Update heap information */
heap->nalloc = new_alloc;
heap->obj = new_obj;
HDassert(heap->nalloc > heap->nused);
} /* end if */
UINT16DECODE(image, heap->obj[idx].nrefs);
image += 4; /*reserved*/
H5F_DECODE_LENGTH(f, image, heap->obj[idx].size);
heap->obj[idx].begin = begin;
/*
* The total storage size includes the size of the object
* header and is zero padded so the next object header is
* properly aligned. The entire obj array was calloc'ed,
* so no need to zero the space here. The last bit of space
* is the free space object whose size is never padded and
* already includes the object header.
*/
if(idx > 0) {
need = H5HG_SIZEOF_OBJHDR(f) + H5HG_ALIGN(heap->obj[idx].size);
if(idx > max_idx)
max_idx = idx;
} /* end if */
else
need = heap->obj[idx].size;
image = begin + need;
} /* end else */
} /* end while */
HDassert(image == heap->chunk + heap->size);
HDassert(H5HG_ISALIGNED(heap->obj[0].size));
/* Set the next index value to use */
if(max_idx > 0)
heap->nused = max_idx + 1;
else
heap->nused = 1;
HDassert(max_idx < heap->nused);
/* Add the new heap to the CWFS list for the file */
if(H5F_cwfs_add(f, heap) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "unable to add global heap collection to file's CWFS")
} /* end if ( len == heap->size ) */
else
heap->nused = 1;
HDassert(max_idx < heap->nused);
/* Add the new heap to the CWFS list for the file */
if(H5F_cwfs_add(f, heap) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, NULL, "unable to add global heap collection to file's CWFS")
/* if len is less than heap size, then the initial speculative
* read was too small. In this case we return without reporting
* failure. H5C_load_entry() will call H5HG__cache_heap_image_len()
* to get the actual read size, and then repeat the read with the
* correct size, and call this function a second time.
*/
HDassert(len < heap->size);
ret_value = heap;
@ -263,85 +313,120 @@ done:
HDONE_ERROR(H5E_HEAP, H5E_CANTFREE, NULL, "unable to destroy global heap collection")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HG_load() */
} /* end H5HG__cache_heap_deserialize() */
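The get_load_size, deserialize, and image_len comments above describe a two-pass speculative load: read H5HG_MINSIZE first, and if the decoded size turns out to be larger, re-read with the correct length and deserialize again. The sketch below shows that control flow from the cache's side under simplified callback signatures; read_block() and the ex_* types are stand-ins, not the real H5C_load_entry() interface, and a real cache would also discard the first deserialized object before the second pass.

#include <stdlib.h>

typedef int   (*ex_get_load_size_t)(const void *udata, size_t *image_len);
typedef void *(*ex_deserialize_t)(const void *image, size_t len, void *udata);
typedef int   (*ex_image_len_t)(const void *thing, size_t *image_len);

extern int read_block(unsigned long long addr, size_t len, void *buf);

static void *ex_load_entry(unsigned long long addr, void *udata,
    ex_get_load_size_t get_load_size, ex_deserialize_t deserialize,
    ex_image_len_t image_len)
{
    size_t  len = 0, actual_len = 0;
    void   *image = NULL, *thing = NULL;

    /* First pass: read the client's initial (speculative) size guess */
    if(get_load_size(udata, &len) < 0)
        return NULL;
    if(NULL == (image = malloc(len)))
        return NULL;
    if(read_block(addr, len, image) < 0)
        goto error;
    if(NULL == (thing = deserialize(image, len, udata)))
        goto error;

    /* Second pass: if the real on-disk size is larger, re-read and
     * deserialize again with the correct length */
    if(image_len(thing, &actual_len) < 0)
        goto error;
    if(actual_len > len) {
        void *new_image;

        if(NULL == (new_image = realloc(image, actual_len)))
            goto error;
        image = new_image;
        if(read_block(addr, actual_len, image) < 0)
            goto error;
        if(NULL == (thing = deserialize(image, actual_len, udata)))
            goto error;
    }

    free(image);
    return thing;

error:
    free(image);
    return NULL;
}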
/*-------------------------------------------------------------------------
* Function: H5HG_flush
* Function: H5HG__cache_heap_image_len
*
* Purpose: Flushes a global heap collection from memory to disk if it's
* dirty. Optionally deletes the heap from memory.
* Purpose: Return the on disk image size of the global heap to the
* metadata cache via the image_len parameter.
*
* Return: Non-negative on success/Negative on failure
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: Robb Matzke
* Friday, March 27, 1998
* Programmer: John Mainzer
* 7/27/14
*
*-------------------------------------------------------------------------
*/
static herr_t
H5HG_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, H5HG_heap_t *heap, unsigned H5_ATTR_UNUSED * flags_ptr)
H5HG__cache_heap_image_len(const void *_thing, size_t *image_len,
hbool_t H5_ATTR_UNUSED *compressed_ptr, size_t H5_ATTR_UNUSED *compressed_image_len_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
const H5HG_heap_t *heap = (const H5HG_heap_t *)_thing;
FUNC_ENTER_NOAPI_NOINIT
FUNC_ENTER_STATIC_NOERR
/* Sanity checks */
HDassert(heap);
HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(heap->cache_info.type == H5AC_GHEAP);
HDassert(heap->size >= H5HG_MINSIZE);
HDassert(image_len);
*image_len = heap->size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5HG__cache_heap_image_len() */
/**************************************/
/* no H5HG_cache_heap_pre_serialize() */
/**************************************/
/*-------------------------------------------------------------------------
* Function: H5HG__cache_heap_serialize
*
* Purpose: Given an appropriately sized buffer and an instance of
* H5HG_heap_t, serialize the global heap for writing to file,
* and copy the serialized version into the buffer.
*
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 7/27/14
*
*-------------------------------------------------------------------------
*/
static herr_t
H5HG__cache_heap_serialize(const H5F_t *f, void *image, size_t len,
void *_thing)
{
H5HG_heap_t *heap = (H5HG_heap_t *)_thing;
FUNC_ENTER_STATIC_NOERR
/* Check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(H5F_addr_eq(addr, heap->addr));
HDassert(image);
HDassert(heap);
HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(heap->cache_info.type == H5AC_GHEAP);
HDassert(heap->size == len);
HDassert(heap->chunk);
if(heap->cache_info.is_dirty) {
if(H5F_block_write(f, H5FD_MEM_GHEAP, addr, heap->size, dxpl_id, heap->chunk) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_WRITEERROR, FAIL, "unable to write global heap collection to file")
heap->cache_info.is_dirty = FALSE;
} /* end if */
/* copy the image into the buffer */
HDmemcpy(image, heap->chunk, len);
if(destroy)
if(H5HG_dest(f, heap) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy global heap collection")
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5HG__cache_heap_serialize() */
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HG_flush() */
/****************************************/
/* no H5HG_cache_heap_notify() function */
/****************************************/
/*-------------------------------------------------------------------------
* Function: H5HG_dest
* Function: H5HG__cache_heap_free_icr
*
* Purpose: Destroys a global heap collection in memory
* Purpose: Free the in memory representation of the supplied global heap.
*
* Return: Non-negative on success/Negative on failure
* Note: The metadata cache sets the object's cache_info.magic to
* H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling a free_icr
* callback (checked in assert).
*
* Programmer: Quincey Koziol
* Wednesday, January 15, 2003
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 7/27/14
*
*-------------------------------------------------------------------------
*/
static herr_t
H5HG_dest(H5F_t *f, H5HG_heap_t *heap)
H5HG__cache_heap_free_icr(void *_thing)
{
herr_t ret_value = SUCCEED; /* Return value */
H5HG_heap_t *heap = (H5HG_heap_t *)_thing;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
FUNC_ENTER_STATIC
/* Check arguments */
/* Sanity checks */
HDassert(heap);
/* Verify that node is clean */
HDassert(heap->cache_info.is_dirty == FALSE);
/* If we're going to free the space on disk, the address must be valid */
HDassert(!heap->cache_info.free_file_space_on_destroy || H5F_addr_defined(heap->cache_info.addr));
/* Check for freeing file space for global heap */
if(heap->cache_info.free_file_space_on_destroy) {
/* Release the space on disk */
/* (XXX: Nasty usage of internal DXPL value! -QAK) */
if(H5MF_xfree(f, H5FD_MEM_GHEAP, H5AC_dxpl_id, heap->cache_info.addr, (hsize_t)heap->size) < 0)
HGOTO_ERROR(H5E_BTREE, H5E_CANTFREE, FAIL, "unable to free global heap")
} /* end if */
HDassert(heap->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
HDassert(heap->cache_info.type == H5AC_GHEAP);
/* Destroy global heap collection */
if(H5HG_free(heap) < 0)
@ -349,68 +434,5 @@ H5HG_dest(H5F_t *f, H5HG_heap_t *heap)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5HG_dest() */
/*-------------------------------------------------------------------------
* Function: H5HG_clear
*
* Purpose: Mark a global heap in memory as non-dirty.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* Thursday, March 20, 2003
*
*-------------------------------------------------------------------------
*/
static herr_t
H5HG_clear(H5F_t *f, H5HG_heap_t *heap, hbool_t destroy)
{
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
/* Sanity checks */
HDassert(heap);
/* Mark heap as clean */
heap->cache_info.is_dirty = FALSE;
if(destroy)
if(H5HG_dest(f, heap) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTFREE, FAIL, "unable to destroy global heap collection")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5HG_clear() */
/*-------------------------------------------------------------------------
* Function: H5HG_size
*
* Purpose: Compute the size in bytes of the specified instance of
* H5HG_heap_t on disk, and return it in *len_ptr. On failure,
* the value of *len_ptr is undefined.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
* 5/13/04
*
*-------------------------------------------------------------------------
*/
static herr_t
H5HG_size(const H5F_t H5_ATTR_UNUSED *f, const H5HG_heap_t *heap, size_t *size_ptr)
{
FUNC_ENTER_NOAPI_NOINIT_NOERR
/* Check arguments */
HDassert(heap);
HDassert(size_ptr);
*size_ptr = heap->size;
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5HG_size() */
} /* end H5HG__cache_heap_free_icr() */
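The free_icr note above describes a contract: the cache invalidates the entry's magic number before the callback runs, so the callback can only sanity-check against the "bad" value. A small sketch of that contract with made-up magic values (the real constants live in the cache's private headers and are not reproduced here):

#include <assert.h>
#include <stdlib.h>

#define EX_GOOD_MAGIC  0x600Du   /* illustrative values only, not the */
#define EX_BAD_MAGIC   0xBAADu   /* real H5C magic numbers            */

typedef struct {
    unsigned magic;              /* stand-in for cache_info.magic     */
    /* ... client payload ... */
} ex_entry_t;

/* free_icr-style callback: by the time it runs, the cache has already
 * replaced the good magic with the bad one, so that is what we check. */
static int ex_free_icr(void *_thing)
{
    ex_entry_t *entry = (ex_entry_t *)_thing;

    assert(entry->magic == EX_BAD_MAGIC);
    free(entry);
    return 0;
}

/* cache side: invalidate the magic, then hand the entry to the client */
static int ex_cache_destroy_entry(ex_entry_t *entry)
{
    assert(entry->magic == EX_GOOD_MAGIC);
    entry->magic = EX_BAD_MAGIC;
    return ex_free_icr(entry);
}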

View File

@ -103,7 +103,7 @@ H5HG_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent,
HDassert(indent >= 0);
HDassert(fwidth >= 0);
if(NULL == (h = H5HG_protect(f, dxpl_id, addr, H5AC_READ)))
if(NULL == (h = H5HG_protect(f, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to protect global heap collection");
HDfprintf(stream, "%*sGlobal Heap Collection...\n", indent, "");

View File

@ -143,7 +143,7 @@ struct H5HG_heap_t {
/* Package Private Prototypes */
/******************************/
H5_DLL herr_t H5HG_free(H5HG_heap_t *heap);
H5_DLL H5HG_heap_t *H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw);
H5_DLL H5HG_heap_t *H5HG_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags);
#endif /* _H5HGpkg_H */

View File

@ -441,7 +441,7 @@ done:
*-------------------------------------------------------------------------
*/
H5HL_t *
H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags)
{
H5HL_cache_prfx_ud_t prfx_udata; /* User data for protecting local heap prefix */
H5HL_prfx_t *prfx = NULL; /* Local heap prefix */
@ -457,14 +457,19 @@ H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
HDassert(f);
HDassert(H5F_addr_defined(addr));
/* only the H5AC__READ_ONLY_FLAG may appear in flags */
HDassert((flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Construct the user data for protect callback */
prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, rw)))
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load heap prefix")
/* Get the pointer to the heap */
@ -486,7 +491,7 @@ H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw)
dblk_udata.loaded = FALSE;
/* Protect the local heap data block */
if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, rw)))
if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to load heap data block")
/* Pin the prefix, if the data block was loaded from file */
@ -1071,13 +1076,15 @@ H5HL_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
HDassert(H5F_addr_defined(addr));
/* Construct the user data for protect callback */
prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_WRITE)))
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix")
/* Get the pointer to the heap */
@ -1092,7 +1099,7 @@ H5HL_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
dblk_udata.loaded = FALSE;
/* Protect the local heap data block */
if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, H5AC_WRITE)))
if(NULL == (dblk = (H5HL_dblk_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_DBLK, heap->dblk_addr, &dblk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap data block")
/* Pin the prefix, if the data block was loaded from file */
@ -1147,13 +1154,15 @@ H5HL_get_size(H5F_t *f, hid_t dxpl_id, haddr_t addr, size_t *size)
HDassert(size);
/* Construct the user data for protect callback */
prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_READ)))
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix")
/* Get the pointer to the heap */
@ -1199,13 +1208,15 @@ H5HL_heapsize(H5F_t *f, hid_t dxpl_id, haddr_t addr, hsize_t *heap_size)
HDassert(heap_size);
/* Construct the user data for protect callback */
prfx_udata.made_attempt = FALSE;
prfx_udata.sizeof_size = H5F_SIZEOF_SIZE(f);
prfx_udata.sizeof_addr = H5F_SIZEOF_ADDR(f);
prfx_udata.prfx_addr = addr;
prfx_udata.sizeof_prfx = H5HL_SIZEOF_HDR(f);
prfx_udata.loaded = FALSE;
/* Protect the local heap prefix */
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC_READ)))
if(NULL == (prfx = (H5HL_prfx_t *)H5AC_protect(f, dxpl_id, H5AC_LHEAP_PRFX, addr, &prfx_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, FAIL, "unable to load heap prefix")
/* Get the pointer to the heap */

File diff suppressed because it is too large Load Diff

View File

@ -69,7 +69,7 @@ H5HL_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int indent, int
HDassert(indent >= 0);
HDassert(fwidth >= 0);
if(NULL == (h = (H5HL_t *)H5HL_protect(f, dxpl_id, addr, H5AC_READ)))
if(NULL == (h = (H5HL_t *)H5HL_protect(f, dxpl_id, addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, FAIL, "unable to load heap")
HDfprintf(stream, "%*sLocal Heap...\n", indent, "");

View File

@ -122,12 +122,16 @@ struct H5HL_prfx_t {
/* Callback information for loading local heap prefix from disk */
typedef struct H5HL_cache_prfx_ud_t {
/* Downwards */
hbool_t made_attempt; /* Whether the deserialize routine */
/* was already attempted */
size_t sizeof_size; /* Size of file sizes */
size_t sizeof_addr; /* Size of file addresses */
haddr_t prfx_addr; /* Address of prefix */
size_t sizeof_prfx; /* Size of heap prefix */
/* Upwards */
hbool_t loaded; /* Whether prefix was loaded */
/* from file */
} H5HL_cache_prfx_ud_t;
/* Callback information for loading local heap data block from disk */

View File

@ -61,7 +61,7 @@ typedef struct H5HL_t H5HL_t;
* Library prototypes...
*/
H5_DLL herr_t H5HL_create(H5F_t *f, hid_t dxpl_id, size_t size_hint, haddr_t *addr/*out*/);
H5_DLL H5HL_t *H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, H5AC_protect_t rw);
H5_DLL H5HL_t *H5HL_protect(H5F_t *f, hid_t dxpl_id, haddr_t addr, unsigned flags);
H5_DLL void *H5HL_offset_into(const H5HL_t *heap, size_t offset);
H5_DLL herr_t H5HL_remove(H5F_t *f, hid_t dxpl_id, H5HL_t *heap, size_t offset,
size_t size);

View File

@ -1652,7 +1652,7 @@ done:
*-------------------------------------------------------------------------
*/
H5O_t *
H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags)
{
H5O_t *oh = NULL; /* Object header protected */
H5O_cache_ud_t udata; /* User data for protecting object header */
@ -1666,13 +1666,16 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
HDassert(loc);
HDassert(loc->file);
/* prot_flags may only contain the H5AC__READ_ONLY_FLAG */
HDassert((prot_flags & (unsigned)(~H5AC__READ_ONLY_FLAG)) == 0);
/* Check for valid address */
if(!H5F_addr_defined(loc->addr))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "address undefined")
/* Check for write access on the file */
file_intent = H5F_INTENT(loc->file);
if((prot == H5AC_WRITE) && (0 == (file_intent & H5F_ACC_RDWR)))
if((0 == (prot_flags & H5AC__READ_ONLY_FLAG)) && (0 == (file_intent & H5F_ACC_RDWR)))
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, NULL, "no write intent on file")
/* Construct the user data for protect callback */
@ -1688,7 +1691,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
udata.common.addr = loc->addr;
/* Lock the object header into the cache */
if(NULL == (oh = (H5O_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR, loc->addr, &udata, prot)))
if(NULL == (oh = (H5O_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR, loc->addr, &udata, prot_flags)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Check if there are any continuation messages to process */
@ -1725,7 +1728,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* (which adds to the object header) */
chk_udata.common.addr = cont_msg_info.msgs[curr_msg].addr;
chk_udata.size = cont_msg_info.msgs[curr_msg].size;
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, &chk_udata, prot)))
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(loc->file, dxpl_id, H5AC_OHDR_CHK, cont_msg_info.msgs[curr_msg].addr, &chk_udata, prot_flags)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header chunk")
/* Sanity check */
@ -1769,7 +1772,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* (object header will have been marked dirty during protect, if we
* have write access -QAK)
*/
if(prot != H5AC_WRITE)
if((prot_flags & H5AC__READ_ONLY_FLAG) != 0)
oh->prefix_modified = TRUE;
#ifndef NDEBUG
else {
@ -1787,7 +1790,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
} /* end if */
/* Check for any messages that were modified while being read in */
if(udata.common.mesgs_modified && prot != H5AC_WRITE)
if(udata.common.mesgs_modified && (0 == (prot_flags & H5AC__READ_ONLY_FLAG)))
oh->mesgs_modified = TRUE;
/* Reset the field that contained chunk 0's size during speculative load */
@ -1797,7 +1800,7 @@ H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot)
/* Take care of loose ends for modifications made while bringing in the
* object header & chunks.
*/
if(prot == H5AC_WRITE) {
if(0 == (prot_flags & H5AC__READ_ONLY_FLAG)) {
/* Check for the object header prefix being modified somehow */
/* (usually through updating the # of object header messages) */
if(oh->prefix_modified) {
@ -1883,7 +1886,7 @@ H5O_pin(const H5O_loc_t *loc, hid_t dxpl_id)
HDassert(loc);
/* Get header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_WRITE)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header")
/* Increment the reference count on the object header */
@ -2096,7 +2099,7 @@ H5O_touch(const H5O_loc_t *loc, hbool_t force, hid_t dxpl_id)
HDassert(loc);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_WRITE)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Create/Update the modification time message */
@ -2212,7 +2215,7 @@ H5O_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr)
loc.holding_file = FALSE;
/* Get the object header information */
if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC_WRITE)))
if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Delete object */
@ -2296,7 +2299,7 @@ H5O_obj_type(const H5O_loc_t *loc, H5O_type_t *obj_type, hid_t dxpl_id)
FUNC_ENTER_NOAPI_TAG(dxpl_id, loc->addr, FAIL)
/* Load the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve the type of the object */
@ -2374,7 +2377,7 @@ H5O_obj_class(const H5O_loc_t *loc, hid_t dxpl_id)
FUNC_ENTER_NOAPI_NOINIT_TAG(dxpl_id, loc->addr, NULL)
/* Load the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Test whether entry qualifies as a particular type of object */
@ -2671,7 +2674,7 @@ H5O_get_hdr_info(const H5O_loc_t *loc, hid_t dxpl_id, H5O_hdr_info_t *hdr)
HDmemset(hdr, 0, sizeof(*hdr));
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, FAIL, "unable to load object header")
/* Get the information for the object header */
@ -2795,7 +2798,7 @@ H5O_get_info(const H5O_loc_t *loc, hid_t dxpl_id, hbool_t want_ih_info,
HDassert(oinfo);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Reset the object info structure */
@ -2916,7 +2919,7 @@ H5O_get_create_plist(const H5O_loc_t *loc, hid_t dxpl_id, H5P_genplist_t *oc_pli
HDassert(oc_plist);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Set property values, if they were used for the object */
@ -2971,7 +2974,7 @@ H5O_get_nlinks(const H5O_loc_t *loc, hid_t dxpl_id, hsize_t *nlinks)
HDassert(nlinks);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve the # of link messages seen when the object header was loaded */
@ -3088,7 +3091,7 @@ H5O_get_rc_and_type(const H5O_loc_t *loc, hid_t dxpl_id, unsigned *rc, H5O_type_
HDassert(loc);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Set the object's reference count */
@ -3461,7 +3464,7 @@ H5O_dec_rc_by_loc(const H5O_loc_t *loc, hid_t dxpl_id)
HDassert(loc);
/* Get header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Decrement the reference count on the object header */

View File

@ -483,7 +483,7 @@ H5O_attr_open_by_name(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
HDassert(name);
/* Protect the object header to iterate over */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Check for attribute info stored */
@ -632,7 +632,7 @@ H5O_attr_open_by_idx(const H5O_loc_t *loc, H5_index_t idx_type,
HGOTO_ERROR(H5E_ATTR, H5E_BADITER, NULL, "can't locate attribute")
/* Protect the object header to iterate over */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, NULL, "unable to load object header")
/* Find out whether it has already been opened. If it has, close the object
@ -1283,7 +1283,7 @@ H5O_attr_iterate_real(hid_t loc_id, const H5O_loc_t *loc, hid_t dxpl_id,
HDassert(attr_op);
/* Protect the object header to iterate over */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@ -1846,7 +1846,7 @@ H5O_attr_exists(const H5O_loc_t *loc, const char *name, hid_t dxpl_id)
HDassert(name);
/* Protect the object header to iterate over */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@ -2000,7 +2000,7 @@ H5O_attr_count(const H5O_loc_t *loc, hid_t dxpl_id)
HDassert(loc);
/* Protect the object header to iterate over */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_ATTR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve # of attributes on object */

File diff suppressed because it is too large Load Diff

View File

@ -184,7 +184,7 @@ H5O_chunk_protect(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to load object header chunk")
/* Sanity check */
@ -337,7 +337,7 @@ H5O_chunk_update_idx(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
/* Update index for chunk proxy in cache */
@ -389,7 +389,7 @@ H5O_chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh, unsigned idx)
chk_udata.size = oh->chunk[idx].size;
/* Get the chunk proxy */
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC_WRITE)))
if(NULL == (chk_proxy = (H5O_chunk_proxy_t *)H5AC_protect(f, dxpl_id, H5AC_OHDR_CHK, oh->chunk[idx].addr, &chk_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header chunk")
/* Sanity check */

View File

@ -379,7 +379,7 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/,
}
/* Get source object header */
if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC_READ)))
if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Retrieve user data for particular type of object to copy */

View File

@ -566,7 +566,7 @@ H5O_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE *stream, int indent, int f
loc.addr = addr;
loc.holding_file = FALSE;
if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(&loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* debug */

View File

@ -128,7 +128,7 @@ H5O_efl_decode(H5F_t *f, hid_t dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh,
#ifndef NDEBUG
HDassert(H5F_addr_defined(mesg->heap_addr));
if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read protect link value")
s = (const char *)H5HL_offset_into(heap, 0);
@ -145,7 +145,7 @@ H5O_efl_decode(H5F_t *f, hid_t dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh,
if(NULL == mesg->slot)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC_READ)))
if(NULL == (heap = H5HL_protect(f, dxpl_id, mesg->heap_addr, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "unable to read protect link value")
for(u = 0; u < mesg->nused; u++) {
/* Name */
@ -487,7 +487,7 @@ H5O_efl_copy_file(H5F_t H5_ATTR_UNUSED *file_src, void *mesg_src, H5F_t *file_ds
HGOTO_ERROR(H5E_EFL, H5E_CANTINIT, NULL, "can't create heap")
/* Pin the heap down in memory */
if(NULL == (heap = H5HL_protect(file_dst, dxpl_id, efl_dst->heap_addr, H5AC_WRITE)))
if(NULL == (heap = H5HL_protect(file_dst, dxpl_id, efl_dst->heap_addr, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_EFL, H5E_PROTECT, NULL, "unable to protect EFL file name heap")
/* Insert "empty" name first */

View File

@ -476,7 +476,7 @@ H5O_msg_read(const H5O_loc_t *loc, unsigned type_id, void *mesg,
HDassert(type_id < NELMTS(H5O_msg_class_g));
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, NULL, "unable to protect object header")
/* Call the "real" read routine */
@ -802,7 +802,7 @@ H5O_msg_count(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Load the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Count the messages of the correct type */
@ -884,7 +884,7 @@ H5O_msg_exists(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type_id < NELMTS(H5O_msg_class_g));
/* Load the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Call the "real" exists routine */
@ -1224,7 +1224,7 @@ H5O_msg_iterate(const H5O_loc_t *loc, unsigned type_id,
HDassert(op);
/* Protect the object header to iterate over */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Call the "real" iterate routine */
@ -2289,7 +2289,7 @@ H5O_msg_get_chunkno(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Locate message of correct type */
@ -2344,7 +2344,7 @@ H5O_msg_lock(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Locate message of correct type */
@ -2402,7 +2402,7 @@ H5O_msg_unlock(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
HDassert(type);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Locate message of correct type */

View File

@ -564,6 +564,8 @@ typedef struct H5O_btreek_t {
* (Data structure in memory)
*/
typedef struct H5O_drvinfo_t {
/* Information for H5AC cache functions, _must_ be first field in structure */
H5AC_info_t cache_info;
char name[9]; /* Driver name */
size_t len; /* Length of encoded buffer */
uint8_t *buf; /* Buffer for encoded info */
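The new cache_info field above must be the first member so that the metadata cache, which stores entries by a pointer to the generic header, can cast between the client structure and H5AC_info_t. A self-contained sketch of that layout rule; ex_info_t and ex_drvinfo_t are illustrative stand-ins for H5AC_info_t and H5O_drvinfo_t.

#include <assert.h>
#include <stddef.h>

typedef struct { int is_dirty; } ex_info_t;

typedef struct {
    ex_info_t cache_info;        /* must be first, as in the hunk above */
    char      name[9];
    size_t    len;
} ex_drvinfo_t;

int main(void)
{
    ex_drvinfo_t  drvinfo;
    void         *entry = &drvinfo;              /* what the cache stores */

    /* Because cache_info is the first member, a pointer to the client
     * struct can be treated as a pointer to the generic cache header. */
    assert(offsetof(ex_drvinfo_t, cache_info) == 0);
    ((ex_info_t *)entry)->is_dirty = 1;          /* generic cache access  */
    assert(drvinfo.cache_info.is_dirty == 1);    /* visible to the client */
    return 0;
}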
@ -659,7 +661,7 @@ H5_DLL herr_t H5O_create(H5F_t *f, hid_t dxpl_id, size_t size_hint,
H5_DLL herr_t H5O_open(H5O_loc_t *loc);
H5_DLL herr_t H5O_close(H5O_loc_t *loc);
H5_DLL int H5O_link(const H5O_loc_t *loc, int adjust, hid_t dxpl_id);
H5_DLL H5O_t *H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, H5AC_protect_t prot);
H5_DLL H5O_t *H5O_protect(const H5O_loc_t *loc, hid_t dxpl_id, unsigned prot_flags);
H5_DLL H5O_t *H5O_pin(const H5O_loc_t *loc, hid_t dxpl_id);
H5_DLL herr_t H5O_unpin(H5O_t *oh);
H5_DLL herr_t H5O_dec_rc_by_loc(const H5O_loc_t *loc, hid_t dxpl_id);

View File

@ -108,7 +108,7 @@ H5O_is_attr_dense_test(hid_t oid)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@ -173,7 +173,7 @@ H5O_is_attr_empty_test(hid_t oid)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@ -266,7 +266,7 @@ H5O_num_attrs_test(hid_t oid, hsize_t *nattrs)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@ -361,7 +361,7 @@ H5O_attr_dense_info_test(hid_t oid, hsize_t *name_count, hsize_t *corder_count)
H5_BEGIN_TAG(H5AC_ind_dxpl_id, loc->addr, FAIL);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR_TAG(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Check for attribute info stored */
@ -452,7 +452,7 @@ H5O_check_msg_marked_test(hid_t oid, hbool_t flag_val)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "object not found")
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, H5AC_ind_dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to load object header")
/* Locate "unknown" message */
@ -511,7 +511,7 @@ H5O_expunge_chunks_test(const H5O_loc_t *loc, hid_t dxpl_id)
FUNC_ENTER_NOAPI(FAIL)
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_WRITE)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Safety check */
@ -571,7 +571,7 @@ H5O_get_rc(const H5O_loc_t *loc, hid_t dxpl_id, unsigned *rc)
HDassert(rc);
/* Get the object header */
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
/* Save the refcount for the object header */

View File

@ -358,7 +358,7 @@ H5SM_type_shared(H5F_t *f, unsigned type_id, hid_t dxpl_id)
/* Set up user data for callback */
cache_udata.f = f;
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
} /* end if */
else
@ -412,7 +412,7 @@ H5SM_get_fheap_addr(H5F_t *f, hid_t dxpl_id, unsigned type_id, haddr_t *fheap_ad
cache_udata.f = f;
/* Look up the master SOHM table */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Look up index for message type */
@ -831,7 +831,7 @@ H5SM_convert_btree_to_list(H5F_t * f, H5SM_index_header_t * header, hid_t dxpl_i
cache_udata.header = header;
/* Protect the SOHM list */
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_WRITE)))
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM list index")
/* Delete the B-tree and have messages copy themselves to the
@ -939,7 +939,7 @@ H5SM_can_share(H5F_t *f, hid_t dxpl_id, H5SM_master_table_t *table,
/* Set up user data for callback */
cache_udata.f = f;
if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
if(NULL == (my_table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
} /* end if */
@ -1071,7 +1071,7 @@ H5SM_try_share(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, unsigned defer_flags,
cache_udata.f = f;
/* Look up the master SOHM table */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_WRITE)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* "complex" sharing checks */
@ -1277,7 +1277,7 @@ H5SM_write_mesg(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
cache_udata.header = header;
/* The index is a list; get it from the cache */
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, defer ? H5AC_READ : H5AC_WRITE)))
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, defer ? H5AC__READ_ONLY_FLAG : H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
/* See if the message is already in the index and get its location.
@ -1530,7 +1530,7 @@ H5SM_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, H5O_shared_t *sh_mesg)
cache_udata.f = f;
/* Look up the master SOHM table */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_WRITE)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index and try to delete from it */
@ -1799,7 +1799,7 @@ H5SM_delete_from_index(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
cache_udata.header = header;
/* If the index is stored as a list, get it from the cache */
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC_WRITE)))
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &cache_udata, H5AC__NO_FLAGS_SET)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
/* Find the message in the list */
@ -1970,7 +1970,7 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
cache_udata.f = f;
/* Read the rest of the SOHM table information from the cache */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Get index conversion limits */
@ -2133,7 +2133,7 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
tbl_cache_udata.f = f;
/* Look up the master SOHM table */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &tbl_cache_udata, H5AC_READ)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &tbl_cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index and find the message in it */
@ -2172,7 +2172,7 @@ H5SM_get_refcount(H5F_t *f, hid_t dxpl_id, unsigned type_id,
lst_cache_udata.header = header;
/* If the index is stored as a list, get it from the cache */
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &lst_cache_udata, H5AC_READ)))
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, header->index_addr, &lst_cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
/* Find the message in the list */
@ -2375,7 +2375,7 @@ H5SM_read_mesg(H5F_t *f, const H5SM_sohm_t *mesg, H5HF_t *fheap,
HGOTO_ERROR(H5E_SOHM, H5E_CANTLOAD, FAIL, "unable to open object header")
/* Load the object header from the cache */
if(NULL == (oh = H5O_protect(&oloc, dxpl_id, H5AC_READ)))
if(NULL == (oh = H5O_protect(&oloc, dxpl_id, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load object header")
} /* end if */
else
@ -2530,7 +2530,7 @@ H5SM_table_debug(H5F_t *f, hid_t dxpl_id, haddr_t table_addr,
cache_udata.f = f;
/* Look up the master SOHM table */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, &cache_udata, H5AC_READ)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, table_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
HDfprintf(stream, "%*sShared Message Master Table...\n", indent, "");
@ -2616,7 +2616,7 @@ H5SM_list_debug(H5F_t *f, hid_t dxpl_id, haddr_t list_addr,
cache_udata.header = &header;
/* Get the list from the cache */
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, list_addr, &cache_udata, H5AC_READ)))
if(NULL == (list = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST, list_addr, &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM index")
HDfprintf(stream, "%*sShared Message List Index...\n", indent, "");
@ -2693,7 +2693,7 @@ H5SM_ih_size(H5F_t *f, hid_t dxpl_id, hsize_t *hdr_size, H5_ih_info_t *ih_info)
cache_udata.f = f;
/* Look up the master SOHM table */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Get SOHM header size */
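The call-site change repeated throughout this file (and in the H5O_protect()/H5HL_protect() hunks above) is mechanical: the old H5AC_protect_t read/write argument becomes an unsigned flags argument. A minimal before/after sketch, using only identifiers that appear in these hunks:

    /* before: access mode passed as an H5AC_protect_t value */
    table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE,
                H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ);

    /* after: access mode passed as unsigned flags */
    table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE,
                H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG);  /* read-only */
    list  = (H5SM_list_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_LIST,
                header->index_addr, &cache_udata, H5AC__NO_FLAGS_SET);  /* read/write */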

File diff suppressed because it is too large


@ -98,7 +98,7 @@ H5SM_get_mesg_count_test(H5F_t *f, hid_t dxpl_id, unsigned type_id,
cache_udata.f = f;
/* Look up the master SOHM table */
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC_READ)))
if(NULL == (table = (H5SM_master_table_t *)H5AC_protect(f, dxpl_id, H5AC_SOHM_TABLE, H5F_SOHM_ADDR(f), &cache_udata, H5AC__READ_ONLY_FLAG)))
HGOTO_ERROR(H5E_SOHM, H5E_CANTPROTECT, FAIL, "unable to load SOHM master table")
/* Find the correct index for this message type */

File diff suppressed because it is too large


@ -25,6 +25,9 @@
#include "H5ACprivate.h"
#include "cache_common.h"
/* extern declarations */
extern const char *FILENAME[];
/* global variable declarations: */

File diff suppressed because it is too large


@ -138,12 +138,14 @@
* directly pinned by a single entry.
*/
#define FLUSH_OP__NO_OP 0
#define FLUSH_OP__DIRTY 1
#define FLUSH_OP__RESIZE 2
#define FLUSH_OP__MOVE 3
#define FLUSH_OP__ORDER 4
#define FLUSH_OP__MAX_OP 4
#define FLUSH_OP__NO_OP 0
#define FLUSH_OP__DIRTY 1
#define FLUSH_OP__RESIZE 2
#define FLUSH_OP__MOVE 3
#define FLUSH_OP__ORDER 4
#define FLUSH_OP__EXPUNGE 5
#define FLUSH_OP__DEST_FLUSH_DEP 6
#define FLUSH_OP__MAX_OP 6
#define MAX_FLUSH_OPS 10 /* Maximum number of flush operations
* that can be associated with a
@ -214,10 +216,32 @@ typedef struct test_entry_t
struct test_entry_t * self; /* pointer to this entry -- used for
* sanity checking.
*/
H5F_t * file_ptr; /* pointer to the file in which the
* entry resides, or NULL if the entry
* is not in a file.
*/
H5C_t * cache_ptr; /* pointer to the cache in which
* the entry resides, or NULL if the
* entry is not in cache.
*/
hbool_t written_to_main_addr;
/* Flag indicating whether an image
* of the entry has been written to
* its main address. Since we no
* longer have a flush callback, we
* set this field to true whenever the
* entry is serialized while at its
* main address.
*/
hbool_t written_to_alt_addr;
/* Flag indicating whether an image
* of the entry has been written to
* its alternate address. Since we no
* longer have a flush callback, we
* set this field to true whenever the
* entry is serialized while at its
* alternate address.
*/
haddr_t addr; /* where the cache thinks this entry
* is located
*/
@ -239,11 +263,11 @@ typedef struct test_entry_t
*/
int32_t index; /* index in its entry array
*/
int32_t reads; /* number of times this entry has
* been loaded.
int32_t serializes; /* number of times this entry has
* been serialized.
*/
int32_t writes; /* number of times this entry has
* been written
int32_t deserializes; /* number of times this entry has
* been deserialized
*/
hbool_t is_dirty; /* entry has been modified since
* last write
@ -302,18 +326,18 @@ typedef struct test_entry_t
* checking code that would otherwise
* cause a false test failure.
*/
hbool_t loaded; /* entry has been loaded since the
* last time it was reset.
hbool_t deserialized; /* entry has been deserialized since
* the last time it was reset.
*/
hbool_t cleared; /* entry has been cleared since the
* last time it was reset.
*/
hbool_t flushed; /* entry has been flushed since the
hbool_t serialized; /* entry has been serialized since the
* last time it was reset.
*/
hbool_t destroyed; /* entry has been destroyed since the
* last time it was reset.
*/
hbool_t expunged; /* entry has been expunged since the
* last time it was reset.
*/
int flush_dep_par_type; /* Entry type of flush dependency parent */
int flush_dep_par_idx; /* Index of flush dependency parent */
uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
@ -471,9 +495,6 @@ if ( ( (cache_ptr) == NULL ) || \
(i).flash_threshold = (e).flash_threshold; \
(i).decr_mode = (e).decr_mode; \
(i).upper_hr_threshold = (e).upper_hr_threshold; \
(i).flash_incr_mode = (e).flash_incr_mode; \
(i).flash_multiple = (e).flash_multiple; \
(i).flash_threshold = (e).flash_threshold; \
(i).decrement = (e).decrement; \
(i).apply_max_decrement = (e).apply_max_decrement; \
(i).max_decrement = (e).max_decrement; \
@ -490,15 +511,14 @@ struct expected_entry_status
int entry_type;
int entry_index;
size_t size;
unsigned char in_cache;
unsigned char at_main_addr;
unsigned char is_dirty;
unsigned char is_protected;
unsigned char is_pinned;
unsigned char loaded;
unsigned char cleared;
unsigned char flushed;
unsigned char destroyed;
hbool_t in_cache;
hbool_t at_main_addr;
hbool_t is_dirty;
hbool_t is_protected;
hbool_t is_pinned;
hbool_t deserialized;
hbool_t serialized;
hbool_t destroyed;
int flush_dep_par_type; /* Entry type of flush dependency parent */
int flush_dep_par_idx; /* Index of flush dependency parent */
uint64_t child_flush_dep_height_rc[H5C__NUM_FLUSH_DEP_HEIGHTS];
@ -514,12 +534,9 @@ struct expected_entry_status
/* global variable externs: */
extern const char *FILENAME[3];
extern haddr_t saved_actual_base_addr;
extern hbool_t write_permitted;
extern hbool_t pass; /* set to false on error */
extern hbool_t skip_long_tests;
extern hbool_t run_full_test;
extern const char *failure_mssg;
extern test_entry_t * entries[NUMBER_OF_ENTRY_TYPES];
@ -609,6 +626,7 @@ void resize_entry(H5F_t * file_ptr,
H5F_t *setup_cache(size_t max_cache_size, size_t min_clean_size);
void row_major_scan_forward(H5F_t * file_ptr,
int32_t max_index,
int32_t lag,
hbool_t verbose,
hbool_t reset_stats,
@ -631,6 +649,7 @@ void hl_row_major_scan_forward(H5F_t * file_ptr,
hbool_t do_inserts);
void row_major_scan_backward(H5F_t * file_ptr,
int32_t max_index,
int32_t lag,
hbool_t verbose,
hbool_t reset_stats,
@ -653,6 +672,7 @@ void hl_row_major_scan_backward(H5F_t * file_ptr,
hbool_t do_inserts);
void col_major_scan_forward(H5F_t * file_ptr,
int32_t max_index,
int32_t lag,
hbool_t verbose,
hbool_t reset_stats,
@ -671,6 +691,7 @@ void hl_col_major_scan_forward(H5F_t * file_ptr,
int dirty_unprotects);
void col_major_scan_backward(H5F_t * file_ptr,
int32_t max_index,
int32_t lag,
hbool_t verbose,
hbool_t reset_stats,
@ -747,5 +768,10 @@ void validate_mdc_config(hid_t file_id,
hbool_t compare_init,
int test_num);
/** Debugging functions -- normally commented out ***/
#if 0
void dump_LRU(H5F_t * file_ptr);
#endif
#endif /* _CACHE_COMMON_H */
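For tests ported to this header, the bookkeeping fields keep their meaning but are renamed for the serialize-based life cycle (loaded/flushed become deserialized/serialized, reads/writes become deserializes/serializes), the expected_entry_status fields are now hbool_t, and the scan routines take an explicit lag argument ahead of verbose. A hedged sketch of a consistency check a test might make, assuming only the declarations above (entry_type and entry_idx are caller-supplied):

    test_entry_t *entry_ptr = &(entries[entry_type][entry_idx]);

    /* was: entry_ptr->loaded / entry_ptr->reads */
    if(entry_ptr->deserialized && (entry_ptr->deserializes < 1))
        pass = FALSE;

    /* was: entry_ptr->flushed / entry_ptr->writes */
    if(entry_ptr->serialized && (entry_ptr->serializes < 1))
        pass = FALSE;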


@ -457,7 +457,7 @@ static int evict_entries(hid_t fid)
/* Evict all we can from the cache to examine full tag creation tree */
/* This function will likely return failure since the root group
* is still protected. Thus, don't check its return value. */
H5C_flush_cache(f, H5P_DEFAULT, H5P_DEFAULT, H5C__FLUSH_INVALIDATE_FLAG);
H5C_flush_cache(f, H5P_DEFAULT, H5C__FLUSH_INVALIDATE_FLAG);
return 0;
@ -3866,13 +3866,13 @@ check_invalid_tag_application(void)
/* Call H5HL_protect to protect the local heap created above. */
/* This should fail as no tag is set up during the protect call */
if (( lheap = H5HL_protect(f, H5AC_ind_dxpl_id, addr, H5AC_WRITE)) != NULL ) TEST_ERROR;
if (( lheap = H5HL_protect(f, H5AC_ind_dxpl_id, addr, H5AC__NO_FLAGS_SET)) != NULL ) TEST_ERROR;
/* Again, set up a valid tag in the DXPL */
if ( H5AC_tag(H5AC_ind_dxpl_id, (haddr_t)25, NULL) < 0) TEST_ERROR;
/* Call H5HL_protect again to protect the local heap. This should succeed. */
if (( lheap = H5HL_protect(f, H5AC_ind_dxpl_id, addr, H5AC_WRITE)) == NULL ) TEST_ERROR;
if (( lheap = H5HL_protect(f, H5AC_ind_dxpl_id, addr, H5AC__NO_FLAGS_SET)) == NULL ) TEST_ERROR;
/* Now unprotect the heap, as we're done with the test. */
if ( H5HL_unprotect(lheap) < 0 ) TEST_ERROR;
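Two signature changes are exercised in this file: H5C_flush_cache() drops the secondary dxpl, and H5HL_protect() takes the new flags argument. A minimal sketch of the change, using only the calls shown in these hunks:

    /* before: primary and secondary dxpl ids; read/write enum for protect */
    H5C_flush_cache(f, H5P_DEFAULT, H5P_DEFAULT, H5C__FLUSH_INVALIDATE_FLAG);
    lheap = H5HL_protect(f, H5AC_ind_dxpl_id, addr, H5AC_WRITE);

    /* after: a single dxpl id, and protect flags instead of the enum */
    H5C_flush_cache(f, H5P_DEFAULT, H5C__FLUSH_INVALIDATE_FLAG);
    lheap = H5HL_protect(f, H5AC_ind_dxpl_id, addr, H5AC__NO_FLAGS_SET);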


@ -160,11 +160,26 @@ typedef struct earray_test_t {
/* Local prototypes */
/* Metadata cache (H5AC) callbacks */
static earray_test_t *earray_cache_test_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, const void *udata, void *udata2);
static herr_t earray_cache_test_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t addr, earray_test_t *test, unsigned * flags_ptr);
static herr_t earray_cache_test_clear(H5F_t *f, earray_test_t *test, hbool_t destroy);
static herr_t earray_cache_test_size(const H5F_t *f, const earray_test_t *test, size_t *size_ptr);
static herr_t earray_cache_test_dest(H5F_t *f, earray_test_t *test);
static herr_t earray_cache_test_get_load_size(const void *udata_ptr,
size_t *image_len_ptr);
static void *earray_cache_test_deserialize(const void *image_ptr,
size_t len,
void *udata_ptr,
hbool_t *dirty_ptr);
static herr_t earray_cache_test_image_len(const void *thing,
size_t *image_len_ptr,
hbool_t *compressed_ptr,
size_t * compressed_len_ptr);
static herr_t earray_cache_test_serialize(const H5F_t *f,
void *image_ptr,
size_t len,
void *thing);
static herr_t earray_cache_test_free_icr(void *thing);
/* Local variables */
@ -181,13 +196,19 @@ h5_stat_size_t empty_size_g;
/* H5EA test object inherits cache-like properties from H5AC */
const H5AC_class_t H5AC_EARRAY_TEST[1] = {{
H5AC_TEST_ID,
(H5AC_load_func_t)earray_cache_test_load,
(H5AC_flush_func_t)earray_cache_test_flush,
(H5AC_dest_func_t)earray_cache_test_dest,
(H5AC_clear_func_t)earray_cache_test_clear,
(H5AC_notify_func_t)NULL,
(H5AC_size_func_t)earray_cache_test_size,
/* id */ H5AC_TEST_ID,
/* name */ "earray test",
/* mem_type */ H5FD_MEM_DEFAULT,
/* flags */ H5AC__CLASS_NO_IO_FLAG,
/* get_load_size */ (H5AC_get_load_size_func_t)earray_cache_test_get_load_size,
/* deserialize */ (H5AC_deserialize_func_t)earray_cache_test_deserialize,
/* image_len */ (H5AC_image_len_func_t)earray_cache_test_image_len,
/* pre_serialize */ (H5AC_pre_serialize_func_t)NULL,
/* serialize */ (H5AC_serialize_func_t)earray_cache_test_serialize,
/* notify */ (H5AC_notify_func_t)NULL,
/* free_icr */ (H5AC_free_icr_func_t)earray_cache_test_free_icr,
/* clear */ NULL,
/* fsf_size */ NULL,
}};
@ -610,196 +631,244 @@ error:
/*-------------------------------------------------------------------------
* Function: earray_cache_test_load
* Function: earray_cache_test_get_load_size()
*
* Purpose: Loads an extensible array test object from the disk.
* Purpose: place holder function -- should never be called
*
* Return: Success: Pointer to a new extensible array test object
* Failure: NULL
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* May 26 2009
* A generic discussion of metadata cache callbacks of this type
* may be found in H5Cprivate.h:
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 8/2/14
*
*-------------------------------------------------------------------------
*/
static earray_test_t *
earray_cache_test_load(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id, haddr_t H5_ATTR_UNUSED addr, const void H5_ATTR_UNUSED *udata1, void H5_ATTR_UNUSED *udata2)
static herr_t
earray_cache_test_get_load_size(const void *udata_ptr, size_t *image_len_ptr)
{
/* Check arguments */
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(udata_ptr);
HDassert(image_len_ptr);
/* Should never be called */
HDassert(0 && "Can't be called!");
*image_len_ptr = 0;
return(SUCCEED);
} /* end earray_cache_test_get_load_size() */
/*-------------------------------------------------------------------------
* Function: earray_cache_test_deserialize
*
* Purpose: place holder function -- should never be called.
*
*
* A generic discussion of metadata cache callbacks of this type
* may be found in H5Cprivate.h:
*
* Return: Success: Pointer to in core representation
* Failure: NULL
*
* Programmer: John Mainzer
* 8/2/14
*
*-------------------------------------------------------------------------
*/
static void *
earray_cache_test_deserialize(const void *image_ptr,
size_t len,
void *udata_ptr,
hbool_t *dirty_ptr)
{
HDassert(image_ptr);
HDassert(len > 0 );
HDassert(udata_ptr);
HDassert(dirty_ptr);
/* Should never be called */
HDassert(0 && "Can't be called!");
return(NULL);
} /* end earray_cache_test_load() */
} /* end earray_cache_test_deserialize() */
/*-------------------------------------------------------------------------
* Function: earray_cache_test_flush
* Function: earray_cache_test_image_len
*
* Purpose: Flushes a dirty extensible array test object to disk.
* Purpose: test code place holder function -- just set *image_len_ptr to
* one.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* May 26 2009
* A generic discussion of metadata cache callbacks of this type
* may be found in H5Cprivate.h:
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 8/2/14
*
*-------------------------------------------------------------------------
*/
static herr_t
earray_cache_test_flush(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id, hbool_t destroy, haddr_t H5_ATTR_UNUSED addr, earray_test_t *test, unsigned H5_ATTR_UNUSED * flags_ptr)
earray_cache_test_image_len(const void *thing, size_t *image_len_ptr,
hbool_t H5_ATTR_UNUSED * compressed_ptr, size_t H5_ATTR_UNUSED * compressed_len_ptr)
{
/* check arguments */
HDassert(thing);
HDassert(image_len_ptr);
/* Set size value */
/* (hard-code to 1) */
*image_len_ptr = 1;
return(SUCCEED);
} /* end earray_cache_test_image_len() */
/********************************/
/* no H5O_cache_pre_serialize() */
/********************************/
/*-------------------------------------------------------------------------
* Function: earray_cache_test_serialize
*
* Purpose: Validate the contents of the instance of earray_test_t.
*
*
* A generic discussion of metadata cache callbacks of this type
* may be found in H5Cprivate.h:
*
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 8/2/14
*
*-------------------------------------------------------------------------
*/
static herr_t
earray_cache_test_serialize(const H5F_t *f,
void *image_ptr,
H5_ATTR_UNUSED size_t len,
void *thing)
{
earray_test_t *test = NULL;
HDassert(f);
HDassert(H5F_addr_defined(addr));
HDassert(image_ptr);
HDassert(thing);
test = (earray_test_t *)thing;
HDassert(test);
HDassert(test->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert((const H5AC_class_t *)(test->cache_info.type) ==
&(H5AC_EARRAY_TEST[0]));
if(test->cache_info.is_dirty) {
/* Check for out of order flush */
if(test->fd_info->base_obj)
TEST_ERROR
/* Check which index this entry corresponds to */
if((uint64_t)0 == test->idx) {
/* Check for out of order flush */
if(test->fd_info->base_obj)
if(test->fd_info->idx0_obj || test->fd_info->idx0_elem)
TEST_ERROR
/* Check which index this entry corresponds to */
if((uint64_t)0 == test->idx) {
/* Check for out of order flush */
if(test->fd_info->idx0_obj || test->fd_info->idx0_elem)
TEST_ERROR
/* Set flag for object flush */
test->fd_info->idx0_obj = TRUE;
} /* end if */
else if((uint64_t)1 == test->idx) {
/* Check for out of order flush */
if(test->fd_info->idx1_obj || test->fd_info->idx1_elem)
TEST_ERROR
/* Set flag for object flush */
test->fd_info->idx1_obj = TRUE;
} /* end if */
else if((uint64_t)10000 == test->idx) {
/* Check for out of order flush */
if(test->fd_info->idx10000_obj || test->fd_info->idx10000_elem)
TEST_ERROR
/* Set flag for object flush */
test->fd_info->idx10000_obj = TRUE;
} /* end if */
else if((uint64_t)-1 == test->idx) {
/* Set flag for object flush */
test->fd_info->base_obj = TRUE;
} /* end if */
/* Mark the entry as clean */
test->cache_info.is_dirty = FALSE;
/* Set flag for object flush */
test->fd_info->idx0_obj = TRUE;
} /* end if */
if(destroy)
if(earray_cache_test_dest(f, test) < 0)
else if((uint64_t)1 == test->idx) {
/* Check for out of order flush */
if(test->fd_info->idx1_obj || test->fd_info->idx1_elem)
TEST_ERROR
/* Set flag for object flush */
test->fd_info->idx1_obj = TRUE;
} /* end if */
else if((uint64_t)10000 == test->idx) {
/* Check for out of order flush */
if(test->fd_info->idx10000_obj || test->fd_info->idx10000_elem)
TEST_ERROR
/* Set flag for object flush */
test->fd_info->idx10000_obj = TRUE;
} /* end if */
else if((uint64_t)-1 == test->idx) {
/* Set flag for object flush */
test->fd_info->base_obj = TRUE;
} /* end if */
return(SUCCEED);
error:
return(FAIL);
} /* earray_cache_test_flush() */
} /* end earray_cache_test_serialize() */
/******************************************/
/* no earray_cache_test_notify() function */
/******************************************/
/*-------------------------------------------------------------------------
* Function: earray_cache_test_dest
* Function: earray_cache_test_free_icr
*
* Purpose: Destroys an extensible array test object in memory.
* Purpose: Destroy an extensible array test object in memory.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* May 26 2009
* A generic discussion of metadata cache callbacks of this type
* may be found in H5Cprivate.h:
*
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Programmer: John Mainzer
* 8/2/14
*
*-------------------------------------------------------------------------
*/
herr_t
earray_cache_test_dest(H5F_t H5_ATTR_UNUSED *f, earray_test_t *test)
static herr_t
earray_cache_test_free_icr(void *thing)
{
/*
* Check arguments.
*/
earray_test_t *test = NULL;
HDassert(thing);
test = (earray_test_t *)thing;
HDassert(test);
/* the metadata cache sets cache_info.magic to
* H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling the
* free_icr routine. Hence the following assert:
*/
HDassert(test->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
HDassert((const H5AC_class_t *)(test->cache_info.type) ==
&(H5AC_EARRAY_TEST[0]));
/* Free the shared info itself */
HDfree(test);
return(SUCCEED);
} /* end earray_cache_test_dest() */
/*-------------------------------------------------------------------------
* Function: earray_cache_test_clear
*
* Purpose: Mark an extensible array test object in memory as non-dirty.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* May 26 2009
*
*-------------------------------------------------------------------------
*/
static herr_t
earray_cache_test_clear(H5F_t *f, earray_test_t *test, hbool_t destroy)
{
/*
* Check arguments.
*/
HDassert(test);
/* Reset the dirty flag. */
test->cache_info.is_dirty = FALSE;
if(destroy)
if(earray_cache_test_dest(f, test) < 0)
TEST_ERROR
return(SUCCEED);
error:
return(FAIL);
} /* end earray_cache_test_clear() */
/*-------------------------------------------------------------------------
* Function: earray_cache_test_size
*
* Purpose: Compute the size in bytes of an extensible array test object
* on disk, and return it in *size_ptr. On failure,
* the value of *size_ptr is undefined.
*
* Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* koziol@hdfgroup.org
* May 26 2009
*
*-------------------------------------------------------------------------
*/
static herr_t
earray_cache_test_size(const H5F_t H5_ATTR_UNUSED *f, const earray_test_t H5_ATTR_UNUSED *test, size_t *size_ptr)
{
/* check arguments */
HDassert(f);
HDassert(test);
HDassert(size_ptr);
/* Set size value */
/* (hard-code to 1) */
*size_ptr = 1;
return(SUCCEED);
} /* earray_cache_test_size() */
} /* end earray_cache_test_free_icr() */
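Taken together, the rewritten earray test client maps the old v2 callback set onto the v3 serialize-based interface registered in H5AC_EARRAY_TEST above. A hedged summary of the correspondence, covering only the callbacks shown in this file:

    /* v2 callback                      v3 replacement
     * earray_cache_test_load        -> earray_cache_test_get_load_size +
     *                                  earray_cache_test_deserialize
     *                                  (placeholders; H5AC__CLASS_NO_IO_FLAG means
     *                                  these entries are never read back from disk)
     * earray_cache_test_size        -> earray_cache_test_image_len
     * earray_cache_test_flush       -> earray_cache_test_serialize
     *                                  (ordering checks only; the cache writes the image)
     * earray_cache_test_dest,
     * earray_cache_test_clear       -> earray_cache_test_free_icr
     *                                  (the clear slot in the class table is NULL)
     */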
/*-------------------------------------------------------------------------
@ -1541,7 +1610,7 @@ test_flush_depend(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t H5_ATTR
TEST_ERROR
/* Protect the base entry */
if(NULL == (base_entry = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, base_addr, NULL, H5AC_WRITE)))
if(NULL == (base_entry = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, base_addr, NULL, H5AC__NO_FLAGS_SET)))
TEST_ERROR
/* Unprotect & unpin the base entry */
@ -1553,7 +1622,7 @@ test_flush_depend(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t H5_ATTR
TEST_ERROR
/* Protect the test entry */
if(NULL == (entry1 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr1, NULL, H5AC_WRITE)))
if(NULL == (entry1 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr1, NULL, H5AC__NO_FLAGS_SET)))
TEST_ERROR
/* Unprotect & unpin the test entry */
@ -1565,7 +1634,7 @@ test_flush_depend(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t H5_ATTR
TEST_ERROR
/* Protect the test entry */
if(NULL == (entry2 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr2, NULL, H5AC_WRITE)))
if(NULL == (entry2 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr2, NULL, H5AC__NO_FLAGS_SET)))
TEST_ERROR
/* Unprotect & unpin the test entry */
@ -1577,7 +1646,7 @@ test_flush_depend(hid_t fapl, H5EA_create_t *cparam, earray_test_param_t H5_ATTR
TEST_ERROR
/* Protect the test entry */
if(NULL == (entry3 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr3, NULL, H5AC_WRITE)))
if(NULL == (entry3 = (earray_test_t *)H5AC_protect(f, H5P_DATASET_XFER_DEFAULT, H5AC_EARRAY_TEST, addr3, NULL, H5AC__NO_FLAGS_SET)))
TEST_ERROR
/* Unprotect & unpin the test entry */


@ -94,7 +94,7 @@ main(void)
H5Eprint2(H5E_DEFAULT, stdout);
goto error;
}
if (NULL == (heap = H5HL_protect(f, H5P_DATASET_XFER_DEFAULT, heap_addr, H5AC_WRITE))) {
if (NULL == (heap = H5HL_protect(f, H5P_DATASET_XFER_DEFAULT, heap_addr, H5AC__NO_FLAGS_SET))) {
H5_FAILED();
H5Eprint2(H5E_DEFAULT, stdout);
goto error;
@ -144,7 +144,7 @@ main(void)
if(j > 4)
buf[j] = '\0';
if (NULL == (heap = H5HL_protect(f, H5P_DATASET_XFER_DEFAULT, heap_addr, H5AC_READ))) {
if (NULL == (heap = H5HL_protect(f, H5P_DATASET_XFER_DEFAULT, heap_addr, H5AC__READ_ONLY_FLAG))) {
H5_FAILED();
H5Eprint2(H5E_DEFAULT, stdout);
goto error;


@ -210,7 +210,7 @@ test_ohdr_cache(char *filename, hid_t fapl)
FAIL_STACK_ERROR
/* Protect local heap (which actually pins it in the cache) */
if(NULL == (lheap = H5HL_protect(f, my_dxpl, lheap_addr, H5AC_READ)))
if(NULL == (lheap = H5HL_protect(f, my_dxpl, lheap_addr, H5AC__READ_ONLY_FLAG)))
FAIL_STACK_ERROR
/* Create an object header */
@ -230,7 +230,7 @@ test_ohdr_cache(char *filename, hid_t fapl)
FAIL_STACK_ERROR
/* Protect local heap (which actually pins it in the cache) */
if(NULL == (lheap2 = H5HL_protect(f, my_dxpl, lheap_addr2, H5AC_READ)))
if(NULL == (lheap2 = H5HL_protect(f, my_dxpl, lheap_addr2, H5AC__READ_ONLY_FLAG)))
FAIL_STACK_ERROR
/* Unprotect local heap (which actually unpins it from the cache) */
@ -247,7 +247,7 @@ test_ohdr_cache(char *filename, hid_t fapl)
FAIL_STACK_ERROR
/* Protect local heap (which actually pins it in the cache) */
if(NULL == (lheap3 = H5HL_protect(f, my_dxpl, lheap_addr3, H5AC_READ)))
if(NULL == (lheap3 = H5HL_protect(f, my_dxpl, lheap_addr3, H5AC__READ_ONLY_FLAG)))
FAIL_STACK_ERROR
/* Unprotect local heap (which actually unpins it from the cache) */

File diff suppressed because it is too large