diff --git a/java/src/hdf/hdf5lib/package-info.java b/java/src/hdf/hdf5lib/package-info.java index 7edfcb2933..2863f5a10f 100644 --- a/java/src/hdf/hdf5lib/package-info.java +++ b/java/src/hdf/hdf5lib/package-info.java @@ -171,4 +171,4 @@ * * See also: http://hdfgroup.org/HDF5" **/ -package hdf.hdf5lib; \ No newline at end of file +package hdf.hdf5lib; diff --git a/src/H5.c b/src/H5.c index 1585dd329d..7142234cba 100644 --- a/src/H5.c +++ b/src/H5.c @@ -83,6 +83,8 @@ hbool_t H5_libinit_g = FALSE; /* Library hasn't been initialized */ hbool_t H5_libterm_g = FALSE; /* Library isn't being shutdown */ #endif +hbool_t H5_use_selection_io_g = FALSE; + #ifdef H5_HAVE_MPE hbool_t H5_MPEinit_g = FALSE; /* MPE Library hasn't been initialized */ #endif @@ -144,7 +146,8 @@ herr_t H5_init_library(void) { size_t i; - herr_t ret_value = SUCCEED; + char * env_use_select_io = NULL; + herr_t ret_value = SUCCEED; FUNC_ENTER_NOAPI(FAIL) @@ -288,6 +291,14 @@ H5_init_library(void) } /* clang-format on */ + /* Check for HDF5_USE_SELECTION_IO env variable */ + env_use_select_io = HDgetenv("HDF5_USE_SELECTION_IO"); + if (NULL != env_use_select_io && HDstrcmp(env_use_select_io, "") && HDstrcmp(env_use_select_io, "0") && + HDstrcmp(env_use_select_io, "no") && HDstrcmp(env_use_select_io, "No") && + HDstrcmp(env_use_select_io, "NO") && HDstrcmp(env_use_select_io, "false") && + HDstrcmp(env_use_select_io, "False") && HDstrcmp(env_use_select_io, "FALSE")) + H5_use_selection_io_g = TRUE; + /* Debugging? 
*/ H5__debug_mask("-all"); H5__debug_mask(HDgetenv("HDF5_DEBUG")); diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index e4d8706db4..8374b1730a 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -59,6 +59,7 @@ #include "H5Iprivate.h" /* IDs */ #include "H5MMprivate.h" /* Memory management */ #include "H5MFprivate.h" /* File memory management */ +#include "H5PBprivate.h" /* Page Buffer */ #include "H5VMprivate.h" /* Vector and array functions */ /****************/ @@ -70,6 +71,7 @@ #define H5D_CHUNK_GET_NODE_INFO(map, node) \ (map->use_single ? map->single_chunk_info : (H5D_chunk_info_t *)H5SL_item(node)) #define H5D_CHUNK_GET_NEXT_NODE(map, node) (map->use_single ? (H5SL_node_t *)NULL : H5SL_next(node)) +#define H5D_CHUNK_GET_NODE_COUNT(map) (map->use_single ? (size_t)1 : H5SL_count(map->sel_chunks)) /* Sanity check on chunk index types: commonly used by a lot of routines in this file */ #define H5D_CHUNK_STORAGE_INDEX_CHK(storage) \ @@ -261,8 +263,8 @@ typedef struct H5D_chunk_iter_ud_t { /* Chunked layout operation callbacks */ static herr_t H5D__chunk_construct(H5F_t *f, H5D_t *dset); static herr_t H5D__chunk_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id); -static herr_t H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *fm); +static herr_t H5D__chunk_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, + H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *fm); static herr_t H5D__chunk_io_init_selections(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, H5D_chunk_map_t *fm); static herr_t H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, @@ -304,6 +306,7 @@ static herr_t H5D__chunk_file_cb(void *elem, const H5T_t *type, unsigned ndims void *fm); static herr_t H5D__chunk_mem_cb(void *elem, const H5T_t *type, unsigned ndims, const hsize_t *coords, void *fm); +static htri_t 
H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info); static unsigned H5D__chunk_hash_val(const H5D_shared_t *shared, const hsize_t *scaled); static herr_t H5D__chunk_flush_entry(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t reset); static herr_t H5D__chunk_cache_evict(const H5D_t *dset, H5D_rdcc_ent_t *ent, hbool_t flush); @@ -1065,16 +1068,17 @@ H5D__chunk_is_data_cached(const H5D_shared_t *shared_dset) *------------------------------------------------------------------------- */ static herr_t -H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, +H5D__chunk_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *fm) { const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */ hssize_t old_offset[H5O_LAYOUT_NDIMS]; /* Old selection offset */ htri_t file_space_normalized = FALSE; /* File dataspace was normalized */ unsigned f_ndims; /* The number of dimensions of the file's dataspace */ - int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ - unsigned u; /* Local index variable */ - herr_t ret_value = SUCCEED; /* Return value */ + int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ + htri_t use_selection_io = FALSE; /* Whether to use selection I/O */ + unsigned u; /* Local index variable */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -1128,6 +1132,11 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf if (H5D__chunk_io_init_selections(io_info, type_info, fm) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to create file and memory chunk selections") + /* Check if we're performing selection I/O and save the result */ + if ((use_selection_io = H5D__chunk_may_use_select_io(io_info)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible") + 
io_info->use_select_io = (hbool_t)use_selection_io; + done: /* Reset the global dataspace info */ fm->file_space = NULL; @@ -2458,6 +2467,78 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__chunk_cacheable() */ +/*------------------------------------------------------------------------- + * Function: H5D__chunk_may_use_select_io + * + * Purpose: A small internal function to check if it may be possible to use + * selection I/O. + * + * Return: TRUE or FALSE + * + * Programmer: Neil Fortner + * 4 May 2021 + * + *------------------------------------------------------------------------- + */ +static htri_t +H5D__chunk_may_use_select_io(const H5D_io_info_t *io_info) +{ + const H5D_t *dataset = NULL; /* Local pointer to dataset info */ + htri_t ret_value = FAIL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity check */ + HDassert(io_info); + + dataset = io_info->dset; + HDassert(dataset); + + /* Don't use selection I/O if it's globally disabled, there is a type + * conversion, or if there are filters on the dataset (for now) */ + if (!H5_use_selection_io_g || io_info->io_ops.single_read != H5D__select_read || + dataset->shared->dcpl_cache.pline.nused > 0) + ret_value = FALSE; + else { + hbool_t page_buf_enabled; + + HDassert(io_info->io_ops.single_write == H5D__select_write); + + /* Check if the page buffer is enabled */ + if (H5PB_enabled(io_info->f_sh, H5FD_MEM_DRAW, &page_buf_enabled) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if page buffer is enabled") + if (page_buf_enabled) + ret_value = FALSE; + else { + /* Check if chunks in this dataset may be cached, if so don't use + * selection I/O (for now). Note that chunks temporarily cached for + * the purpose of writing the fill value don't count, since they are + * immediately evicted. 
*/ +#ifdef H5_HAVE_PARALLEL + /* If MPI based VFD is used and the file is opened for write access, + * must bypass the chunk-cache scheme because other MPI processes + * could be writing to other elements in the same chunk. + */ + if (io_info->using_mpi_vfd && (H5F_ACC_RDWR & H5F_INTENT(dataset->oloc.file))) + ret_value = TRUE; + else { +#endif /* H5_HAVE_PARALLEL */ + /* Check if the chunk is too large to keep in the cache */ + H5_CHECK_OVERFLOW(dataset->shared->layout.u.chunk.size, uint32_t, size_t); + if ((size_t)dataset->shared->layout.u.chunk.size > dataset->shared->cache.chunk.nbytes_max) + ret_value = TRUE; + else + ret_value = FALSE; +#ifdef H5_HAVE_PARALLEL + } /* end else */ +#endif /* H5_HAVE_PARALLEL */ + } /* end else */ + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__chunk_may_use_select_io() */ + /*------------------------------------------------------------------------- * Function: H5D__chunk_read * @@ -2474,16 +2555,17 @@ static herr_t H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space, H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t *fm) { - H5SL_node_t * chunk_node; /* Current node in chunk skip list */ - H5D_io_info_t nonexistent_io_info; /* "nonexistent" I/O info object */ - H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ - H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ - H5D_io_info_t cpt_io_info; /* Compact I/O info object */ - H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */ - hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ - uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */ - hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */ - herr_t ret_value = SUCCEED; /*return value */ + H5SL_node_t * chunk_node; /* Current node in chunk skip list */ + H5D_io_info_t nonexistent_io_info; /* 
"nonexistent" I/O info object */ + uint32_t src_accessed_bytes = 0; /* Total accessed size in a chunk */ + hbool_t skip_missing_chunks = FALSE; /* Whether to skip missing chunks */ + H5S_t ** chunk_mem_spaces = NULL; /* Array of chunk memory spaces */ + H5S_t * chunk_mem_spaces_static[8]; /* Static buffer for chunk_mem_spaces */ + H5S_t ** chunk_file_spaces = NULL; /* Array of chunk file spaces */ + H5S_t * chunk_file_spaces_static[8]; /* Static buffer for chunk_file_spaces */ + haddr_t * chunk_addrs = NULL; /* Array of chunk addresses */ + haddr_t chunk_addrs_static[8]; /* Static buffer for chunk_addrs */ + herr_t ret_value = SUCCEED; /*return value */ FUNC_ENTER_STATIC @@ -2497,23 +2579,6 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_ H5MM_memcpy(&nonexistent_io_info, io_info, sizeof(nonexistent_io_info)); nonexistent_io_info.layout_ops = *H5D_LOPS_NONEXISTENT; - /* Set up contiguous I/O info object */ - H5MM_memcpy(&ctg_io_info, io_info, sizeof(ctg_io_info)); - ctg_io_info.store = &ctg_store; - ctg_io_info.layout_ops = *H5D_LOPS_CONTIG; - - /* Initialize temporary contiguous storage info */ - H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, - uint32_t); - - /* Set up compact I/O info object */ - H5MM_memcpy(&cpt_io_info, io_info, sizeof(cpt_io_info)); - cpt_io_info.store = &cpt_store; - cpt_io_info.layout_ops = *H5D_LOPS_COMPACT; - - /* Initialize temporary compact storage info */ - cpt_store.compact.dirty = &cpt_dirty; - { const H5O_fill_t *fill = &(io_info->dset->shared->dcpl_cache.fill); /* Fill value info */ H5D_fill_value_t fill_status; /* Fill value status */ @@ -2531,80 +2596,215 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_ skip_missing_chunks = TRUE; } - /* Iterate through nodes in chunk skip list */ - chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); - while (chunk_node) { - H5D_chunk_info_t *chunk_info; /* Chunk information */ - 
H5D_chunk_ud_t udata; /* Chunk index pass-through */ + /* Different blocks depending on whether we're using selection I/O */ + if (io_info->use_select_io) { + size_t num_chunks; + size_t element_sizes[2] = {type_info->dst_type_size, 0}; + void * bufs[2] = {io_info->u.rbuf, NULL}; - /* Get the actual chunk information from the skip list node */ - chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); + /* Cache number of chunks */ + num_chunks = H5D_CHUNK_GET_NODE_COUNT(fm); - /* Get the info for the chunk in the file */ - if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") - - /* Sanity check */ - HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || - (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); - - /* Check for non-existent chunk & skip it if appropriate */ - if (H5F_addr_defined(udata.chunk_block.offset) || UINT_MAX != udata.idx_hint || - !skip_missing_chunks) { - H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */ - void * chunk = NULL; /* Pointer to locked chunk buffer */ - htri_t cacheable; /* Whether the chunk is cacheable */ - - /* Set chunk's [scaled] coordinates */ - io_info->store->chunk.scaled = chunk_info->scaled; - - /* Determine if we should use the chunk cache */ - if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") - if (cacheable) { - /* Load the chunk into cache and lock it. 
*/ - - /* Compute # of bytes accessed in chunk */ - H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t); - src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size; - - /* Lock the chunk into the cache */ - if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE))) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") - - /* Set up the storage buffer information for this chunk */ - cpt_store.compact.buf = chunk; - - /* Point I/O info at contiguous I/O info for this chunk */ - chk_io_info = &cpt_io_info; - } /* end if */ - else if (H5F_addr_defined(udata.chunk_block.offset)) { - /* Set up the storage address information for this chunk */ - ctg_store.contig.dset_addr = udata.chunk_block.offset; - - /* Point I/O info at temporary I/O info for this chunk */ - chk_io_info = &ctg_io_info; - } /* end else if */ - else { - /* Point I/O info at "nonexistent" I/O info for this chunk */ - chk_io_info = &nonexistent_io_info; - } /* end else */ - - /* Perform the actual read operation */ - if ((io_info->io_ops.single_read)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points, - chunk_info->fspace, chunk_info->mspace) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed") - - /* Release the cache lock on the chunk. 
*/ - if (chunk && H5D__chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") + /* Allocate arrays of dataspaces and offsets for use with selection I/O, + * or point to static buffers */ + HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) == + sizeof(chunk_file_spaces_static) / sizeof(chunk_file_spaces_static[0])); + HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) == + sizeof(chunk_addrs_static) / sizeof(chunk_addrs_static[0])); + if (num_chunks > (sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]))) { + if (NULL == (chunk_mem_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for memory space list") + if (NULL == (chunk_file_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for file space list") + if (NULL == (chunk_addrs = H5MM_malloc(num_chunks * sizeof(haddr_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for chunk address list") } /* end if */ + else { + chunk_mem_spaces = chunk_mem_spaces_static; + chunk_file_spaces = chunk_file_spaces_static; + chunk_addrs = chunk_addrs_static; + } /* end else */ - /* Advance to next chunk in list */ - chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); - } /* end while */ + /* Reset num_chunks */ + num_chunks = 0; + + /* Iterate through nodes in chunk skip list */ + chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); + while (chunk_node) { + H5D_chunk_info_t *chunk_info; /* Chunk information */ + H5D_chunk_ud_t udata; /* Chunk index pass-through */ + + /* Get the actual chunk information from the skip list node */ + chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); + + /* Get the info for the chunk in the file */ + if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, 
&udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + + /* There should be no chunks cached */ + HDassert(UINT_MAX == udata.idx_hint); + + /* Sanity check */ + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); + + /* Check for non-existent chunk & skip it if appropriate */ + if (H5F_addr_defined(udata.chunk_block.offset)) { + /* Add chunk to list for selection I/O */ + chunk_mem_spaces[num_chunks] = chunk_info->mspace; + chunk_file_spaces[num_chunks] = chunk_info->fspace; + chunk_addrs[num_chunks] = udata.chunk_block.offset; + num_chunks++; + } /* end if */ + else if (!skip_missing_chunks) { + /* Perform the actual read operation from the nonexistent chunk + */ + if ((io_info->io_ops.single_read)(&nonexistent_io_info, type_info, + (hsize_t)chunk_info->chunk_points, chunk_info->fspace, + chunk_info->mspace) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed") + } /* end if */ + + /* Advance to next chunk in list */ + chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); + } /* end while */ + + /* Issue selection I/O call (we can skip the page buffer because we've + * already verified it won't be used, and the metadata accumulator + * because this is raw data) */ + if (H5F_shared_select_read(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, (uint32_t)num_chunks, + chunk_mem_spaces, chunk_file_spaces, chunk_addrs, element_sizes, bufs) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunk selection read failed") + + /* Clean up memory */ + if (chunk_mem_spaces != chunk_mem_spaces_static) { + HDassert(chunk_mem_spaces); + HDassert(chunk_file_spaces != chunk_file_spaces_static); + HDassert(chunk_addrs != chunk_addrs_static); + H5MM_free(chunk_mem_spaces); + chunk_mem_spaces = NULL; + H5MM_free(chunk_file_spaces); + chunk_file_spaces = NULL; + H5MM_free(chunk_addrs); + chunk_addrs 
= NULL; + } /* end if */ + } /* end if */ + else { + H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ + H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ + H5D_io_info_t cpt_io_info; /* Compact I/O info object */ + H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */ + hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ + + /* Set up contiguous I/O info object */ + H5MM_memcpy(&ctg_io_info, io_info, sizeof(ctg_io_info)); + ctg_io_info.store = &ctg_store; + ctg_io_info.layout_ops = *H5D_LOPS_CONTIG; + + /* Initialize temporary contiguous storage info */ + H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, + uint32_t); + + /* Set up compact I/O info object */ + H5MM_memcpy(&cpt_io_info, io_info, sizeof(cpt_io_info)); + cpt_io_info.store = &cpt_store; + cpt_io_info.layout_ops = *H5D_LOPS_COMPACT; + + /* Initialize temporary compact storage info */ + cpt_store.compact.dirty = &cpt_dirty; + + /* Iterate through nodes in chunk skip list */ + chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); + while (chunk_node) { + H5D_chunk_info_t *chunk_info; /* Chunk information */ + H5D_chunk_ud_t udata; /* Chunk index pass-through */ + htri_t cacheable; /* Whether the chunk is cacheable */ + + /* Get the actual chunk information from the skip list node */ + chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); + + /* Get the info for the chunk in the file */ + if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + + /* Sanity check */ + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); + + /* Check for non-existent chunk & skip it if appropriate */ + if (H5F_addr_defined(udata.chunk_block.offset) || UINT_MAX != udata.idx_hint || + 
!skip_missing_chunks) { + H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */ + void * chunk = NULL; /* Pointer to locked chunk buffer */ + + /* Set chunk's [scaled] coordinates */ + io_info->store->chunk.scaled = chunk_info->scaled; + + /* Determine if we should use the chunk cache */ + if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") + if (cacheable) { + /* Load the chunk into cache and lock it. */ + + /* Compute # of bytes accessed in chunk */ + H5_CHECK_OVERFLOW(type_info->src_type_size, /*From:*/ size_t, /*To:*/ uint32_t); + src_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->src_type_size; + + /* Lock the chunk into the cache */ + if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, FALSE, FALSE))) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") + + /* Set up the storage buffer information for this chunk */ + cpt_store.compact.buf = chunk; + + /* Point I/O info at contiguous I/O info for this chunk */ + chk_io_info = &cpt_io_info; + } /* end if */ + else if (H5F_addr_defined(udata.chunk_block.offset)) { + /* Set up the storage address information for this chunk */ + ctg_store.contig.dset_addr = udata.chunk_block.offset; + + /* Point I/O info at temporary I/O info for this chunk */ + chk_io_info = &ctg_io_info; + } /* end else if */ + else { + /* Point I/O info at "nonexistent" I/O info for this chunk */ + chk_io_info = &nonexistent_io_info; + } /* end else */ + + /* Perform the actual read operation */ + if ((io_info->io_ops.single_read)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points, + chunk_info->fspace, chunk_info->mspace) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked read failed") + + /* Release the cache lock on the chunk. 
*/ + if (chunk && H5D__chunk_unlock(io_info, &udata, FALSE, chunk, src_accessed_bytes) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") + } /* end if */ + + /* Advance to next chunk in list */ + chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); + } /* end while */ + } /* end else */ done: + /* Cleanup on failure */ + if (ret_value < 0) { + if (chunk_mem_spaces != chunk_mem_spaces_static) + chunk_mem_spaces = H5MM_xfree(chunk_mem_spaces); + if (chunk_file_spaces != chunk_file_spaces_static) + chunk_file_spaces = H5MM_xfree(chunk_file_spaces); + if (chunk_addrs != chunk_addrs_static) + chunk_addrs = H5MM_xfree(chunk_addrs); + } /* end if */ + + /* Make sure we cleaned up */ + HDassert(!chunk_mem_spaces || chunk_mem_spaces == chunk_mem_spaces_static); + HDassert(!chunk_file_spaces || chunk_file_spaces == chunk_file_spaces_static); + HDassert(!chunk_addrs || chunk_addrs == chunk_addrs_static); + FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_read() */ @@ -2624,14 +2824,20 @@ static herr_t H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space, H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t *fm) { - H5SL_node_t * chunk_node; /* Current node in chunk skip list */ - H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ - H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ - H5D_io_info_t cpt_io_info; /* Compact I/O info object */ - H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */ - hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ - uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */ - herr_t ret_value = SUCCEED; /* Return value */ + H5SL_node_t * chunk_node; /* Current node in chunk skip list */ + H5D_io_info_t ctg_io_info; /* Contiguous I/O info object */ + H5D_storage_t ctg_store; /* Chunk storage information as contiguous dataset */ + 
H5D_io_info_t cpt_io_info; /* Compact I/O info object */ + H5D_storage_t cpt_store; /* Chunk storage information as compact dataset */ + hbool_t cpt_dirty; /* Temporary placeholder for compact storage "dirty" flag */ + uint32_t dst_accessed_bytes = 0; /* Total accessed size in a chunk */ + H5S_t ** chunk_mem_spaces = NULL; /* Array of chunk memory spaces */ + H5S_t * chunk_mem_spaces_static[8]; /* Static buffer for chunk_mem_spaces */ + H5S_t ** chunk_file_spaces = NULL; /* Array of chunk file spaces */ + H5S_t * chunk_file_spaces_static[8]; /* Static buffer for chunk_file_spaces */ + haddr_t * chunk_addrs = NULL; /* Array of chunk addresses */ + haddr_t chunk_addrs_static[8]; /* Static buffer for chunk_addrs */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -2658,116 +2864,295 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize /* Initialize temporary compact storage info */ cpt_store.compact.dirty = &cpt_dirty; - /* Iterate through nodes in chunk skip list */ - chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); - while (chunk_node) { - H5D_chunk_info_t * chunk_info; /* Chunk information */ - H5D_chk_idx_info_t idx_info; /* Chunked index info */ - H5D_io_info_t * chk_io_info; /* Pointer to I/O info object for this chunk */ - void * chunk; /* Pointer to locked chunk buffer */ - H5D_chunk_ud_t udata; /* Index pass-through */ - htri_t cacheable; /* Whether the chunk is cacheable */ - hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */ + /* Different blocks depending on whether we're using selection I/O */ + if (io_info->use_select_io) { + size_t num_chunks; + size_t element_sizes[2] = {type_info->dst_type_size, 0}; + const void *bufs[2] = {io_info->u.wbuf, NULL}; - /* Get the actual chunk information from the skip list node */ - chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); + /* Cache number of chunks */ + num_chunks = H5D_CHUNK_GET_NODE_COUNT(fm); - /* Look up the chunk */ - 
if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") - - /* Sanity check */ - HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || - (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); - - /* Set chunk's [scaled] coordinates */ - io_info->store->chunk.scaled = chunk_info->scaled; - - /* Determine if we should use the chunk cache */ - if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") - if (cacheable) { - /* Load the chunk into cache. But if the whole chunk is written, - * simply allocate space instead of load the chunk. */ - hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */ - - /* Compute # of bytes accessed in chunk */ - H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t); - dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size; - - /* Determine if we will access all the data in the chunk */ - if (dst_accessed_bytes != ctg_store.contig.dset_size || - (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size || - fm->fsel_type == H5S_SEL_POINTS) - entire_chunk = FALSE; - - /* Lock the chunk into the cache */ - if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE))) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") - - /* Set up the storage buffer information for this chunk */ - cpt_store.compact.buf = chunk; - - /* Point I/O info at main I/O info for this chunk */ - chk_io_info = &cpt_io_info; + /* Allocate arrays of dataspaces and offsets for use with selection I/O, + * or point to static buffers */ + HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) == + sizeof(chunk_file_spaces_static) / 
sizeof(chunk_file_spaces_static[0])); + HDassert(sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]) == + sizeof(chunk_addrs_static) / sizeof(chunk_addrs_static[0])); + if (num_chunks > (sizeof(chunk_mem_spaces_static) / sizeof(chunk_mem_spaces_static[0]))) { + if (NULL == (chunk_mem_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for memory space list") + if (NULL == (chunk_file_spaces = H5MM_malloc(num_chunks * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for file space list") + if (NULL == (chunk_addrs = H5MM_malloc(num_chunks * sizeof(haddr_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for chunk address list") } /* end if */ else { - /* If the chunk hasn't been allocated on disk, do so now. */ - if (!H5F_addr_defined(udata.chunk_block.offset)) { - /* Compose chunked index info struct */ - idx_info.f = io_info->dset->oloc.file; - idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline); - idx_info.layout = &(io_info->dset->shared->layout.u.chunk); - idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk); + chunk_mem_spaces = chunk_mem_spaces_static; + chunk_file_spaces = chunk_file_spaces_static; + chunk_addrs = chunk_addrs_static; + } /* end else */ - /* Set up the size of chunk for user data */ - udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size; + /* Reset num_chunks */ + num_chunks = 0; - /* Allocate the chunk */ - if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, - chunk_info->scaled) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, - "unable to insert/resize chunk on chunk level") + /* Iterate through nodes in chunk skip list */ + chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); + while (chunk_node) { + H5D_chunk_info_t * chunk_info; /* Chunk information */ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + 
H5D_chunk_ud_t udata; /* Index pass-through */ + htri_t cacheable; /* Whether the chunk is cacheable */ + hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */ - /* Make sure the address of the chunk is returned. */ - if (!H5F_addr_defined(udata.chunk_block.offset)) - HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined") + /* Get the actual chunk information from the skip list node */ + chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); - /* Cache the new chunk information */ - H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata); + /* Get the info for the chunk in the file */ + if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + + /* There should be no chunks cached */ + HDassert(UINT_MAX == udata.idx_hint); + + /* Sanity check */ + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); + + /* Set chunk's [scaled] coordinates */ + io_info->store->chunk.scaled = chunk_info->scaled; + + /* Determine if we should use the chunk cache */ + if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") + if (cacheable) { + /* Load the chunk into cache. But if the whole chunk is written, + * simply allocate space instead of load the chunk. 
*/ + void * chunk; /* Pointer to locked chunk buffer */ + hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */ + + /* Compute # of bytes accessed in chunk */ + H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t); + dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size; + + /* Determine if we will access all the data in the chunk */ + if (dst_accessed_bytes != ctg_store.contig.dset_size || + (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size || + fm->fsel_type == H5S_SEL_POINTS) + entire_chunk = FALSE; + + /* Lock the chunk into the cache */ + if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE))) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") + + /* Set up the storage buffer information for this chunk */ + cpt_store.compact.buf = chunk; + + /* Perform the actual write operation */ + if ((io_info->io_ops.single_write)(&cpt_io_info, type_info, (hsize_t)chunk_info->chunk_points, + chunk_info->fspace, chunk_info->mspace) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed") + + /* Release the cache lock on the chunk */ + if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") } /* end if */ + else { + /* If the chunk hasn't been allocated on disk, do so now. 
*/ + if (!H5F_addr_defined(udata.chunk_block.offset)) { + /* Compose chunked index info struct */ + idx_info.f = io_info->dset->oloc.file; + idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline); + idx_info.layout = &(io_info->dset->shared->layout.u.chunk); + idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk); - /* Set up the storage address information for this chunk */ - ctg_store.contig.dset_addr = udata.chunk_block.offset; + /* Set up the size of chunk for user data */ + udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size; - /* No chunk cached */ - chunk = NULL; + /* Allocate the chunk */ + if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, + chunk_info->scaled) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, + "unable to insert/resize chunk on chunk level") - /* Point I/O info at temporary I/O info for this chunk */ - chk_io_info = &ctg_io_info; - } /* end else */ + /* Make sure the address of the chunk is returned. */ + if (!H5F_addr_defined(udata.chunk_block.offset)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined") - /* Perform the actual write operation */ - if ((io_info->io_ops.single_write)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points, - chunk_info->fspace, chunk_info->mspace) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed") + /* Cache the new chunk information */ + H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata); - /* Release the cache lock on the chunk, or insert chunk into index. 
*/ - if (chunk) { - if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") + /* Insert chunk into index */ + if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert) + if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, + NULL) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, + "unable to insert chunk addr into index") + } /* end if */ + + /* Add chunk to list for selection I/O */ + chunk_mem_spaces[num_chunks] = chunk_info->mspace; + chunk_file_spaces[num_chunks] = chunk_info->fspace; + chunk_addrs[num_chunks] = udata.chunk_block.offset; + num_chunks++; + } /* end else */ + + /* Advance to next chunk in list */ + chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); + } /* end while */ + + /* Issue selection I/O call (we can skip the page buffer because we've + * already verified it won't be used, and the metadata accumulator + * because this is raw data) */ + if (H5F_shared_select_write(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, (uint32_t)num_chunks, + chunk_mem_spaces, chunk_file_spaces, chunk_addrs, element_sizes, + bufs) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunk selection read failed") + + /* Clean up memory */ + if (chunk_mem_spaces != chunk_mem_spaces_static) { + HDassert(chunk_mem_spaces); + HDassert(chunk_file_spaces != chunk_file_spaces_static); + HDassert(chunk_addrs != chunk_addrs_static); + H5MM_free(chunk_mem_spaces); + chunk_mem_spaces = NULL; + H5MM_free(chunk_file_spaces); + chunk_file_spaces = NULL; + H5MM_free(chunk_addrs); + chunk_addrs = NULL; } /* end if */ - else { - if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert) - if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index") - } /* end else */ + } /* end if 
*/ + else { + /* Iterate through nodes in chunk skip list */ + chunk_node = H5D_CHUNK_GET_FIRST_NODE(fm); + while (chunk_node) { + H5D_chunk_info_t * chunk_info; /* Chunk information */ + H5D_chk_idx_info_t idx_info; /* Chunked index info */ + H5D_io_info_t * chk_io_info; /* Pointer to I/O info object for this chunk */ + void * chunk; /* Pointer to locked chunk buffer */ + H5D_chunk_ud_t udata; /* Index pass-through */ + htri_t cacheable; /* Whether the chunk is cacheable */ + hbool_t need_insert = FALSE; /* Whether the chunk needs to be inserted into the index */ - /* Advance to next chunk in list */ - chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); - } /* end while */ + /* Get the actual chunk information from the skip list node */ + chunk_info = H5D_CHUNK_GET_NODE_INFO(fm, chunk_node); + + /* Look up the chunk */ + if (H5D__chunk_lookup(io_info->dset, chunk_info->scaled, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address") + + /* Sanity check */ + HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) || + (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0)); + + /* Set chunk's [scaled] coordinates */ + io_info->store->chunk.scaled = chunk_info->scaled; + + /* Determine if we should use the chunk cache */ + if ((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, TRUE)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't tell if chunk is cacheable") + if (cacheable) { + /* Load the chunk into cache. But if the whole chunk is written, + * simply allocate space instead of load the chunk. 
*/ + hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */ + + /* Compute # of bytes accessed in chunk */ + H5_CHECK_OVERFLOW(type_info->dst_type_size, /*From:*/ size_t, /*To:*/ uint32_t); + dst_accessed_bytes = chunk_info->chunk_points * (uint32_t)type_info->dst_type_size; + + /* Determine if we will access all the data in the chunk */ + if (dst_accessed_bytes != ctg_store.contig.dset_size || + (chunk_info->chunk_points * type_info->src_type_size) != ctg_store.contig.dset_size || + fm->fsel_type == H5S_SEL_POINTS) + entire_chunk = FALSE; + + /* Lock the chunk into the cache */ + if (NULL == (chunk = H5D__chunk_lock(io_info, &udata, entire_chunk, FALSE))) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk") + + /* Set up the storage buffer information for this chunk */ + cpt_store.compact.buf = chunk; + + /* Point I/O info at main I/O info for this chunk */ + chk_io_info = &cpt_io_info; + } /* end if */ + else { + /* If the chunk hasn't been allocated on disk, do so now. */ + if (!H5F_addr_defined(udata.chunk_block.offset)) { + /* Compose chunked index info struct */ + idx_info.f = io_info->dset->oloc.file; + idx_info.pline = &(io_info->dset->shared->dcpl_cache.pline); + idx_info.layout = &(io_info->dset->shared->layout.u.chunk); + idx_info.storage = &(io_info->dset->shared->layout.storage.u.chunk); + + /* Set up the size of chunk for user data */ + udata.chunk_block.length = io_info->dset->shared->layout.u.chunk.size; + + /* Allocate the chunk */ + if (H5D__chunk_file_alloc(&idx_info, NULL, &udata.chunk_block, &need_insert, + chunk_info->scaled) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, + "unable to insert/resize chunk on chunk level") + + /* Make sure the address of the chunk is returned. 
*/ + if (!H5F_addr_defined(udata.chunk_block.offset)) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined") + + /* Cache the new chunk information */ + H5D__chunk_cinfo_cache_update(&io_info->dset->shared->cache.chunk.last, &udata); + } /* end if */ + + /* Set up the storage address information for this chunk */ + ctg_store.contig.dset_addr = udata.chunk_block.offset; + + /* No chunk cached */ + chunk = NULL; + + /* Point I/O info at temporary I/O info for this chunk */ + chk_io_info = &ctg_io_info; + } /* end else */ + + /* Perform the actual write operation */ + if ((io_info->io_ops.single_write)(chk_io_info, type_info, (hsize_t)chunk_info->chunk_points, + chunk_info->fspace, chunk_info->mspace) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "chunked write failed") + + /* Release the cache lock on the chunk, or insert chunk into index. */ + if (chunk) { + if (H5D__chunk_unlock(io_info, &udata, TRUE, chunk, dst_accessed_bytes) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk") + } /* end if */ + else { + if (need_insert && io_info->dset->shared->layout.storage.u.chunk.ops->insert) + if ((io_info->dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, NULL) < + 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, + "unable to insert chunk addr into index") + } /* end else */ + + /* Advance to next chunk in list */ + chunk_node = H5D_CHUNK_GET_NEXT_NODE(fm, chunk_node); + } /* end while */ + } /* end else */ done: + /* Cleanup on failure */ + if (ret_value < 0) { + if (chunk_mem_spaces != chunk_mem_spaces_static) + chunk_mem_spaces = H5MM_xfree(chunk_mem_spaces); + if (chunk_file_spaces != chunk_file_spaces_static) + chunk_file_spaces = H5MM_xfree(chunk_file_spaces); + if (chunk_addrs != chunk_addrs_static) + chunk_addrs = H5MM_xfree(chunk_addrs); + } /* end if */ + + /* Make sure we cleaned up */ + HDassert(!chunk_mem_spaces || chunk_mem_spaces == chunk_mem_spaces_static); + 
HDassert(!chunk_file_spaces || chunk_file_spaces == chunk_file_spaces_static); + HDassert(!chunk_addrs || chunk_addrs == chunk_addrs_static); + FUNC_LEAVE_NOAPI(ret_value) } /* H5D__chunk_write() */ diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index 356a54ed18..1ac1267bc5 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -63,8 +63,8 @@ typedef struct H5D_compact_iovv_memmanage_ud_t { /* Layout operation callbacks */ static herr_t H5D__compact_construct(H5F_t *f, H5D_t *dset); static hbool_t H5D__compact_is_space_alloc(const H5O_storage_t *storage); -static herr_t H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm); +static herr_t H5D__compact_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, + H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm); static herr_t H5D__compact_iovv_memmanage_cb(hsize_t dst_off, hsize_t src_off, size_t len, void *_udata); static ssize_t H5D__compact_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, @@ -247,7 +247,7 @@ H5D__compact_is_space_alloc(const H5O_storage_t H5_ATTR_UNUSED *storage) *------------------------------------------------------------------------- */ static herr_t -H5D__compact_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, +H5D__compact_io_init(H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space, H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *cm) { diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index 3828e8e474..840c7ec8d8 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -43,6 +43,7 @@ #include "H5FOprivate.h" /* File objects */ #include "H5Oprivate.h" /* Object headers */ #include "H5Pprivate.h" /* Property lists */ 
+#include "H5PBprivate.h" /* Page Buffer */ #include "H5VMprivate.h" /* Vector and array functions */ /****************/ @@ -90,8 +91,8 @@ typedef struct H5D_contig_writevv_ud_t { /* Layout operation callbacks */ static herr_t H5D__contig_construct(H5F_t *f, H5D_t *dset); static herr_t H5D__contig_init(H5F_t *f, const H5D_t *dset, hid_t dapl_id); -static herr_t H5D__contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, - hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm); +static herr_t H5D__contig_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, + H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm); static ssize_t H5D__contig_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); @@ -102,6 +103,7 @@ static herr_t H5D__contig_flush(H5D_t *dset); /* Helper routines */ static herr_t H5D__contig_write_one(H5D_io_info_t *io_info, hsize_t offset, size_t size); +static htri_t H5D__contig_may_use_select_io(const H5D_io_info_t *io_info, H5D_io_op_type_t op_type); /*********************/ /* Package Variables */ @@ -566,18 +568,80 @@ H5D__contig_is_data_cached(const H5D_shared_t *shared_dset) *------------------------------------------------------------------------- */ static herr_t -H5D__contig_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, +H5D__contig_io_init(H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space, H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *cm) { - FUNC_ENTER_STATIC_NOERR + htri_t use_selection_io = FALSE; /* Whether to use selection I/O */ + htri_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_STATIC io_info->store->contig.dset_addr = 
io_info->dset->shared->layout.storage.u.contig.addr; io_info->store->contig.dset_size = io_info->dset->shared->layout.storage.u.contig.size; - FUNC_LEAVE_NOAPI(SUCCEED) + /* Check if we're performing selection I/O */ + if ((use_selection_io = H5D__contig_may_use_select_io(io_info, H5D_IO_OP_READ)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if selection I/O is possible") + io_info->use_select_io = (hbool_t)use_selection_io; + +done: + FUNC_LEAVE_NOAPI(ret_value) } /* end H5D__contig_io_init() */ +/*------------------------------------------------------------------------- + * Function: H5D__contig_may_use_select_io + * + * Purpose: A small internal function to if it may be possible to use + * selection I/O. + * + * Return: TRUE/FALSE/FAIL + * + * Programmer: Neil Fortner + * 3 August 2021 + * + *------------------------------------------------------------------------- + */ +static htri_t +H5D__contig_may_use_select_io(const H5D_io_info_t *io_info, H5D_io_op_type_t op_type) +{ + const H5D_t *dataset = io_info->dset; /* Local pointer to dataset info */ + htri_t ret_value = FAIL; /* Return value */ + + FUNC_ENTER_STATIC + + /* Sanity check */ + HDassert(io_info); + HDassert(dataset); + HDassert(op_type == H5D_IO_OP_READ || op_type == H5D_IO_OP_WRITE); + + /* Don't use selection I/O if it's globally disabled, if there is a type + * conversion, or if it's not a contiguous dataset, or if the sieve buffer + * exists (write) or is dirty (read) */ + if (!H5_use_selection_io_g || io_info->io_ops.single_read != H5D__select_read || + io_info->layout_ops.readvv != H5D__contig_readvv || + (op_type == H5D_IO_OP_READ && io_info->dset->shared->cache.contig.sieve_dirty) || + (op_type == H5D_IO_OP_WRITE && io_info->dset->shared->cache.contig.sieve_buf)) + ret_value = FALSE; + else { + hbool_t page_buf_enabled; + + HDassert(io_info->io_ops.single_write == H5D__select_write); + HDassert(io_info->layout_ops.writevv == H5D__contig_writevv); + + /* Check if the page 
buffer is enabled */ + if (H5PB_enabled(io_info->f_sh, H5FD_MEM_DRAW, &page_buf_enabled) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't check if page buffer is enabled") + if (page_buf_enabled) + ret_value = FALSE; + else + ret_value = TRUE; + } /* end else */ + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5D__contig_may_use_select_io() */ + /*------------------------------------------------------------------------- * Function: H5D__contig_read * @@ -594,7 +658,7 @@ herr_t H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *fm) { - herr_t ret_value = SUCCEED; /*return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -605,8 +669,20 @@ H5D__contig_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize HDassert(mem_space); HDassert(file_space); - /* Read data */ - if ((io_info->io_ops.single_read)(io_info, type_info, nelmts, file_space, mem_space) < 0) + if (io_info->use_select_io) { + size_t dst_type_size = type_info->dst_type_size; + + /* Issue selection I/O call (we can skip the page buffer because we've + * already verified it won't be used, and the metadata accumulator + * because this is raw data) */ + if (H5F_shared_select_read(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, nelmts > 0 ? 
1 : 0, + &mem_space, &file_space, &(io_info->store->contig.dset_addr), + &dst_type_size, &(io_info->u.rbuf)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "contiguous selection read failed") + } /* end if */ + else + /* Read data through legacy (non-selection I/O) pathway */ + if ((io_info->io_ops.single_read)(io_info, type_info, nelmts, file_space, mem_space) < 0) HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "contiguous read failed") done: @@ -629,7 +705,7 @@ herr_t H5D__contig_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *fm) { - herr_t ret_value = SUCCEED; /*return value */ + herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -640,8 +716,20 @@ H5D__contig_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsiz HDassert(mem_space); HDassert(file_space); - /* Write data */ - if ((io_info->io_ops.single_write)(io_info, type_info, nelmts, file_space, mem_space) < 0) + if (io_info->use_select_io) { + size_t dst_type_size = type_info->dst_type_size; + + /* Issue selection I/O call (we can skip the page buffer because we've + * already verified it won't be used, and the metadata accumulator + * because this is raw data) */ + if (H5F_shared_select_write(H5F_SHARED(io_info->dset->oloc.file), H5FD_MEM_DRAW, nelmts > 0 ? 
1 : 0, + &mem_space, &file_space, &(io_info->store->contig.dset_addr), + &dst_type_size, &(io_info->u.wbuf)) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "contiguous selection write failed") + } /* end if */ + else + /* Write data through legacy (non-selection I/O) pathway */ + if ((io_info->io_ops.single_write)(io_info, type_info, nelmts, file_space, mem_space) < 0) HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "contiguous write failed") done: diff --git a/src/H5Defl.c b/src/H5Defl.c index a30955bef4..b22c6de314 100644 --- a/src/H5Defl.c +++ b/src/H5Defl.c @@ -60,9 +60,9 @@ typedef struct H5D_efl_writevv_ud_t { /********************/ /* Layout operation callbacks */ -static herr_t H5D__efl_construct(H5F_t *f, H5D_t *dset); -static herr_t H5D__efl_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, - H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm); +static herr_t H5D__efl_construct(H5F_t *f, H5D_t *dset); +static herr_t H5D__efl_io_init(H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, + H5S_t *file_space, H5S_t *mem_space, H5D_chunk_map_t *cm); static ssize_t H5D__efl_readvv(const H5D_io_info_t *io_info, size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[]); @@ -209,7 +209,7 @@ H5D__efl_is_space_alloc(const H5O_storage_t H5_ATTR_UNUSED *storage) *------------------------------------------------------------------------- */ static herr_t -H5D__efl_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, +H5D__efl_io_init(H5D_io_info_t *io_info, const H5D_type_info_t H5_ATTR_UNUSED *type_info, hsize_t H5_ATTR_UNUSED nelmts, H5S_t H5_ATTR_UNUSED *file_space, H5S_t H5_ATTR_UNUSED *mem_space, H5D_chunk_map_t H5_ATTR_UNUSED *cm) { diff --git a/src/H5Dio.c b/src/H5Dio.c index e226a0a6e4..cb61b71c5e 100644 --- a/src/H5Dio.c +++ 
b/src/H5Dio.c @@ -576,6 +576,10 @@ H5D__ioinfo_init(H5D_t *dset, const H5D_type_info_t *type_info, H5D_storage_t *s io_info->io_ops.single_write = H5D__scatgath_write; } /* end else */ + /* Start with selection I/O off, layout callback will turn it on if + * appropriate */ + io_info->use_select_io = FALSE; + #ifdef H5_HAVE_PARALLEL /* Determine if the file was opened with an MPI VFD */ io_info->using_mpi_vfd = H5F_HAS_FEATURE(dset->oloc.file, H5FD_FEAT_HAS_MPI); @@ -814,12 +818,17 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, const H5S_t *file_ /* Check if we can use the optimized parallel I/O routines */ if (opt == TRUE) { - /* Override the I/O op pointers to the MPI-specific routines */ - io_info->io_ops.multi_read = dset->shared->layout.ops->par_read; - io_info->io_ops.multi_write = dset->shared->layout.ops->par_write; - io_info->io_ops.single_read = H5D__mpio_select_read; - io_info->io_ops.single_write = H5D__mpio_select_write; - } /* end if */ + /* Override the I/O op pointers to the MPI-specific routines, unless + * selection I/O is to be used - in this case the file driver will + * handle collective I/O */ + /* Check for selection/vector support in file driver? -NAF */ + if (!io_info->use_select_io) { + io_info->io_ops.multi_read = dset->shared->layout.ops->par_read; + io_info->io_ops.multi_write = dset->shared->layout.ops->par_write; + io_info->io_ops.single_read = H5D__mpio_select_read; + io_info->io_ops.single_write = H5D__mpio_select_write; + } /* end if */ + } /* end if */ else { /* Check if there are any filters in the pipeline. 
If there are, * we cannot break to independent I/O if this is a write operation diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h index a424929fb3..0e0eb08ae0 100644 --- a/src/H5Dpkg.h +++ b/src/H5Dpkg.h @@ -121,9 +121,9 @@ typedef herr_t (*H5D_layout_construct_func_t)(H5F_t *f, H5D_t *dset); typedef herr_t (*H5D_layout_init_func_t)(H5F_t *f, const H5D_t *dset, hid_t dapl_id); typedef hbool_t (*H5D_layout_is_space_alloc_func_t)(const H5O_storage_t *storage); typedef hbool_t (*H5D_layout_is_data_cached_func_t)(const H5D_shared_t *shared_dset); -typedef herr_t (*H5D_layout_io_init_func_t)(const struct H5D_io_info_t *io_info, - const H5D_type_info_t *type_info, hsize_t nelmts, - H5S_t *file_space, H5S_t *mem_space, struct H5D_chunk_map_t *cm); +typedef herr_t (*H5D_layout_io_init_func_t)(struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info, + hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, + struct H5D_chunk_map_t *cm); typedef herr_t (*H5D_layout_read_func_t)(struct H5D_io_info_t *io_info, const H5D_type_info_t *type_info, hsize_t nelmts, H5S_t *file_space, H5S_t *mem_space, struct H5D_chunk_map_t *fm); @@ -222,6 +222,7 @@ typedef struct H5D_io_info_t { H5D_layout_ops_t layout_ops; /* Dataset layout I/O operation function pointers */ H5D_io_ops_t io_ops; /* I/O operation function pointers */ H5D_io_op_type_t op_type; + hbool_t use_select_io; /* Whether to use selection I/O */ union { void * rbuf; /* Pointer to buffer for read */ const void *wbuf; /* Pointer to buffer to write */ diff --git a/src/H5FD.c b/src/H5FD.c index 20f69cb804..1887e181b6 100644 --- a/src/H5FD.c +++ b/src/H5FD.c @@ -214,6 +214,8 @@ H5FDregister(const H5FD_class_t *cls) /* Check arguments */ if (!cls) HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, H5I_INVALID_HID, "null class pointer is disallowed") + if (cls->version != H5FD_CLASS_VERSION) + HGOTO_ERROR(H5E_ARGS, H5E_VERSION, H5I_INVALID_HID, "wrong file driver version #") if (!cls->open || !cls->close) HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, 
H5I_INVALID_HID, "'open' and/or 'close' methods are not defined") @@ -1479,6 +1481,370 @@ done: FUNC_LEAVE_API(ret_value) } /* end H5FDwrite() */ +/*------------------------------------------------------------------------- + * Function: H5FDread_vector + * + * Purpose: Perform count reads from the specified file at the offsets + * provided in the addrs array, with the lengths and memory + * types provided in the sizes and types arrays. Data read + * is returned in the buffers provided in the bufs array. + * + * All reads are done according to the data transfer property + * list dxpl_id (which may be the constant H5P_DEFAULT). + * + * Return: Success: SUCCEED + * All reads have completed successfully, and + * the results havce been into the supplied + * buffers. + * + * Failure: FAIL + * The contents of supplied buffers are undefined. + * + * Programmer: JRM -- 6/10/20 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +herr_t +H5FDread_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], + size_t sizes[], void *bufs[] /* out */) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE7("e", "*#iIu*Mt*a*zx", file, dxpl_id, count, types, addrs, sizes, bufs); + + /* Check arguments */ + if (!file) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL") + + if (!file->cls) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL") + + if ((!types) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "types parameter can't be NULL if count is positive") + + if ((!addrs) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "addrs parameter can't be NULL if count is positive") + + if ((!sizes) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes parameter can't be NULL if count is positive") + + if ((!bufs) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter 
can't be NULL if count is positive") + + if ((count > 0) && (sizes[0] == 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0") + + if ((count > 0) && (types[0] == H5FD_MEM_NOLIST)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "count[0] can't be H5FD_MEM_NOLIST") + + /* Get the default dataset transfer property list if the user + * didn't provide one + */ + if (H5P_DEFAULT == dxpl_id) { + dxpl_id = H5P_DATASET_XFER_DEFAULT; + } + else { + if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") + } + + /* Set DXPL for operation */ + H5CX_set_dxpl(dxpl_id); + + /* Call private function */ + /* (Note compensating for base addresses addition in internal routine) */ + if (H5FD_read_vector(file, count, types, addrs, sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "file vector read request failed") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5FDread_vector() */ + +/*------------------------------------------------------------------------- + * Function: H5FDwrite_vector + * + * Purpose: Perform count writes to the specified file at the offsets + * provided in the addrs array, with the lengths and memory + * types provided in the sizes and types arrays. Data to be + * written is in the buffers provided in the bufs array. + * + * All writes are done according to the data transfer property + * list dxpl_id (which may be the constant H5P_DEFAULT). + * + * Return: Success: SUCCEED + * All writes have completed successfully + * + * Failure: FAIL + * One or more of the writes failed. + * + * Programmer: JRM -- 6/10/20 + * + * Changes: None. 
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5FDwrite_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], + size_t sizes[], const void *bufs[] /* in */) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE7("e", "*#iIu*Mt*a*z**x", file, dxpl_id, count, types, addrs, sizes, bufs); + + /* Check arguments */ + if (!file) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL") + + if (!file->cls) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL") + + if ((!types) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "types parameter can't be NULL if count is positive") + + if ((!addrs) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "addrs parameter can't be NULL if count is positive") + + if ((!sizes) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes parameter can't be NULL if count is positive") + + if ((!bufs) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter can't be NULL if count is positive") + + if ((count > 0) && (sizes[0] == 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0") + + if ((count > 0) && (types[0] == H5FD_MEM_NOLIST)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "count[0] can't be H5FD_MEM_NOLIST") + + /* Get the default dataset transfer property list if the user didn't provide one */ + if (H5P_DEFAULT == dxpl_id) { + dxpl_id = H5P_DATASET_XFER_DEFAULT; + } + else { + if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") + } + + /* Set DXPL for operation */ + H5CX_set_dxpl(dxpl_id); + + /* Call private function */ + /* (Note compensating for base address addition in internal routine) */ + if (H5FD_write_vector(file, count, types, addrs, sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "file vector write request 
failed") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5FDwrite_vector() */ + +/*------------------------------------------------------------------------- + * Function: H5FDread_selection + * + * Purpose: Perform count reads from the specified file at the + * locations selected in the dataspaces in the file_spaces + * array, with each of those dataspaces starting at the file + * address specified by the corresponding element of the + * offsets array, and with the size of each element in the + * dataspace specified by the corresponding element of the + * element_sizes array. The memory type provided by type is + * the same for all selections. Data read is returned in + * the locations selected in the dataspaces in the + * mem_spaces array, within the buffers provided in the + * corresponding elements of the bufs array. + * + * If i > 0 and element_sizes[i] == 0, presume + * element_sizes[n] = element_sizes[i-1] for all n >= i and + * < count. + * + * If the underlying VFD supports selection reads, pass the + * call through directly. + * + * If it doesn't, convert the selection read into a sequence + * of individual reads. + * + * All reads are done according to the data transfer property + * list dxpl_id (which may be the constant H5P_DEFAULT). + * + * Return: Success: SUCCEED + * All reads have completed successfully, and + * the results havce been into the supplied + * buffers. + * + * Failure: FAIL + * The contents of supplied buffers are undefined. + * + * Programmer: NAF -- 5/19/21 + * + * Changes: None. 
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5FDread_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, hid_t mem_space_ids[], + hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], void *bufs[] /* out */) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE9("e", "*#MtiIu*i*i*a*zx", file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, + element_sizes, bufs); + + /* Check arguments */ + if (!file) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL") + + if (!file->cls) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL") + + if ((!mem_space_ids) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mem_spaces parameter can't be NULL if count is positive") + + if ((!file_space_ids) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file_spaces parameter can't be NULL if count is positive") + + if ((!offsets) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "offsets parameter can't be NULL if count is positive") + + if ((!element_sizes) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "element_sizes parameter can't be NULL if count is positive") + + if ((!bufs) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter can't be NULL if count is positive") + + if ((count > 0) && (element_sizes[0] == 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0") + + if ((count > 0) && (bufs[0] == NULL)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs[0] can't be NULL") + + /* Get the default dataset transfer property list if the user didn't provide one */ + if (H5P_DEFAULT == dxpl_id) { + dxpl_id = H5P_DATASET_XFER_DEFAULT; + } + else { + if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") + } + + /* Set DXPL for operation */ + 
H5CX_set_dxpl(dxpl_id); + + /* Call private function */ + /* (Note compensating for base address addition in internal routine) */ + if (H5FD_read_selection_id(file, type, count, mem_space_ids, file_space_ids, offsets, element_sizes, + bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "file selection read request failed") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5FDread_selection() */ + +/*------------------------------------------------------------------------- + * Function: H5FDwrite_selection + * + * Purpose: Perform count writes to the specified file at the + * locations selected in the dataspaces in the file_spaces + * array, with each of those dataspaces starting at the file + * address specified by the corresponding element of the + * offsets array, and with the size of each element in the + * dataspace specified by the corresponding element of the + * element_sizes array. The memory type provided by type is + * the same for all selections. Data write is from + * the locations selected in the dataspaces in the + * mem_spaces array, within the buffers provided in the + * corresponding elements of the bufs array. + * + * If i > 0 and element_sizes[i] == 0, presume + * element_sizes[n] = element_sizes[i-1] for all n >= i and + * < count. + * + * If the underlying VFD supports selection writes, pass the + * call through directly. + * + * If it doesn't, convert the selection write into a sequence + * of individual writes. + * + * All writes are done according to the data transfer property + * list dxpl_id (which may be the constant H5P_DEFAULT). + * + * Return: Success: SUCCEED + * All writes have completed successfully + * + * Failure: FAIL + * One or more of the writes failed. + * + * Programmer: NAF -- 5/14/21 + * + * Changes: None. 
+ * + *------------------------------------------------------------------------- + */ +herr_t +H5FDwrite_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, hid_t mem_space_ids[], + hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], const void *bufs[]) +{ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_API(FAIL) + H5TRACE9("e", "*#MtiIu*i*i*a*z**x", file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, + element_sizes, bufs); + + /* Check arguments */ + if (!file) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file pointer cannot be NULL") + + if (!file->cls) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file class pointer cannot be NULL") + + if ((!mem_space_ids) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mem_spaces parameter can't be NULL if count is positive") + + if ((!file_space_ids) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "file_spaces parameter can't be NULL if count is positive") + + if ((!offsets) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "offsets parameter can't be NULL if count is positive") + + if ((!element_sizes) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, + "element_sizes parameter can't be NULL if count is positive") + + if ((!bufs) && (count > 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs parameter can't be NULL if count is positive") + + if ((count > 0) && (element_sizes[0] == 0)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sizes[0] can't be 0") + + if ((count > 0) && (bufs[0] == NULL)) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "bufs[0] can't be NULL") + + /* Get the default dataset transfer property list if the user didn't provide one */ + if (H5P_DEFAULT == dxpl_id) { + dxpl_id = H5P_DATASET_XFER_DEFAULT; + } + else { + if (TRUE != H5P_isa_class(dxpl_id, H5P_DATASET_XFER)) + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list") + } + + /* Set DXPL for operation */ + 
H5CX_set_dxpl(dxpl_id); + + /* Call private function */ + /* (Note compensating for base address addition in internal routine) */ + if (H5FD_write_selection_id(file, type, count, mem_space_ids, file_space_ids, offsets, element_sizes, + bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "file selection write request failed") + +done: + FUNC_LEAVE_API(ret_value) +} /* end H5FDwrite_selection() */ + /*------------------------------------------------------------------------- * Function: H5FDflush * @@ -1827,7 +2193,7 @@ H5FD_ctl(H5FD_t *file, uint64_t op_code, uint64_t flags, const void *input, void else if (flags & H5FD_CTL__FAIL_IF_UNKNOWN_FLAG) { HGOTO_ERROR(H5E_VFL, H5E_FCNTL, FAIL, - "VFD ctl request failed (no ctl callback and fail if unknown flag is set)") + "VFD ctl request failed (no ctl and fail if unknown flag is set)") } done: diff --git a/src/H5FDcore.c b/src/H5FDcore.c index a1750ee2c7..0604316c7b 100644 --- a/src/H5FDcore.c +++ b/src/H5FDcore.c @@ -152,6 +152,7 @@ static herr_t H5FD__core_delete(const char *filename, hid_t fapl_id); static inline const H5FD_core_fapl_t *H5FD__core_get_default_config(void); static const H5FD_class_t H5FD_core_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_CORE_VALUE, /* value */ "core", /* name */ MAXADDR, /* maxaddr */ @@ -180,6 +181,10 @@ static const H5FD_class_t H5FD_core_g = { H5FD__core_get_handle, /* get_handle */ H5FD__core_read, /* read */ H5FD__core_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ H5FD__core_flush, /* flush */ H5FD__core_truncate, /* truncate */ H5FD__core_lock, /* lock */ diff --git a/src/H5FDdevelop.h b/src/H5FDdevelop.h index be3b54572d..f5b32ed354 100644 --- a/src/H5FDdevelop.h +++ b/src/H5FDdevelop.h @@ -25,6 +25,9 @@ /* Public Macros */ /*****************/ +/* H5FD_class_t struct version */ +#define H5FD_CLASS_VERSION 0x01 /* File driver struct version */ + /* Map "fractal heap" header blocks to 
'ohdr' type file memory, since its * a fair amount of work to add a new kind of file memory and they are similar * enough to object headers and probably too minor to deserve their own type. @@ -160,6 +163,7 @@ typedef struct H5FD_t H5FD_t; /* Class information for each file driver */ typedef struct H5FD_class_t { + unsigned version; /**< File driver class struct version # */ H5FD_class_value_t value; const char * name; haddr_t maxaddr; @@ -188,6 +192,16 @@ typedef struct H5FD_class_t { herr_t (*get_handle)(H5FD_t *file, hid_t fapl, void **file_handle); herr_t (*read)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, void *buffer); herr_t (*write)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl, haddr_t addr, size_t size, const void *buffer); + herr_t (*read_vector)(H5FD_t *file, hid_t dxpl, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], + size_t sizes[], void *bufs[]); + herr_t (*write_vector)(H5FD_t *file, hid_t dxpl, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], + size_t sizes[], const void *bufs[]); + herr_t (*read_selection)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, size_t count, hid_t mem_spaces[], + hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[], + void *bufs[] /*out*/); + herr_t (*write_selection)(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, size_t count, hid_t mem_spaces[], + hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[], + const void *bufs[] /*in*/); herr_t (*flush)(H5FD_t *file, hid_t dxpl_id, hbool_t closing); herr_t (*truncate)(H5FD_t *file, hid_t dxpl_id, hbool_t closing); herr_t (*lock)(H5FD_t *file, hbool_t rw); @@ -257,6 +271,16 @@ H5_DLL herr_t H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t ad void *buf /*out*/); H5_DLL herr_t H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size, const void *buf); +H5_DLL herr_t H5FDread_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[], + haddr_t addrs[], size_t sizes[], void *bufs[] 
/* out */); +H5_DLL herr_t H5FDwrite_vector(H5FD_t *file, hid_t dxpl_id, uint32_t count, H5FD_mem_t types[], + haddr_t addrs[], size_t sizes[], const void *bufs[] /* in */); +H5_DLL herr_t H5FDread_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, + hid_t mem_spaces[], hid_t file_spaces[], haddr_t offsets[], + size_t element_sizes[], void *bufs[] /* out */); +H5_DLL herr_t H5FDwrite_selection(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, + hid_t mem_spaces[], hid_t file_spaces[], haddr_t offsets[], + size_t element_sizes[], const void *bufs[]); H5_DLL herr_t H5FDflush(H5FD_t *file, hid_t dxpl_id, hbool_t closing); H5_DLL herr_t H5FDtruncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing); H5_DLL herr_t H5FDlock(H5FD_t *file, hbool_t rw); diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c index 7a431170f3..25ee970cf9 100644 --- a/src/H5FDdirect.c +++ b/src/H5FDdirect.c @@ -142,6 +142,7 @@ static herr_t H5FD__direct_unlock(H5FD_t *_file); static herr_t H5FD__direct_delete(const char *filename, hid_t fapl_id); static const H5FD_class_t H5FD_direct_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_DIRECT_VALUE, /* value */ "direct", /* name */ MAXADDR, /* maxaddr */ @@ -170,6 +171,10 @@ static const H5FD_class_t H5FD_direct_g = { H5FD__direct_get_handle, /* get_handle */ H5FD__direct_read, /* read */ H5FD__direct_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ NULL, /* flush */ H5FD__direct_truncate, /* truncate */ H5FD__direct_lock, /* lock */ diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c index 3f0080e927..66a1a68af1 100644 --- a/src/H5FDfamily.c +++ b/src/H5FDfamily.c @@ -112,6 +112,7 @@ static herr_t H5FD__family_delete(const char *filename, hid_t fapl_id); /* The class struct */ static const H5FD_class_t H5FD_family_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_FAMILY_VALUE, /* value */ "family", /* name */ HADDR_MAX, /* maxaddr */ @@ 
-140,6 +141,10 @@ static const H5FD_class_t H5FD_family_g = { H5FD__family_get_handle, /* get_handle */ H5FD__family_read, /* read */ H5FD__family_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ H5FD__family_flush, /* flush */ H5FD__family_truncate, /* truncate */ H5FD__family_lock, /* lock */ diff --git a/src/H5FDhdfs.c b/src/H5FDhdfs.c index 4927a402ae..f0ffb62117 100644 --- a/src/H5FDhdfs.c +++ b/src/H5FDhdfs.c @@ -278,6 +278,7 @@ static herr_t H5FD__hdfs_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing static herr_t H5FD__hdfs_validate_config(const H5FD_hdfs_fapl_t *fa); static const H5FD_class_t H5FD_hdfs_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_HDFS_VALUE, /* value */ "hdfs", /* name */ MAXADDR, /* maxaddr */ @@ -306,6 +307,10 @@ static const H5FD_class_t H5FD_hdfs_g = { H5FD__hdfs_get_handle, /* get_handle */ H5FD__hdfs_read, /* read */ H5FD__hdfs_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ NULL, /* flush */ H5FD__hdfs_truncate, /* truncate */ NULL, /* lock */ diff --git a/src/H5FDint.c b/src/H5FDint.c index d7fe33c443..0c3fe9ea3d 100644 --- a/src/H5FDint.c +++ b/src/H5FDint.c @@ -34,6 +34,7 @@ #include "H5Eprivate.h" /* Error handling */ #include "H5Fprivate.h" /* File access */ #include "H5FDpkg.h" /* File Drivers */ +#include "H5FLprivate.h" /* Free Lists */ #include "H5Iprivate.h" /* IDs */ #include "H5PLprivate.h" /* Plugins */ @@ -41,10 +42,51 @@ /* Local Macros */ /****************/ +/* Length of sequence lists requested from dataspace selections */ +#define H5FD_SEQ_LIST_LEN 128 + +/* Length of stack allocated arrays for building vector I/O operations. + * Corresponds to the number of contiguous blocks in a selection I/O operation. + * If more space is needed dynamic allocation will be used instead. 
*/ +#define H5FD_LOCAL_VECTOR_LEN 8 + +/* Length of stack allocated arrays for dataspace IDs/structs for selection I/O + * operations. Corresponds to the number of file selection/memory selection + * pairs (along with addresses, etc.) in a selection I/O operation. If more + * space is needed dynamic allocation will be used instead */ +#define H5FD_LOCAL_SEL_ARR_LEN 8 + /******************/ /* Local Typedefs */ /******************/ +/************************************************************************* + * + * H5FD_vsrt_tmp_t + * + * Structure used to store vector I/O request addresses and the associated + * indexes in the addrs[] array for the purpose of determine the sorted + * order. + * + * This is done by allocating an array of H5FD_vsrt_tmp_t of length + * count, loading it with the contents of the addrs[] array and the + * associated indices, and then sorting it. + * + * This sorted array of H5FD_vsrt_tmp_t is then used to populate sorted + * versions of the types[], addrs[], sizes[] and bufs[] vectors. + * + * addr: haddr_t containing the value of addrs[i], + * + * index: integer containing the value of i used to obtain the + * value of the addr field from the addrs[] vector. + * + *************************************************************************/ + +typedef struct H5FD_vsrt_tmp_t { + haddr_t addr; + int index; +} H5FD_vsrt_tmp_t; + /* Information needed for iterating over the registered VFD hid_t IDs. 
* The name or value of the new VFD that is being registered is stored * in the name (or value) field and the found_id field is initialized to @@ -66,7 +108,13 @@ typedef struct H5FD_get_driver_ud_t { /********************/ /* Local Prototypes */ /********************/ -static int H5FD__get_driver_cb(void *obj, hid_t id, void *_op_data); +static int H5FD__get_driver_cb(void *obj, hid_t id, void *_op_data); +static herr_t H5FD__read_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, + H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[], + size_t element_sizes[], void *bufs[] /* out */); +static herr_t H5FD__write_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, + H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[], + size_t element_sizes[], const void *bufs[]); /*********************/ /* Package Variables */ @@ -80,6 +128,9 @@ static int H5FD__get_driver_cb(void *obj, hid_t id, void *_op_data); /* Local Variables */ /*******************/ +/* Declare extern free list to manage the H5S_sel_iter_t struct */ +H5FL_EXTERN(H5S_sel_iter_t); + /*------------------------------------------------------------------------- * Function: H5FD_locate_signature * @@ -259,6 +310,1689 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_write() */ +/*------------------------------------------------------------------------- + * Function: H5FD_read_vector + * + * Purpose: Private version of H5FDread_vector() + * + * Perform count reads from the specified file at the offsets + * provided in the addrs array, with the lengths and memory + * types provided in the sizes and types arrays. Data read + * is returned in the buffers provided in the bufs array. + * + * If i > 0 and sizes[i] == 0, presume sizes[n] = sizes[i-1] + * for all n >= i and < count. + * + * Similarly, if i > 0 and types[i] == H5FD_MEM_NOLIST, + * presume types[n] = types[i-1] for all n >= i and < count. 
+ *
+ *              If the underlying VFD supports vector reads, pass the
+ *              call through directly.
+ *
+ *              If it doesn't, convert the vector read into a sequence
+ *              of individual reads.
+ *
+ *              Note that it is not in general possible to convert a
+ *              vector read into a selection read, because each element
+ *              in the vector read may have a different memory type.
+ *              In contrast, selection reads are of a single type.
+ *
+ * Return:      Success:    SUCCEED
+ *                          All reads have completed successfully, and
+ *                          the results have been read into the supplied
+ *                          buffers.
+ *
+ *              Failure:    FAIL
+ *                          The contents of supplied buffers are undefined.
+ *
+ * Programmer:  JRM -- 6/10/20
+ *
+ * Changes:     None
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[],
+                 void *bufs[] /* out */)
+{
+    hbool_t    addrs_cooked = FALSE;
+    hbool_t    extend_sizes = FALSE;
+    hbool_t    extend_types = FALSE;
+    uint32_t   i;
+    size_t     size;
+    H5FD_mem_t type;
+    hid_t      dxpl_id   = H5I_INVALID_HID; /* DXPL for operation */
+    herr_t     ret_value = SUCCEED;         /* Return value */
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+    HDassert(file);
+    HDassert(file->cls);
+    HDassert((types) || (count == 0));
+    HDassert((addrs) || (count == 0));
+    HDassert((sizes) || (count == 0));
+    HDassert((bufs) || (count == 0));
+
+    /* verify that the first elements of the sizes and types arrays are
+     * valid.
+     */
+    HDassert((count == 0) || (sizes[0] != 0));
+    HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+
+    /* Get proper DXPL for I/O */
+    dxpl_id = H5CX_get_dxpl();
+
+#ifndef H5_HAVE_PARALLEL
+    /* The no-op case
+     *
+     * Do not return early for Parallel mode since the I/O could be a
+     * collective transfer.
+     */
+    if (0 == count) {
+        HGOTO_DONE(SUCCEED)
+    }
+#endif /* H5_HAVE_PARALLEL */
+
+    if (file->base_addr > 0) {
+
+        /* apply the base_addr offset to the addrs array.  Must undo before
+         * we return.
+ */ + for (i = 0; i < count; i++) { + + addrs[i] += file->base_addr; + } + addrs_cooked = TRUE; + } + + /* If the file is open for SWMR read access, allow access to data past + * the end of the allocated space (the 'eoa'). This is done because the + * eoa stored in the file's superblock might be out of sync with the + * objects being written within the file by the application performing + * SWMR write operations. + */ + if ((!(file->access_flags & H5F_ACC_SWMR_READ)) && (count > 0)) { + haddr_t eoa; + + extend_sizes = FALSE; + extend_types = FALSE; + + for (i = 0; i < count; i++) { + + if (!extend_sizes) { + + if (sizes[i] == 0) { + + extend_sizes = TRUE; + size = sizes[i - 1]; + } + else { + + size = sizes[i]; + } + } + + if (!extend_types) { + + if (types[i] == H5FD_MEM_NOLIST) { + + extend_types = TRUE; + type = types[i - 1]; + } + else { + + type = types[i]; + } + } + + if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type))) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed") + + if ((addrs[i] + size) > eoa) + + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, + "addr overflow, addrs[%d] = %llu, sizes[%d] = %llu, eoa = %llu", (int)i, + (unsigned long long)(addrs[i]), (int)i, (unsigned long long)size, + (unsigned long long)eoa) + } + } + + /* if the underlying VFD supports vector read, make the call */ + if (file->cls->read_vector) { + + if ((file->cls->read_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0) + + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed") + } + else { + + /* otherwise, implement the vector read as a sequence of regular + * read calls. 
+ */ + extend_sizes = FALSE; + extend_types = FALSE; + + for (i = 0; i < count; i++) { + + /* we have already verified that sizes[0] != 0 and + * types[0] != H5FD_MEM_NOLIST + */ + + if (!extend_sizes) { + + if (sizes[i] == 0) { + + extend_sizes = TRUE; + size = sizes[i - 1]; + } + else { + + size = sizes[i]; + } + } + + if (!extend_types) { + + if (types[i] == H5FD_MEM_NOLIST) { + + extend_types = TRUE; + type = types[i - 1]; + } + else { + + type = types[i]; + } + } + + if ((file->cls->read)(file, type, dxpl_id, addrs[i], size, bufs[i]) < 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed") + } + } + +done: + /* undo the base addr offset to the addrs array if necessary */ + if (addrs_cooked) { + + HDassert(file->base_addr > 0); + + for (i = 0; i < count; i++) { + + addrs[i] -= file->base_addr; + } + } + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_read_vector() */ + +/*------------------------------------------------------------------------- + * Function: H5FD_write_vector + * + * Purpose: Private version of H5FDwrite_vector() + * + * Perform count writes to the specified file at the offsets + * provided in the addrs array, with the lengths and memory + * types provided in the sizes and types arrays. Data written + * is taken from the buffers provided in the bufs array. + * + * If i > 0 and sizes[i] == 0, presume sizes[n] = sizes[i-1] + * for all n >= i and < count. + * + * Similarly, if i > 0 and types[i] == H5FD_MEM_NOLIST, + * presume types[n] = types[i-1] for all n >= i and < count. + * + * If the underlying VFD supports vector writes, pass the + * call through directly. + * + * If it doesn't, convert the vector write into a sequence + * of individual writes. + * + * Note that it is not in general possible to convert a + * vector write into a selection write, because each element + * in the vector write may have a different memory type. + * In contrast, selection writes are of a single type. 
+ * + * Return: Success: SUCCEED + * All writes have completed successfully. + * + * Failure: FAIL + * One or more writes failed. + * + * Programmer: JRM -- 6/10/20 + * + * Changes: None + * + *------------------------------------------------------------------------- + */ +herr_t +H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + const void *bufs[]) +{ + hbool_t addrs_cooked = FALSE; + hbool_t extend_sizes = FALSE; + hbool_t extend_types = FALSE; + uint32_t i; + size_t size; + H5FD_mem_t type; + hid_t dxpl_id; /* DXPL for operation */ + haddr_t eoa = HADDR_UNDEF; /* EOA for file */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(file); + HDassert(file->cls); + HDassert((types) || (count == 0)); + HDassert((addrs) || (count == 0)); + HDassert((sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* verify that the first elements of the sizes and types arrays are + * valid. + */ + HDassert((count == 0) || (sizes[0] != 0)); + HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST)); + + /* Get proper DXPL for I/O */ + dxpl_id = H5CX_get_dxpl(); + +#ifndef H5_HAVE_PARALLEL + /* The no-op case + * + * Do not return early for Parallel mode since the I/O could be a + * collective transfer. + */ + if (0 == count) + HGOTO_DONE(SUCCEED) +#endif /* H5_HAVE_PARALLEL */ + + if (file->base_addr > 0) { + + /* apply the base_addr offset to the addrs array. Must undo before + * we return. 
+ */ + for (i = 0; i < count; i++) { + + addrs[i] += file->base_addr; + } + addrs_cooked = TRUE; + } + + extend_sizes = FALSE; + extend_types = FALSE; + + for (i = 0; i < count; i++) { + + if (!extend_sizes) { + + if (sizes[i] == 0) { + + extend_sizes = TRUE; + size = sizes[i - 1]; + } + else { + + size = sizes[i]; + } + } + + if (!extend_types) { + + if (types[i] == H5FD_MEM_NOLIST) { + + extend_types = TRUE; + type = types[i - 1]; + } + else { + + type = types[i]; + } + } + + if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type))) + + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed") + + if ((addrs[i] + size) > eoa) + + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addrs[%d] = %llu, sizes[%d] = %llu, \ + eoa = %llu", + (int)i, (unsigned long long)(addrs[i]), (int)i, (unsigned long long)size, + (unsigned long long)eoa) + } + + /* if the underlying VFD supports vector write, make the call */ + if (file->cls->write_vector) { + + if ((file->cls->write_vector)(file, dxpl_id, count, types, addrs, sizes, bufs) < 0) + + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed") + } + else { + /* otherwise, implement the vector write as a sequence of regular + * write calls. 
+         */
+        extend_sizes = FALSE;
+        extend_types = FALSE;
+
+        for (i = 0; i < count; i++) {
+
+            /* we have already verified that sizes[0] != 0 and
+             * types[0] != H5FD_MEM_NOLIST
+             */
+
+            if (!extend_sizes) {
+
+                if (sizes[i] == 0) {
+
+                    extend_sizes = TRUE;
+                    size         = sizes[i - 1];
+                }
+                else {
+
+                    size = sizes[i];
+                }
+            }
+
+            if (!extend_types) {
+
+                if (types[i] == H5FD_MEM_NOLIST) {
+
+                    extend_types = TRUE;
+                    type         = types[i - 1];
+                }
+                else {
+
+                    type = types[i];
+                }
+            }
+
+            if ((file->cls->write)(file, type, dxpl_id, addrs[i], size, bufs[i]) < 0)
+                HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write request failed")
+        }
+    }
+
+done:
+    /* undo the base addr offset to the addrs array if necessary */
+    if (addrs_cooked) {
+
+        HDassert(file->base_addr > 0);
+
+        for (i = 0; i < count; i++) {
+
+            addrs[i] -= file->base_addr;
+        }
+    }
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_write_vector() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD__read_selection_translate
+ *
+ * Purpose:     Translates a selection read call to a vector read call if
+ *              vector reads are supported, or a series of scalar read
+ *              calls otherwise.
+ *
+ * Return:      Success:    SUCCEED
+ *                          All reads have completed successfully, and
+ *                          the results have been read into the
+ *                          supplied buffers.
+ *
+ *              Failure:    FAIL
+ *                          The contents of supplied buffers are undefined.
+ * + * Programmer: NAF -- 5/13/21 + * + * Changes: None + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD__read_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, + H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[], + size_t element_sizes[], void *bufs[] /* out */) +{ + hbool_t extend_sizes = FALSE; + hbool_t extend_bufs = FALSE; + uint32_t i; + size_t element_size; + void * buf; + hbool_t use_vector = FALSE; + haddr_t addrs_local[H5FD_LOCAL_VECTOR_LEN]; + haddr_t * addrs = addrs_local; + size_t sizes_local[H5FD_LOCAL_VECTOR_LEN]; + size_t * sizes = sizes_local; + void * vec_bufs_local[H5FD_LOCAL_VECTOR_LEN]; + void ** vec_bufs = vec_bufs_local; + hsize_t file_off[H5FD_SEQ_LIST_LEN]; + size_t file_len[H5FD_SEQ_LIST_LEN]; + hsize_t mem_off[H5FD_SEQ_LIST_LEN]; + size_t mem_len[H5FD_SEQ_LIST_LEN]; + size_t file_seq_i; + size_t mem_seq_i; + size_t file_nseq; + size_t mem_nseq; + size_t io_len; + size_t nelmts; + hssize_t hss_nelmts; + size_t seq_nelem; + H5S_sel_iter_t *file_iter = NULL; + H5S_sel_iter_t *mem_iter = NULL; + hbool_t file_iter_init = FALSE; + hbool_t mem_iter_init = FALSE; + H5FD_mem_t types[2] = {type, H5FD_MEM_NOLIST}; + size_t vec_arr_nalloc = H5FD_LOCAL_VECTOR_LEN; + size_t vec_arr_nused = 0; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(file); + HDassert(file->cls); + HDassert(mem_spaces); + HDassert(file_spaces); + HDassert(offsets); + HDassert(element_sizes); + HDassert(bufs); + + /* Verify that the first elements of the element_sizes and bufs arrays are + * valid. 
*/ + HDassert(element_sizes[0] != 0); + HDassert(bufs[0] != NULL); + + /* Check if we're using vector I/O */ + use_vector = file->cls->read_vector != NULL; + + /* Allocate sequence lists for memory and file spaces */ + if (NULL == (file_iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate file selection iterator") + if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate memory selection iterator") + + /* Loop over dataspaces */ + for (i = 0; i < count; i++) { + + /* we have already verified that element_sizes[0] != 0 and bufs[0] + * != NULL */ + + if (!extend_sizes) { + + if (element_sizes[i] == 0) { + + extend_sizes = TRUE; + element_size = element_sizes[i - 1]; + } + else { + + element_size = element_sizes[i]; + } + } + + if (!extend_bufs) { + + if (bufs[i] == NULL) { + + extend_bufs = TRUE; + buf = bufs[i - 1]; + } + else { + + buf = bufs[i]; + } + } + + /* Initialize sequence lists for memory and file spaces */ + if (H5S_select_iter_init(file_iter, file_spaces[i], element_size, 0) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for file space") + file_iter_init = TRUE; + if (H5S_select_iter_init(mem_iter, mem_spaces[i], element_size, 0) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for memory space") + mem_iter_init = TRUE; + + /* Get the number of elements in selection */ + if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(file_spaces[i])) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected") + H5_CHECKED_ASSIGN(nelmts, size_t, hss_nelmts, hssize_t); + +#ifndef NDEBUG + /* Verify mem space has the same number of elements */ + { + if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(mem_spaces[i])) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected") + HDassert((hssize_t)nelmts == hss_nelmts); + } +#endif /* NDEBUG */ 
+ + /* Initialize values so sequence lists are retrieved on the first + * iteration */ + file_seq_i = H5FD_SEQ_LIST_LEN; + mem_seq_i = H5FD_SEQ_LIST_LEN; + file_nseq = 0; + mem_nseq = 0; + + /* Loop until all elements are processed */ + while (file_seq_i < file_nseq || nelmts > 0) { + /* Fill/refill file sequence list if necessary */ + if (file_seq_i == H5FD_SEQ_LIST_LEN) { + if (H5S_SELECT_ITER_GET_SEQ_LIST(file_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &file_nseq, + &seq_nelem, file_off, file_len) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed") + HDassert(file_nseq > 0); + + nelmts -= seq_nelem; + file_seq_i = 0; + } + HDassert(file_seq_i < file_nseq); + + /* Fill/refill memory sequence list if necessary */ + if (mem_seq_i == H5FD_SEQ_LIST_LEN) { + if (H5S_SELECT_ITER_GET_SEQ_LIST(mem_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &mem_nseq, &seq_nelem, + mem_off, mem_len) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed") + HDassert(mem_nseq > 0); + + mem_seq_i = 0; + } + HDassert(mem_seq_i < mem_nseq); + + /* Calculate length of this IO */ + io_len = MIN(file_len[file_seq_i], mem_len[mem_seq_i]); + + /* Check if we're using vector I/O */ + if (use_vector) { + /* Check if we need to extend the arrays */ + if (vec_arr_nused == vec_arr_nalloc) { + /* Check if we're using the static arrays */ + if (addrs == addrs_local) { + HDassert(sizes == sizes_local); + HDassert(vec_bufs == vec_bufs_local); + + /* Allocate dynamic arrays */ + if (NULL == (addrs = H5MM_malloc(sizeof(addrs_local) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for address list") + if (NULL == (sizes = H5MM_malloc(sizeof(sizes_local) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for size list") + if (NULL == (vec_bufs = H5MM_malloc(sizeof(vec_bufs_local) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for buffer list") + + 
/* Copy the existing data */ + (void)H5MM_memcpy(addrs, addrs_local, sizeof(addrs_local)); + (void)H5MM_memcpy(sizes, sizes_local, sizeof(sizes_local)); + (void)H5MM_memcpy(vec_bufs, vec_bufs_local, sizeof(vec_bufs_local)); + } + else { + void *tmp_ptr; + + /* Reallocate arrays */ + if (NULL == (tmp_ptr = H5MM_realloc(addrs, vec_arr_nalloc * sizeof(*addrs) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory reallocation failed for address list") + addrs = tmp_ptr; + if (NULL == (tmp_ptr = H5MM_realloc(sizes, vec_arr_nalloc * sizeof(*sizes) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory reallocation failed for size list") + sizes = tmp_ptr; + if (NULL == + (tmp_ptr = H5MM_realloc(vec_bufs, vec_arr_nalloc * sizeof(*vec_bufs) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory reallocation failed for buffer list") + vec_bufs = tmp_ptr; + } + + /* Record that we've doubled the array sizes */ + vec_arr_nalloc *= 2; + } + + /* Add this segment to vector read list */ + addrs[vec_arr_nused] = offsets[i] + file_off[file_seq_i]; + sizes[vec_arr_nused] = io_len; + vec_bufs[vec_arr_nused] = (void *)((uint8_t *)buf + mem_off[mem_seq_i]); + vec_arr_nused++; + } + else + /* Issue scalar read call */ + if ((file->cls->read)(file, type, dxpl_id, offsets[i] + file_off[file_seq_i], io_len, + (void *)((uint8_t *)buf + mem_off[mem_seq_i])) < 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed") + + /* Update file sequence */ + if (io_len == file_len[file_seq_i]) + file_seq_i++; + else { + file_off[file_seq_i] += io_len; + file_len[file_seq_i] -= io_len; + } + + /* Update memory sequence */ + if (io_len == mem_len[mem_seq_i]) + mem_seq_i++; + else { + mem_off[mem_seq_i] += io_len; + mem_len[mem_seq_i] -= io_len; + } + } + + /* Make sure both memory and file sequences terminated at the same time */ + if (mem_seq_i < mem_nseq) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "file selection terminated before memory 
selection") + + /* Terminate iterators */ + if (H5S_SELECT_ITER_RELEASE(file_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator") + file_iter_init = FALSE; + if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator") + mem_iter_init = FALSE; + } + + /* Issue vector read call if appropriate */ + if (use_vector) { + H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t) + if ((file->cls->read_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) < + 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read vector request failed") + } + +done: + /* Terminate and free iterators */ + if (file_iter) { + if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator") + file_iter = H5FL_FREE(H5S_sel_iter_t, file_iter); + } + if (mem_iter) { + if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator") + mem_iter = H5FL_FREE(H5S_sel_iter_t, mem_iter); + } + + /* Cleanup vector arrays */ + if (use_vector) { + if (addrs != addrs_local) + addrs = H5MM_xfree(addrs); + if (sizes != sizes_local) + sizes = H5MM_xfree(sizes); + if (vec_bufs != vec_bufs_local) + vec_bufs = H5MM_xfree(vec_bufs); + } + + /* Make sure we cleaned up */ + HDassert(!addrs || addrs == addrs_local); + HDassert(!sizes || sizes == sizes_local); + HDassert(!vec_bufs || vec_bufs == vec_bufs_local); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD__read_selection_translate() */ + +/*------------------------------------------------------------------------- + * Function: H5FD_read_selection + * + * Purpose: Private version of H5FDread_selection() + * + * Perform count reads from the specified file at the + * locations selected in the dataspaces in the file_spaces + * array, with each of those 
dataspaces starting at the file
+ * address specified by the corresponding element of the
+ * offsets array, and with the size of each element in the
+ * dataspace specified by the corresponding element of the
+ * element_sizes array. The memory type provided by type is
+ * the same for all selections. Data read is returned in
+ * the locations selected in the dataspaces in the
+ * mem_spaces array, within the buffers provided in the
+ * corresponding elements of the bufs array.
+ *
+ * If i > 0 and element_sizes[i] == 0, presume
+ * element_sizes[n] = element_sizes[i-1] for all n >= i and
+ * < count.
+ *
+ * If the underlying VFD supports selection reads, pass the
+ * call through directly.
+ *
+ * If it doesn't, convert the vector read into a sequence
+ * of individual reads.
+ *
+ * Return: Success: SUCCEED
+ * All reads have completed successfully, and
+ * the results have been read into the supplied
+ * buffers.
+ *
+ * Failure: FAIL
+ * The contents of supplied buffers are undefined.
+ * + * Programmer: NAF -- 3/29/21 + * + * Changes: None + * + *------------------------------------------------------------------------- + */ +herr_t +H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces, H5S_t **file_spaces, + haddr_t offsets[], size_t element_sizes[], void *bufs[] /* out */) +{ + hbool_t offsets_cooked = FALSE; + hid_t mem_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN]; + hid_t * mem_space_ids = mem_space_ids_local; + hid_t file_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN]; + hid_t * file_space_ids = file_space_ids_local; + uint32_t num_spaces = 0; + hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + uint32_t i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(file); + HDassert(file->cls); + HDassert((mem_spaces) || (count == 0)); + HDassert((file_spaces) || (count == 0)); + HDassert((offsets) || (count == 0)); + HDassert((element_sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* Verify that the first elements of the element_sizes and bufs arrays are + * valid. */ + HDassert((count == 0) || (element_sizes[0] != 0)); + HDassert((count == 0) || (bufs[0] != NULL)); + + /* Get proper DXPL for I/O */ + dxpl_id = H5CX_get_dxpl(); + +#ifndef H5_HAVE_PARALLEL + /* The no-op case + * + * Do not return early for Parallel mode since the I/O could be a + * collective transfer. + */ + if (0 == count) { + HGOTO_DONE(SUCCEED) + } +#endif /* H5_HAVE_PARALLEL */ + + if (file->base_addr > 0) { + + /* apply the base_addr offset to the offsets array. Must undo before + * we return. + */ + for (i = 0; i < count; i++) { + + offsets[i] += file->base_addr; + } + offsets_cooked = TRUE; + } + + /* If the file is open for SWMR read access, allow access to data past + * the end of the allocated space (the 'eoa'). 
This is done because the + * eoa stored in the file's superblock might be out of sync with the + * objects being written within the file by the application performing + * SWMR write operations. + */ + /* For now at least, only check that the offset is not past the eoa, since + * looking into the highest offset in the selection (different from the + * bounds) is potentially expensive. + */ + if (!(file->access_flags & H5F_ACC_SWMR_READ)) { + haddr_t eoa; + + if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type))) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed") + + for (i = 0; i < count; i++) { + + if ((offsets[i]) > eoa) + + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu", + (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa) + } + } + + /* if the underlying VFD supports selection read, make the call */ + if (file->cls->read_selection) { + /* Allocate array of space IDs if necessary, otherwise use local + * buffers */ + if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) { + if (NULL == (mem_space_ids = H5MM_malloc(count * sizeof(hid_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + if (NULL == (file_space_ids = H5MM_malloc(count * sizeof(hid_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + } + + /* Create IDs for all dataspaces */ + for (; num_spaces < count; num_spaces++) { + if ((mem_space_ids[num_spaces] = H5I_register(H5I_DATASPACE, mem_spaces[num_spaces], TRUE)) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID") + + if ((file_space_ids[num_spaces] = H5I_register(H5I_DATASPACE, file_spaces[num_spaces], TRUE)) < + 0) { + if (H5I_dec_app_ref(mem_space_ids[num_spaces]) < 0) + HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id") + HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace 
ID")
+ }
+ }
+
+ if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets,
+ element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed")
+ }
+ else
+ /* Otherwise, implement the selection read as a sequence of regular
+ * or vector read calls.
+ */
+ if (H5FD__read_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets,
+ element_sizes, bufs) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "translation to vector or scalar read failed")
+
+done:
+ /* undo the base addr offset to the offsets array if necessary */
+ if (offsets_cooked) {
+
+ HDassert(file->base_addr > 0);
+
+ for (i = 0; i < count; i++) {
+
+ offsets[i] -= file->base_addr;
+ }
+ }
+
+ /* Cleanup dataspace arrays */
+ for (i = 0; i < num_spaces; i++) {
+ if (H5I_dec_app_ref(mem_space_ids[i]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ if (H5I_dec_app_ref(file_space_ids[i]) < 0)
+ HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id")
+ }
+ if (mem_space_ids != mem_space_ids_local)
+ mem_space_ids = H5MM_xfree(mem_space_ids);
+ if (file_space_ids != file_space_ids_local)
+ file_space_ids = H5MM_xfree(file_space_ids);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_read_selection() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5FD_read_selection_id
+ *
+ * Purpose: Like H5FD_read_selection(), but takes hid_t arrays instead
+ * of H5S_t * arrays for the dataspaces.
+ *
+ * Return: Success: SUCCEED
+ * All reads have completed successfully, and
+ * the results have been read into the supplied
+ * buffers.
+ *
+ * Failure: FAIL
+ * The contents of supplied buffers are undefined.
+ * + * Programmer: NAF -- 5/19/21 + * + * Changes: None + * + *------------------------------------------------------------------------- + */ +herr_t +H5FD_read_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[], + hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], + void *bufs[] /* out */) +{ + hbool_t offsets_cooked = FALSE; + H5S_t * mem_spaces_local[H5FD_LOCAL_SEL_ARR_LEN]; + H5S_t ** mem_spaces = mem_spaces_local; + H5S_t * file_spaces_local[H5FD_LOCAL_SEL_ARR_LEN]; + H5S_t ** file_spaces = file_spaces_local; + hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + uint32_t i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(file); + HDassert(file->cls); + HDassert((mem_space_ids) || (count == 0)); + HDassert((file_space_ids) || (count == 0)); + HDassert((offsets) || (count == 0)); + HDassert((element_sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* Verify that the first elements of the element_sizes and bufs arrays are + * valid. */ + HDassert((count == 0) || (element_sizes[0] != 0)); + HDassert((count == 0) || (bufs[0] != NULL)); + + /* Get proper DXPL for I/O */ + dxpl_id = H5CX_get_dxpl(); + +#ifndef H5_HAVE_PARALLEL + /* The no-op case + * + * Do not return early for Parallel mode since the I/O could be a + * collective transfer. + */ + if (0 == count) { + HGOTO_DONE(SUCCEED) + } +#endif /* H5_HAVE_PARALLEL */ + + if (file->base_addr > 0) { + + /* apply the base_addr offset to the offsets array. Must undo before + * we return. + */ + for (i = 0; i < count; i++) { + + offsets[i] += file->base_addr; + } + offsets_cooked = TRUE; + } + + /* If the file is open for SWMR read access, allow access to data past + * the end of the allocated space (the 'eoa'). 
This is done because the + * eoa stored in the file's superblock might be out of sync with the + * objects being written within the file by the application performing + * SWMR write operations. + */ + /* For now at least, only check that the offset is not past the eoa, since + * looking into the highest offset in the selection (different from the + * bounds) is potentially expensive. + */ + if (!(file->access_flags & H5F_ACC_SWMR_READ)) { + haddr_t eoa; + + if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type))) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed") + + for (i = 0; i < count; i++) { + + if ((offsets[i]) > eoa) + + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu", + (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa) + } + } + + /* if the underlying VFD supports selection read, make the call */ + if (file->cls->read_selection) { + if ((file->cls->read_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, + element_sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read selection request failed") + } + else { + /* Otherwise, implement the selection read as a sequence of regular + * or vector read calls. 
+ */ + + /* Allocate arrays of space objects if necessary, otherwise use local + * buffers */ + if (count > sizeof(mem_spaces_local) / sizeof(mem_spaces_local[0])) { + if (NULL == (mem_spaces = H5MM_malloc(count * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + if (NULL == (file_spaces = H5MM_malloc(count * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + } + + /* Get object pointers for all dataspaces */ + for (i = 0; i < count; i++) { + if (NULL == (mem_spaces[i] = (H5S_t *)H5I_object_verify(mem_space_ids[i], H5I_DATASPACE))) + HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, H5I_INVALID_HID, "can't retrieve memory dataspace from ID") + if (NULL == (file_spaces[i] = (H5S_t *)H5I_object_verify(file_space_ids[i], H5I_DATASPACE))) + HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, H5I_INVALID_HID, "can't retrieve file dataspace from ID") + } + + /* Translate to vector or scalar I/O */ + if (H5FD__read_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets, + element_sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "translation to vector or scalar read failed") + } + +done: + /* undo the base addr offset to the offsets array if necessary */ + if (offsets_cooked) { + + HDassert(file->base_addr > 0); + + for (i = 0; i < count; i++) { + + offsets[i] -= file->base_addr; + } + } + + /* Cleanup dataspace arrays */ + if (mem_spaces != mem_spaces_local) + mem_spaces = H5MM_xfree(mem_spaces); + if (file_spaces != file_spaces_local) + file_spaces = H5MM_xfree(file_spaces); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_read_selection_id() */ + +/*------------------------------------------------------------------------- + * Function: H5FD__write_selection_translate + * + * Purpose: Translates a selection write call to a vector write call + * if vector writes are supported, or a series of scalar + * write calls otherwise. 
+ * + * Return: Success: SUCCEED + * All writes have completed successfully. + * + * Failure: FAIL + * One or more writes failed. + * + * Programmer: NAF -- 5/13/21 + * + * Changes: None + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD__write_selection_translate(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, uint32_t count, + H5S_t **mem_spaces, H5S_t **file_spaces, haddr_t offsets[], + size_t element_sizes[], const void *bufs[]) +{ + hbool_t extend_sizes = FALSE; + hbool_t extend_bufs = FALSE; + uint32_t i; + size_t element_size; + const void * buf; + hbool_t use_vector = FALSE; + haddr_t addrs_local[H5FD_LOCAL_VECTOR_LEN]; + haddr_t * addrs = addrs_local; + size_t sizes_local[H5FD_LOCAL_VECTOR_LEN]; + size_t * sizes = sizes_local; + const void * vec_bufs_local[H5FD_LOCAL_VECTOR_LEN]; + const void ** vec_bufs = vec_bufs_local; + hsize_t file_off[H5FD_SEQ_LIST_LEN]; + size_t file_len[H5FD_SEQ_LIST_LEN]; + hsize_t mem_off[H5FD_SEQ_LIST_LEN]; + size_t mem_len[H5FD_SEQ_LIST_LEN]; + size_t file_seq_i; + size_t mem_seq_i; + size_t file_nseq; + size_t mem_nseq; + size_t io_len; + size_t nelmts; + hssize_t hss_nelmts; + size_t seq_nelem; + H5S_sel_iter_t *file_iter = NULL; + H5S_sel_iter_t *mem_iter = NULL; + hbool_t file_iter_init = FALSE; + hbool_t mem_iter_init = FALSE; + H5FD_mem_t types[2] = {type, H5FD_MEM_NOLIST}; + size_t vec_arr_nalloc = H5FD_LOCAL_VECTOR_LEN; + size_t vec_arr_nused = 0; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(file); + HDassert(file->cls); + HDassert(mem_spaces); + HDassert(file_spaces); + HDassert(offsets); + HDassert(element_sizes); + HDassert(bufs); + + /* Verify that the first elements of the element_sizes and bufs arrays are + * valid. 
*/ + HDassert(element_sizes[0] != 0); + HDassert(bufs[0] != NULL); + + /* Check if we're using vector I/O */ + use_vector = file->cls->write_vector != NULL; + + /* Allocate sequence lists for memory and file spaces */ + if (NULL == (file_iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate file selection iterator") + if (NULL == (mem_iter = H5FL_MALLOC(H5S_sel_iter_t))) + HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, FAIL, "couldn't allocate memory selection iterator") + + /* Loop over dataspaces */ + for (i = 0; i < count; i++) { + + /* we have already verified that element_sizes[0] != 0 and bufs[0] + * != NULL */ + + if (!extend_sizes) { + + if (element_sizes[i] == 0) { + + extend_sizes = TRUE; + element_size = element_sizes[i - 1]; + } + else { + + element_size = element_sizes[i]; + } + } + + if (!extend_bufs) { + + if (bufs[i] == NULL) { + + extend_bufs = TRUE; + buf = bufs[i - 1]; + } + else { + + buf = bufs[i]; + } + } + + /* Initialize sequence lists for memory and file spaces */ + if (H5S_select_iter_init(file_iter, file_spaces[i], element_size, 0) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for file space") + file_iter_init = TRUE; + if (H5S_select_iter_init(mem_iter, mem_spaces[i], element_size, 0) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "can't initialize sequence list for memory space") + mem_iter_init = TRUE; + + /* Get the number of elements in selection */ + if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(file_spaces[i])) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected") + H5_CHECKED_ASSIGN(nelmts, size_t, hss_nelmts, hssize_t); + +#ifndef NDEBUG + /* Verify mem space has the same number of elements */ + { + if ((hss_nelmts = (hssize_t)H5S_GET_SELECT_NPOINTS(mem_spaces[i])) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTCOUNT, FAIL, "can't get number of elements selected") + HDassert((hssize_t)nelmts == hss_nelmts); + } +#endif /* NDEBUG */ 
+ + /* Initialize values so sequence lists are retrieved on the first + * iteration */ + file_seq_i = H5FD_SEQ_LIST_LEN; + mem_seq_i = H5FD_SEQ_LIST_LEN; + file_nseq = 0; + mem_nseq = 0; + + /* Loop until all elements are processed */ + while (file_seq_i < file_nseq || nelmts > 0) { + /* Fill/refill file sequence list if necessary */ + if (file_seq_i == H5FD_SEQ_LIST_LEN) { + if (H5S_SELECT_ITER_GET_SEQ_LIST(file_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &file_nseq, + &seq_nelem, file_off, file_len) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed") + HDassert(file_nseq > 0); + + nelmts -= seq_nelem; + file_seq_i = 0; + } + HDassert(file_seq_i < file_nseq); + + /* Fill/refill memory sequence list if necessary */ + if (mem_seq_i == H5FD_SEQ_LIST_LEN) { + if (H5S_SELECT_ITER_GET_SEQ_LIST(mem_iter, H5FD_SEQ_LIST_LEN, SIZE_MAX, &mem_nseq, &seq_nelem, + mem_off, mem_len) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_UNSUPPORTED, FAIL, "sequence length generation failed") + HDassert(mem_nseq > 0); + + mem_seq_i = 0; + } + HDassert(mem_seq_i < mem_nseq); + + /* Calculate length of this IO */ + io_len = MIN(file_len[file_seq_i], mem_len[mem_seq_i]); + + /* Check if we're using vector I/O */ + if (use_vector) { + /* Check if we need to extend the arrays */ + if (vec_arr_nused == vec_arr_nalloc) { + /* Check if we're using the static arrays */ + if (addrs == addrs_local) { + HDassert(sizes == sizes_local); + HDassert(vec_bufs == vec_bufs_local); + + /* Allocate dynamic arrays */ + if (NULL == (addrs = H5MM_malloc(sizeof(addrs_local) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for address list") + if (NULL == (sizes = H5MM_malloc(sizeof(sizes_local) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for size list") + if (NULL == (vec_bufs = H5MM_malloc(sizeof(vec_bufs_local) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory allocation failed for buffer list") + + 
/* Copy the existing data */ + (void)H5MM_memcpy(addrs, addrs_local, sizeof(addrs_local)); + (void)H5MM_memcpy(sizes, sizes_local, sizeof(sizes_local)); + (void)H5MM_memcpy(vec_bufs, vec_bufs_local, sizeof(vec_bufs_local)); + } + else { + void *tmp_ptr; + + /* Reallocate arrays */ + if (NULL == (tmp_ptr = H5MM_realloc(addrs, vec_arr_nalloc * sizeof(*addrs) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory reallocation failed for address list") + addrs = tmp_ptr; + if (NULL == (tmp_ptr = H5MM_realloc(sizes, vec_arr_nalloc * sizeof(*sizes) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory reallocation failed for size list") + sizes = tmp_ptr; + if (NULL == + (tmp_ptr = H5MM_realloc(vec_bufs, vec_arr_nalloc * sizeof(*vec_bufs) * 2))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, + "memory reallocation failed for buffer list") + vec_bufs = tmp_ptr; + } + + /* Record that we've doubled the array sizes */ + vec_arr_nalloc *= 2; + } + + /* Add this segment to vector write list */ + addrs[vec_arr_nused] = offsets[i] + file_off[file_seq_i]; + sizes[vec_arr_nused] = io_len; + vec_bufs[vec_arr_nused] = (const void *)((const uint8_t *)buf + mem_off[mem_seq_i]); + vec_arr_nused++; + } + else + /* Issue scalar write call */ + if ((file->cls->write)(file, type, dxpl_id, offsets[i] + file_off[file_seq_i], io_len, + (const void *)((const uint8_t *)buf + mem_off[mem_seq_i])) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write request failed") + + /* Update file sequence */ + if (io_len == file_len[file_seq_i]) + file_seq_i++; + else { + file_off[file_seq_i] += io_len; + file_len[file_seq_i] -= io_len; + } + + /* Update memory sequence */ + if (io_len == mem_len[mem_seq_i]) + mem_seq_i++; + else { + mem_off[mem_seq_i] += io_len; + mem_len[mem_seq_i] -= io_len; + } + } + + /* Make sure both memory and file sequences terminated at the same time */ + if (mem_seq_i < mem_nseq) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "file selection 
terminated before memory selection") + + /* Terminate iterators */ + if (H5S_SELECT_ITER_RELEASE(file_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator") + file_iter_init = FALSE; + if (H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator") + mem_iter_init = FALSE; + } + + /* Issue vector write call if appropriate */ + if (use_vector) { + H5_CHECK_OVERFLOW(vec_arr_nused, size_t, uint32_t) + if ((file->cls->write_vector)(file, dxpl_id, (uint32_t)vec_arr_nused, types, addrs, sizes, vec_bufs) < + 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write vector request failed") + } + +done: + /* Terminate and free iterators */ + if (file_iter) { + if (file_iter_init && H5S_SELECT_ITER_RELEASE(file_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release file selection iterator") + file_iter = H5FL_FREE(H5S_sel_iter_t, file_iter); + } + if (mem_iter) { + if (mem_iter_init && H5S_SELECT_ITER_RELEASE(mem_iter) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTFREE, FAIL, "can't release memory selection iterator") + mem_iter = H5FL_FREE(H5S_sel_iter_t, mem_iter); + } + + /* Cleanup vector arrays */ + if (use_vector) { + if (addrs != addrs_local) + addrs = H5MM_xfree(addrs); + if (sizes != sizes_local) + sizes = H5MM_xfree(sizes); + if (vec_bufs != vec_bufs_local) + vec_bufs = H5MM_xfree(vec_bufs); + } + + /* Make sure we cleaned up */ + HDassert(!addrs || addrs == addrs_local); + HDassert(!sizes || sizes == sizes_local); + HDassert(!vec_bufs || vec_bufs == vec_bufs_local); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD__write_selection_translate() */ + +/*------------------------------------------------------------------------- + * Function: H5FD_write_selection + * + * Purpose: Private version of H5FDwrite_selection() + * + * Perform count writes to the specified file at the + * locations selected in the dataspaces in the file_spaces + * 
array, with each of those dataspaces starting at the file + * address specified by the corresponding element of the + * offsets array, and with the size of each element in the + * dataspace specified by the corresponding element of the + * element_sizes array. The memory type provided by type is + * the same for all selections. Data write is from + * the locations selected in the dataspaces in the + * mem_spaces array, within the buffers provided in the + * corresponding elements of the bufs array. + * + * If i > 0 and element_sizes[i] == 0, presume + * element_sizes[n] = element_sizes[i-1] for all n >= i and + * < count. + * + * If the underlying VFD supports selection writes, pass the + * call through directly. + * + * If it doesn't, convert the vector write into a sequence + * of individual writes. + * + * Return: Success: SUCCEED + * All writes have completed successfully. + * + * Failure: FAIL + * One or more writes failed. + * + * Programmer: NAF -- 3/29/21 + * + * Changes: None + * + *------------------------------------------------------------------------- + */ +herr_t +H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces, H5S_t **file_spaces, + haddr_t offsets[], size_t element_sizes[], const void *bufs[]) +{ + hbool_t offsets_cooked = FALSE; + hid_t mem_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN]; + hid_t * mem_space_ids = mem_space_ids_local; + hid_t file_space_ids_local[H5FD_LOCAL_SEL_ARR_LEN]; + hid_t * file_space_ids = file_space_ids_local; + uint32_t num_spaces = 0; + hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + uint32_t i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(file); + HDassert(file->cls); + HDassert((mem_spaces) || (count == 0)); + HDassert((file_spaces) || (count == 0)); + HDassert((offsets) || (count == 0)); + HDassert((element_sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* Verify that the first elements of 
the element_sizes and bufs arrays are + * valid. */ + HDassert((count == 0) || (element_sizes[0] != 0)); + HDassert((count == 0) || (bufs[0] != NULL)); + + /* Get proper DXPL for I/O */ + dxpl_id = H5CX_get_dxpl(); + +#ifndef H5_HAVE_PARALLEL + /* The no-op case + * + * Do not return early for Parallel mode since the I/O could be a + * collective transfer. + */ + if (0 == count) { + HGOTO_DONE(SUCCEED) + } +#endif /* H5_HAVE_PARALLEL */ + + if (file->base_addr > 0) { + + /* apply the base_addr offset to the offsets array. Must undo before + * we return. + */ + for (i = 0; i < count; i++) { + + offsets[i] += file->base_addr; + } + offsets_cooked = TRUE; + } + + /* For now at least, only check that the offset is not past the eoa, since + * looking into the highest offset in the selection (different from the + * bounds) is potentially expensive. + */ + { + haddr_t eoa; + + if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type))) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed") + + for (i = 0; i < count; i++) { + + if ((offsets[i]) > eoa) + + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu", + (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa) + } + } + + /* if the underlying VFD supports selection write, make the call */ + if (file->cls->write_selection) { + /* Allocate array of space IDs if necessary, otherwise use local + * buffers */ + if (count > sizeof(mem_space_ids_local) / sizeof(mem_space_ids_local[0])) { + if (NULL == (mem_space_ids = H5MM_malloc(count * sizeof(hid_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + if (NULL == (file_space_ids = H5MM_malloc(count * sizeof(hid_t)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + } + + /* Create IDs for all dataspaces */ + for (; num_spaces < count; num_spaces++) { + if ((mem_space_ids[num_spaces] = 
H5I_register(H5I_DATASPACE, mem_spaces[num_spaces], TRUE)) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID") + + if ((file_space_ids[num_spaces] = H5I_register(H5I_DATASPACE, file_spaces[num_spaces], TRUE)) < + 0) { + if (H5I_dec_app_ref(mem_space_ids[num_spaces]) < 0) + HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id") + HGOTO_ERROR(H5E_VFL, H5E_CANTREGISTER, FAIL, "unable to register dataspace ID") + } + } + + if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, + element_sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed") + } + else + /* Otherwise, implement the selection write as a sequence of regular + * or vector write calls. + */ + if (H5FD__write_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets, + element_sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "translation to vector or scalar write failed") + +done: + /* undo the base addr offset to the offsets array if necessary */ + if (offsets_cooked) { + + HDassert(file->base_addr > 0); + + for (i = 0; i < count; i++) { + + offsets[i] -= file->base_addr; + } + } + + /* Cleanup dataspace arrays */ + for (i = 0; i < num_spaces; i++) { + if (H5I_dec_app_ref(mem_space_ids[i]) < 0) + HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id") + if (H5I_dec_app_ref(file_space_ids[i]) < 0) + HDONE_ERROR(H5E_VFL, H5E_CANTDEC, FAIL, "problem freeing id") + } + if (mem_space_ids != mem_space_ids_local) + mem_space_ids = H5MM_xfree(mem_space_ids); + if (file_space_ids != file_space_ids_local) + file_space_ids = H5MM_xfree(file_space_ids); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_write_selection() */ + +/*------------------------------------------------------------------------- + * Function: H5FD_write_selection_id + * + * Purpose: Like H5FD_write_selection(), but takes hid_t arrays + * instead of H5S_t * arrays for the 
dataspaces. + * + * Return: Success: SUCCEED + * All writes have completed successfully. + * + * Failure: FAIL + * One or more writes failed. + * + * Programmer: NAF -- 5/19/21 + * + * Changes: None + * + *------------------------------------------------------------------------- + */ +herr_t +H5FD_write_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[], + hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], const void *bufs[]) +{ + hbool_t offsets_cooked = FALSE; + H5S_t * mem_spaces_local[H5FD_LOCAL_SEL_ARR_LEN]; + H5S_t ** mem_spaces = mem_spaces_local; + H5S_t * file_spaces_local[H5FD_LOCAL_SEL_ARR_LEN]; + H5S_t ** file_spaces = file_spaces_local; + hid_t dxpl_id = H5I_INVALID_HID; /* DXPL for operation */ + uint32_t i; + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(file); + HDassert(file->cls); + HDassert((mem_space_ids) || (count == 0)); + HDassert((file_space_ids) || (count == 0)); + HDassert((offsets) || (count == 0)); + HDassert((element_sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* Verify that the first elements of the element_sizes and bufs arrays are + * valid. */ + HDassert((count == 0) || (element_sizes[0] != 0)); + HDassert((count == 0) || (bufs[0] != NULL)); + + /* Get proper DXPL for I/O */ + dxpl_id = H5CX_get_dxpl(); + +#ifndef H5_HAVE_PARALLEL + /* The no-op case + * + * Do not return early for Parallel mode since the I/O could be a + * collective transfer. + */ + if (0 == count) { + HGOTO_DONE(SUCCEED) + } +#endif /* H5_HAVE_PARALLEL */ + + if (file->base_addr > 0) { + + /* apply the base_addr offset to the offsets array. Must undo before + * we return. 
+ */ + for (i = 0; i < count; i++) { + + offsets[i] += file->base_addr; + } + offsets_cooked = TRUE; + } + + /* For now at least, only check that the offset is not past the eoa, since + * looking into the highest offset in the selection (different from the + * bounds) is potentially expensive. + */ + { + haddr_t eoa; + + if (HADDR_UNDEF == (eoa = (file->cls->get_eoa)(file, type))) + HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver get_eoa request failed") + + for (i = 0; i < count; i++) { + + if ((offsets[i]) > eoa) + + HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, offsets[%d] = %llu, eoa = %llu", + (int)i, (unsigned long long)(offsets[i]), (unsigned long long)eoa) + } + } + + /* if the underlying VFD supports selection write, make the call */ + if (file->cls->write_selection) { + if ((file->cls->write_selection)(file, type, dxpl_id, count, mem_space_ids, file_space_ids, offsets, + element_sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write selection request failed") + } + else { + /* Otherwise, implement the selection write as a sequence of regular + * or vector write calls. 
+ */ + + /* Allocate arrays of space objects if necessary, otherwise use local + * buffers */ + if (count > sizeof(mem_spaces_local) / sizeof(mem_spaces_local[0])) { + if (NULL == (mem_spaces = H5MM_malloc(count * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + if (NULL == (file_spaces = H5MM_malloc(count * sizeof(H5S_t *)))) + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for dataspace list") + } + + /* Get object pointers for all dataspaces */ + for (i = 0; i < count; i++) { + if (NULL == (mem_spaces[i] = (H5S_t *)H5I_object_verify(mem_space_ids[i], H5I_DATASPACE))) + HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, H5I_INVALID_HID, "can't retrieve memory dataspace from ID") + if (NULL == (file_spaces[i] = (H5S_t *)H5I_object_verify(file_space_ids[i], H5I_DATASPACE))) + HGOTO_ERROR(H5E_VFL, H5E_BADTYPE, H5I_INVALID_HID, "can't retrieve file dataspace from ID") + } + + /* Translate to vector or scalar I/O */ + if (H5FD__write_selection_translate(file, type, dxpl_id, count, mem_spaces, file_spaces, offsets, + element_sizes, bufs) < 0) + HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "translation to vector or scalar write failed") + } + +done: + /* undo the base addr offset to the offsets array if necessary */ + if (offsets_cooked) { + + HDassert(file->base_addr > 0); + + for (i = 0; i < count; i++) { + + offsets[i] -= file->base_addr; + } + } + + /* Cleanup dataspace arrays */ + if (mem_spaces != mem_spaces_local) + mem_spaces = H5MM_xfree(mem_spaces); + if (file_spaces != file_spaces_local) + file_spaces = H5MM_xfree(file_spaces); + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD_write_selection_id() */ + /*------------------------------------------------------------------------- * Function: H5FD_set_eoa * @@ -398,6 +2132,246 @@ H5FD_driver_query(const H5FD_class_t *driver, unsigned long *flags /*out*/) FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD_driver_query() */ 
+/*-------------------------------------------------------------------------
+ * Function:    H5FD_sort_vector_io_req
+ *
+ * Purpose:     Determine whether the supplied vector I/O request is
+ *              sorted.
+ *
+ *              If it is, set *vector_was_sorted to TRUE, set:
+ *
+ *                 *s_types_ptr = types
+ *                 *s_addrs_ptr = addrs
+ *                 *s_sizes_ptr = sizes
+ *                 *s_bufs_ptr  = bufs
+ *
+ *              and return.
+ *
+ *              If it is not sorted, duplicate the type, addrs, sizes,
+ *              and bufs vectors, storing the base addresses of the new
+ *              vectors in *s_types_ptr, *s_addrs_ptr, *s_sizes_ptr, and
+ *              *s_bufs_ptr respectively.  Determine the sorted order
+ *              of the vector I/O request, and load it into the new
+ *              vectors in sorted order.
+ *
+ *              Note that in this case, it is the caller's responsibility
+ *              to free the sorted vectors.
+ *
+ *              JRM -- 3/15/21
+ *
+ * Return:      SUCCEED/FAIL
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static int
+H5FD__vsrt_tmp_cmp(const void *element_1, const void *element_2)
+{
+    haddr_t addr_1    = ((const H5FD_vsrt_tmp_t *)element_1)->addr;
+    haddr_t addr_2    = ((const H5FD_vsrt_tmp_t *)element_2)->addr;
+    int     ret_value = 0; /* Return value */
+
+    FUNC_ENTER_STATIC_NOERR
+
+    /* Sanity checks */
+    HDassert(H5F_addr_defined(addr_1));
+    HDassert(H5F_addr_defined(addr_2));
+
+    /* Compare the addresses */
+    if (H5F_addr_gt(addr_1, addr_2))
+        ret_value = 1;
+    else if (H5F_addr_lt(addr_1, addr_2))
+        ret_value = -1;
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FD__vsrt_tmp_cmp() */
+
+herr_t
+H5FD_sort_vector_io_req(hbool_t *vector_was_sorted, uint32_t _count, H5FD_mem_t types[], haddr_t addrs[],
+                        size_t sizes[], H5_flexible_const_ptr_t bufs[], H5FD_mem_t **s_types_ptr,
+                        haddr_t **s_addrs_ptr, size_t **s_sizes_ptr, H5_flexible_const_ptr_t **s_bufs_ptr)
+{
+    herr_t                  ret_value = SUCCEED; /* Return value */
+    size_t                  count     = (size_t)_count;
+    size_t                  i;
+    struct H5FD_vsrt_tmp_t *srt_tmp = NULL;
+
+    FUNC_ENTER_NOAPI(FAIL)
+
+    /* Sanity checks */
+
+    
HDassert(vector_was_sorted); + + HDassert((types) || (count == 0)); + HDassert((addrs) || (count == 0)); + HDassert((sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* verify that the first elements of the sizes and types arrays are + * valid. + */ + HDassert((count == 0) || (sizes[0] != 0)); + HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST)); + + HDassert((count == 0) || ((s_types_ptr) && (NULL == *s_types_ptr))); + HDassert((count == 0) || ((s_addrs_ptr) && (NULL == *s_addrs_ptr))); + HDassert((count == 0) || ((s_sizes_ptr) && (NULL == *s_sizes_ptr))); + HDassert((count == 0) || ((s_bufs_ptr) && (NULL == *s_bufs_ptr))); + + /* scan the addrs array to see if it is sorted */ + for (i = 1; i < count; i++) { + HDassert(H5F_addr_defined(addrs[i - 1])); + + if (H5F_addr_gt(addrs[i - 1], addrs[i])) + break; + else if (H5F_addr_eq(addrs[i - 1], addrs[i])) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "duplicate addr in vector") + } + + /* if we traversed the entire array without breaking out, then + * the array was already sorted */ + if (i >= count) + *vector_was_sorted = TRUE; + else + *vector_was_sorted = FALSE; + + if (*vector_was_sorted) { + + *s_types_ptr = types; + *s_addrs_ptr = addrs; + *s_sizes_ptr = sizes; + *s_bufs_ptr = bufs; + } + else { + + /* must sort the addrs array in increasing addr order, while + * maintaining the association between each addr, and the + * sizes[], types[], and bufs[] values at the same index. + * + * Do this by allocating an array of struct H5FD_vsrt_tmp_t, where + * each instance of H5FD_vsrt_tmp_t has two fields, addr and index. + * Load the array with the contents of the addrs array and + * the index of the associated entry. Sort the array, allocate + * the s_types_ptr, s_addrs_ptr, s_sizes_ptr, and s_bufs_ptr + * arrays and populate them using the mapping provided by + * the sorted array of H5FD_vsrt_tmp_t. 
+         */
+        int    j;
+        size_t fixed_size_index = count;
+        size_t fixed_type_index = count;
+        size_t srt_tmp_size;
+
+        srt_tmp_size = (count * sizeof(struct H5FD_vsrt_tmp_t));
+
+        if (NULL == (srt_tmp = (H5FD_vsrt_tmp_t *)HDmalloc(srt_tmp_size)))
+
+            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc srt_tmp")
+
+        for (i = 0; i < count; i++) {
+            HDassert(i == (size_t)((int)i));
+
+            srt_tmp[i].addr  = addrs[i];
+            srt_tmp[i].index = (int)i;
+        }
+
+        /* sort the srt_tmp array */
+        HDqsort(srt_tmp, count, sizeof(struct H5FD_vsrt_tmp_t), H5FD__vsrt_tmp_cmp);
+
+        /* verify no duplicate entries */
+        i = 1;
+
+        for (i = 1; i < count; i++) {
+            HDassert(H5F_addr_lt(srt_tmp[i - 1].addr, srt_tmp[i].addr));
+
+            if (H5F_addr_eq(srt_tmp[i - 1].addr, srt_tmp[i].addr))
+                HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "duplicate addr in vector")
+        }
+
+        if ((NULL == (*s_types_ptr = (H5FD_mem_t *)HDmalloc(count * sizeof(H5FD_mem_t)))) ||
+            (NULL == (*s_addrs_ptr = (haddr_t *)HDmalloc(count * sizeof(haddr_t)))) ||
+            (NULL == (*s_sizes_ptr = (size_t *)HDmalloc(count * sizeof(size_t)))) ||
+            (NULL ==
+             (*s_bufs_ptr = (H5_flexible_const_ptr_t *)HDmalloc(count * sizeof(H5_flexible_const_ptr_t))))) {
+
+            HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc sorted vector(s)")
+        }
+
+        HDassert(sizes[0] != 0);
+        HDassert(types[0] != H5FD_MEM_NOLIST);
+
+        /* Scan the sizes and types vectors to determine if the fixed size / type
+         * optimization is in use, and if so, to determine the index of the last
+         * valid value on each vector.  We have already verified that the first
+         * elements of these arrays are valid so we can start at the second
+         * element (if it exists). 
+ */ + for (i = 1; i < count && ((fixed_size_index == count) || (fixed_type_index == count)); i++) { + if ((fixed_size_index == count) && (sizes[i] == 0)) + fixed_size_index = i - 1; + if ((fixed_type_index == count) && (types[i] == H5FD_MEM_NOLIST)) + fixed_type_index = i - 1; + } + + HDassert(fixed_size_index <= count); + HDassert(fixed_type_index <= count); + + /* populate the sorted vectors */ + for (i = 0; i < count; i++) { + + j = srt_tmp[i].index; + + (*s_types_ptr)[j] = types[MIN(i, fixed_type_index)]; + (*s_addrs_ptr)[j] = addrs[i]; + (*s_sizes_ptr)[j] = sizes[MIN(i, fixed_size_index)]; + (*s_bufs_ptr)[j] = bufs[i]; + } + } + +done: + if (srt_tmp) { + + HDfree(srt_tmp); + srt_tmp = NULL; + } + + /* On failure, free the sorted vectors if they were allocated. + * Note that we only allocate these vectors if the original array + * was not sorted -- thus we check both for failure, and for + * the flag indicating that the original vector was not sorted + * in increasing address order. 
+ */ + if ((ret_value != SUCCEED) && (!(*vector_was_sorted))) { + + /* free space allocated for sorted vectors */ + if (*s_types_ptr) { + + HDfree(*s_types_ptr); + *s_types_ptr = NULL; + } + + if (*s_addrs_ptr) { + + HDfree(*s_addrs_ptr); + *s_addrs_ptr = NULL; + } + + if (*s_sizes_ptr) { + + HDfree(*s_sizes_ptr); + *s_sizes_ptr = NULL; + } + + if (*s_bufs_ptr) { + + HDfree(*s_bufs_ptr); + *s_bufs_ptr = NULL; + } + } + + FUNC_LEAVE_NOAPI(ret_value) + +} /* end H5FD_sort_vector_io_req() */ + /*------------------------------------------------------------------------- * Function: H5FD_delete * @@ -418,6 +2392,7 @@ H5FD_delete(const char *filename, hid_t fapl_id) FUNC_ENTER_NOAPI(FAIL) /* Sanity checks */ + HDassert(filename); /* Get file access property list */ diff --git a/src/H5FDlog.c b/src/H5FDlog.c index af34682883..4d2e705e08 100644 --- a/src/H5FDlog.c +++ b/src/H5FDlog.c @@ -180,41 +180,46 @@ static herr_t H5FD__log_unlock(H5FD_t *_file); static herr_t H5FD__log_delete(const char *filename, hid_t fapl_id); static const H5FD_class_t H5FD_log_g = { - H5FD_LOG_VALUE, /* value */ - "log", /* name */ - MAXADDR, /* maxaddr */ - H5F_CLOSE_WEAK, /* fc_degree */ - H5FD__log_term, /* terminate */ - NULL, /* sb_size */ - NULL, /* sb_encode */ - NULL, /* sb_decode */ - sizeof(H5FD_log_fapl_t), /* fapl_size */ - H5FD__log_fapl_get, /* fapl_get */ - H5FD__log_fapl_copy, /* fapl_copy */ - H5FD__log_fapl_free, /* fapl_free */ - 0, /* dxpl_size */ - NULL, /* dxpl_copy */ - NULL, /* dxpl_free */ - H5FD__log_open, /* open */ - H5FD__log_close, /* close */ - H5FD__log_cmp, /* cmp */ - H5FD__log_query, /* query */ - NULL, /* get_type_map */ - H5FD__log_alloc, /* alloc */ - H5FD__log_free, /* free */ - H5FD__log_get_eoa, /* get_eoa */ - H5FD__log_set_eoa, /* set_eoa */ - H5FD__log_get_eof, /* get_eof */ - H5FD__log_get_handle, /* get_handle */ - H5FD__log_read, /* read */ - H5FD__log_write, /* write */ - NULL, /* flush */ - H5FD__log_truncate, /* truncate */ - H5FD__log_lock, /* 
lock */ - H5FD__log_unlock, /* unlock */ - H5FD__log_delete, /* del */ - NULL, /* ctl */ - H5FD_FLMAP_DICHOTOMY /* fl_map */ + H5FD_CLASS_VERSION, /* struct version */ + H5FD_LOG_VALUE, /* value */ + "log", /* name */ + MAXADDR, /* maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ + H5FD__log_term, /* terminate */ + NULL, /* sb_size */ + NULL, /* sb_encode */ + NULL, /* sb_decode */ + sizeof(H5FD_log_fapl_t), /* fapl_size */ + H5FD__log_fapl_get, /* fapl_get */ + H5FD__log_fapl_copy, /* fapl_copy */ + H5FD__log_fapl_free, /* fapl_free */ + 0, /* dxpl_size */ + NULL, /* dxpl_copy */ + NULL, /* dxpl_free */ + H5FD__log_open, /* open */ + H5FD__log_close, /* close */ + H5FD__log_cmp, /* cmp */ + H5FD__log_query, /* query */ + NULL, /* get_type_map */ + H5FD__log_alloc, /* alloc */ + H5FD__log_free, /* free */ + H5FD__log_get_eoa, /* get_eoa */ + H5FD__log_set_eoa, /* set_eoa */ + H5FD__log_get_eof, /* get_eof */ + H5FD__log_get_handle, /* get_handle */ + H5FD__log_read, /* read */ + H5FD__log_write, /* write */ + NULL, /* read vector */ + NULL, /* write vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ + NULL, /* flush */ + H5FD__log_truncate, /* truncate */ + H5FD__log_lock, /* lock */ + H5FD__log_unlock, /* unlock */ + H5FD__log_delete, /* del */ + NULL, /* ctl */ + H5FD_FLMAP_DICHOTOMY /* fl_map */ }; /* Default configuration, if none provided */ diff --git a/src/H5FDmirror.c b/src/H5FDmirror.c index e74901a5d1..0ab53458c7 100644 --- a/src/H5FDmirror.c +++ b/src/H5FDmirror.c @@ -160,6 +160,7 @@ static herr_t H5FD__mirror_unlock(H5FD_t *_file); static herr_t H5FD__mirror_verify_reply(H5FD_mirror_t *file); static const H5FD_class_t H5FD_mirror_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_MIRROR_VALUE, /* value */ "mirror", /* name */ MAXADDR, /* maxaddr */ @@ -188,6 +189,10 @@ static const H5FD_class_t H5FD_mirror_g = { NULL, /* get_handle */ H5FD__mirror_read, /* read */ H5FD__mirror_write, /* write */ + NULL, /* read_vector */ + NULL, /* 
write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ NULL, /* flush */ H5FD__mirror_truncate, /* truncate */ H5FD__mirror_lock, /* lock */ diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c index 4aa8a96995..6b639f90bb 100644 --- a/src/H5FDmpio.c +++ b/src/H5FDmpio.c @@ -87,49 +87,66 @@ static herr_t H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, ha void *buf); static herr_t H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size, const void *buf); +static herr_t H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count, + H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], void *bufs[]); +static herr_t H5FD__mpio_write_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count, + H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + const void *bufs[]); static herr_t H5FD__mpio_flush(H5FD_t *_file, hid_t dxpl_id, hbool_t closing); static herr_t H5FD__mpio_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing); static herr_t H5FD__mpio_delete(const char *filename, hid_t fapl_id); static herr_t H5FD__mpio_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, const void *input, void **output); +/* Other functions */ +static herr_t H5FD__mpio_vector_build_types( + uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], H5_flexible_const_ptr_t bufs[], + haddr_t *s_addrs[], size_t *s_sizes[], H5_flexible_const_ptr_t *s_bufs[], hbool_t *vector_was_sorted, + MPI_Offset *mpi_off, H5_flexible_const_ptr_t *mpi_bufs_base, int *size_i, MPI_Datatype *buf_type, + hbool_t *buf_type_created, MPI_Datatype *file_type, hbool_t *file_type_created, char *unused); + /* The MPIO file driver information */ static const H5FD_class_t H5FD_mpio_g = { - H5_VFD_MPIO, /* value */ - "mpio", /* name */ - HADDR_MAX, /* maxaddr */ - H5F_CLOSE_SEMI, /* fc_degree */ - H5FD__mpio_term, /* terminate */ - NULL, /* sb_size */ - NULL, /* sb_encode */ - NULL, /* sb_decode */ - 0, /* fapl_size 
*/ - NULL, /* fapl_get */ - NULL, /* fapl_copy */ - NULL, /* fapl_free */ - 0, /* dxpl_size */ - NULL, /* dxpl_copy */ - NULL, /* dxpl_free */ - H5FD__mpio_open, /* open */ - H5FD__mpio_close, /* close */ - NULL, /* cmp */ - H5FD__mpio_query, /* query */ - NULL, /* get_type_map */ - NULL, /* alloc */ - NULL, /* free */ - H5FD__mpio_get_eoa, /* get_eoa */ - H5FD__mpio_set_eoa, /* set_eoa */ - H5FD__mpio_get_eof, /* get_eof */ - H5FD__mpio_get_handle, /* get_handle */ - H5FD__mpio_read, /* read */ - H5FD__mpio_write, /* write */ - H5FD__mpio_flush, /* flush */ - H5FD__mpio_truncate, /* truncate */ - NULL, /* lock */ - NULL, /* unlock */ - H5FD__mpio_delete, /* del */ - H5FD__mpio_ctl, /* ctl */ - H5FD_FLMAP_DICHOTOMY /* fl_map */ + H5FD_CLASS_VERSION, /* struct version */ + H5_VFD_MPIO, /* value */ + "mpio", /* name */ + HADDR_MAX, /* maxaddr */ + H5F_CLOSE_SEMI, /* fc_degree */ + H5FD__mpio_term, /* terminate */ + NULL, /* sb_size */ + NULL, /* sb_encode */ + NULL, /* sb_decode */ + 0, /* fapl_size */ + NULL, /* fapl_get */ + NULL, /* fapl_copy */ + NULL, /* fapl_free */ + 0, /* dxpl_size */ + NULL, /* dxpl_copy */ + NULL, /* dxpl_free */ + H5FD__mpio_open, /* open */ + H5FD__mpio_close, /* close */ + NULL, /* cmp */ + H5FD__mpio_query, /* query */ + NULL, /* get_type_map */ + NULL, /* alloc */ + NULL, /* free */ + H5FD__mpio_get_eoa, /* get_eoa */ + H5FD__mpio_set_eoa, /* set_eoa */ + H5FD__mpio_get_eof, /* get_eof */ + H5FD__mpio_get_handle, /* get_handle */ + H5FD__mpio_read, /* read */ + H5FD__mpio_write, /* write */ + H5FD__mpio_read_vector, /* read_vector */ + H5FD__mpio_write_vector, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ + H5FD__mpio_flush, /* flush */ + H5FD__mpio_truncate, /* truncate */ + NULL, /* lock */ + NULL, /* unlock */ + H5FD__mpio_delete, /* del */ + H5FD__mpio_ctl, /* ctl */ + H5FD_FLMAP_DICHOTOMY /* fl_map */ }; #ifdef H5FDmpio_DEBUG @@ -1415,7 +1432,7 @@ H5FD__mpio_read(H5FD_t *_file, H5FD_mem_t 
H5_ATTR_UNUSED type, hid_t H5_ATTR_UNU #ifdef H5FDmpio_DEBUG if (H5FD_mpio_debug_r_flag) HDfprintf(stderr, "%s: (%d) mpi_off = %ld bytes_read = %lld type = %s\n", __func__, file->mpi_rank, - (long)mpi_off, bytes_read, H5FD__mem_t_to_str(type)); + (long)mpi_off, (long long)bytes_read, H5FD__mem_t_to_str(type)); #endif /* @@ -1636,7 +1653,7 @@ H5FD__mpio_write(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, h #ifdef H5FDmpio_DEBUG if (H5FD_mpio_debug_w_flag) HDfprintf(stderr, "%s: (%d) mpi_off = %ld bytes_written = %lld type = %s\n", __func__, - file->mpi_rank, (long)mpi_off, bytes_written, H5FD__mem_t_to_str(type)); + file->mpi_rank, (long)mpi_off, (long long)bytes_written, H5FD__mem_t_to_str(type)); #endif /* Each process will keep track of its perceived EOF value locally, and @@ -1662,6 +1679,1050 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5FD__mpio_write() */ +/*------------------------------------------------------------------------- + * Function: H5FD__mpio_vector_build_types + * + * Purpose: Build MPI datatypes and calculate offset, base buffer, and + * size for MPIO vector I/O. Spun off from common code in + * H5FD__mpio_vector_read() and H5FD__mpio_vector_write(). + * + * Return: Success: SUCCEED. + * Failure: FAIL. 
+ * + * Programmer: Neil Fortner + * March 14, 2022 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD__mpio_vector_build_types(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + H5_flexible_const_ptr_t bufs[], haddr_t *s_addrs[], size_t *s_sizes[], + H5_flexible_const_ptr_t *s_bufs[], hbool_t *vector_was_sorted, + MPI_Offset *mpi_off, H5_flexible_const_ptr_t *mpi_bufs_base, int *size_i, + MPI_Datatype *buf_type, hbool_t *buf_type_created, MPI_Datatype *file_type, + hbool_t *file_type_created, char *unused) +{ + hsize_t bigio_count; /* Transition point to create derived type */ + hbool_t fixed_size = FALSE; + size_t size; + H5FD_mem_t * s_types = NULL; + int * mpi_block_lengths = NULL; + MPI_Aint mpi_bufs_base_Aint; + MPI_Aint * mpi_bufs = NULL; + MPI_Aint * mpi_displacements = NULL; + MPI_Datatype *sub_types = NULL; + uint8_t * sub_types_created = NULL; + int i; + int j; + int mpi_code; /* MPI return code */ + herr_t ret_value = SUCCEED; + + FUNC_ENTER_STATIC + + /* Sanity checks */ + HDassert(s_sizes); + HDassert(s_bufs); + HDassert(vector_was_sorted); + HDassert(*vector_was_sorted); + HDassert(mpi_off); + HDassert(mpi_bufs_base); + HDassert(size_i); + HDassert(buf_type); + HDassert(buf_type_created); + HDassert(!*buf_type_created); + HDassert(file_type); + HDassert(file_type_created); + HDassert(!*file_type_created); + HDassert(unused); + + /* Get bio I/O transition point (may be lower than 2G for testing) */ + bigio_count = H5_mpi_get_bigio_count(); + + if (count == 1) { + /* Single block. Just use a series of MPI_BYTEs for the file view. 
+ */ + *size_i = (int)sizes[0]; + *buf_type = MPI_BYTE; + *file_type = MPI_BYTE; + *mpi_bufs_base = bufs[0]; + + /* Setup s_addrs, s_sizes and s_bufs (needed for incomplete read filling code and eof + * calculation code) */ + *s_addrs = addrs; + *s_sizes = sizes; + *s_bufs = bufs; + + /* some numeric conversions */ + if (H5FD_mpi_haddr_to_MPIOff(addrs[0], mpi_off) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI offset") + + /* Check for size overflow */ + if (sizes[0] > bigio_count) { + /* We need to work around the integer size limit of 2GB. The input size_t size + * variable cannot fit into an integer, but we can get around that limitation by + * creating a different datatype and then setting the integer size (or element + * count) to 1 when using the derived_type. */ + + if (H5_mpio_create_large_type(sizes[0], 0, MPI_BYTE, buf_type) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype") + *buf_type_created = TRUE; + + if (H5_mpio_create_large_type(sizes[0], 0, MPI_BYTE, file_type) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype") + *file_type_created = TRUE; + + *size_i = 1; + } + } + else if (count > 0) { /* create MPI derived types describing the vector write */ + + /* sort the vector I/O request into increasing address order if required + * + * If the vector is already sorted, the base addresses of types, addrs, sizes, + * and bufs will be returned in s_types, s_addrs, s_sizes, and s_bufs respectively. + * + * If the vector was not already sorted, new, sorted versions of types, addrs, sizes, and bufs + * are allocated, populated, and returned in s_types, s_addrs, s_sizes, and s_bufs respectively. + * In this case, this function must free the memory allocated for the sorted vectors. 
+ */ + if (H5FD_sort_vector_io_req(vector_was_sorted, count, types, addrs, sizes, bufs, &s_types, s_addrs, + s_sizes, s_bufs) < 0) + HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "can't sort vector I/O request") + + if ((NULL == (mpi_block_lengths = (int *)HDmalloc((size_t)count * sizeof(int)))) || + (NULL == (mpi_displacements = (MPI_Aint *)HDmalloc((size_t)count * sizeof(MPI_Aint)))) || + (NULL == (mpi_bufs = (MPI_Aint *)HDmalloc((size_t)count * sizeof(MPI_Aint))))) { + + HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc mpi block lengths / displacement") + } + + /* when we setup mpi_bufs[] below, all addresses are offsets from + * mpi_bufs_base. + * + * Since these offsets must all be positive, we must scan through + * s_bufs[] to find the smallest value, and choose that for + * mpi_bufs_base. + */ + + j = 0; /* guess at the index of the smallest value of s_bufs[] */ + + for (i = 1; i < (int)count; i++) { + + if ((*s_bufs)[i].cvp < (*s_bufs)[j].cvp) { + + j = i; + } + } + + *mpi_bufs_base = (*s_bufs)[j]; + + if (MPI_SUCCESS != (mpi_code = MPI_Get_address(mpi_bufs_base->cvp, &mpi_bufs_base_Aint))) + + HMPI_GOTO_ERROR(FAIL, "MPI_Get_address for s_bufs[] to mpi_bufs_base failed", mpi_code) + + *size_i = 1; + + fixed_size = FALSE; + + /* load the mpi_block_lengths and mpi_displacements arrays */ + for (i = 0; i < (int)count; i++) { + /* Determine size of this vector element */ + if (!fixed_size) { + if ((*s_sizes)[i] == 0) { + HDassert(vector_was_sorted); + fixed_size = TRUE; + size = sizes[i - 1]; + } + else { + size = (*s_sizes)[i]; + } + } + + /* Add to block lengths and displacements arrays */ + mpi_block_lengths[i] = (int)size; + mpi_displacements[i] = (MPI_Aint)(*s_addrs)[i]; + + /* convert s_bufs[i] to MPI_Aint... */ + if (MPI_SUCCESS != (mpi_code = MPI_Get_address((*s_bufs)[i].cvp, &(mpi_bufs[i])))) + HMPI_GOTO_ERROR(FAIL, "MPI_Get_address for s_bufs[] - mpi_bufs_base failed", mpi_code) + + /*... and then subtract mpi_bufs_base_Aint from it. 
 */
+#if ((MPI_VERSION > 3) || ((MPI_VERSION == 3) && (MPI_SUBVERSION >= 1)))
+            mpi_bufs[i] = MPI_Aint_diff(mpi_bufs[i], mpi_bufs_base_Aint);
+#else
+            mpi_bufs[i] = mpi_bufs[i] - mpi_bufs_base_Aint;
+#endif
+
+            /* Check for size overflow */
+            if (size > bigio_count) {
+                /* We need to work around the integer size limit of 2GB. The input size_t size
+                 * variable cannot fit into an integer, but we can get around that limitation by
+                 * creating a different datatype and then setting the integer size (or element
+                 * count) to 1 when using the derived_type. */
+
+                /* Allocate arrays to keep track of types and whether they were created, if
+                 * necessary */
+                if (!sub_types) {
+                    HDassert(!sub_types_created);
+
+                    if (NULL == (sub_types = (MPI_Datatype *)HDmalloc((size_t)count * sizeof(MPI_Datatype))))
+                        HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc sub types array")
+                    if (NULL == (sub_types_created = (uint8_t *)HDcalloc((size_t)count, 1))) {
+                        HDfree(sub_types);
+                        sub_types = NULL;
+                        HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't alloc sub types created array")
+                    }
+
+                    /* Initialize sub_types to all MPI_BYTE */
+                    for (j = 0; j < (int)count; j++)
+                        sub_types[j] = MPI_BYTE;
+                }
+                HDassert(sub_types_created);
+
+                /* Create type for large block */
+                if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &sub_types[i]) < 0)
+                    HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype")
+                sub_types_created[i] = TRUE;
+
+                /* Only one of these large types for this vector element */
+                mpi_block_lengths[i] = 1;
+            }
+            else
+                HDassert(size == (size_t)mpi_block_lengths[i]);
+        }
+
+        /* create the memory MPI derived types */
+        if (sub_types) {
+            if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)count, mpi_block_lengths, mpi_bufs,
+                                                                  sub_types, buf_type)))
+                HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct for buf_type failed", mpi_code)
+        }
+        else if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)count, mpi_block_lengths, mpi_bufs,
+                                                                     MPI_BYTE, 
buf_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed for buf_type failed", mpi_code) + + *buf_type_created = TRUE; + + if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(buf_type))) + + HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit for buf_type failed", mpi_code) + + /* create the file MPI derived type */ + if (sub_types) { + if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)count, mpi_block_lengths, + mpi_displacements, sub_types, file_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct for file_type failed", mpi_code) + } + else if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)count, mpi_block_lengths, + mpi_displacements, MPI_BYTE, file_type))) + HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed for file_type failed", mpi_code) + + *file_type_created = TRUE; + + if (MPI_SUCCESS != (mpi_code = MPI_Type_commit(file_type))) + + HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit for file_type failed", mpi_code) + + /* Free up memory used to build types */ + HDassert(mpi_block_lengths); + HDfree(mpi_block_lengths); + mpi_block_lengths = NULL; + + HDassert(mpi_displacements); + HDfree(mpi_displacements); + mpi_displacements = NULL; + + HDassert(mpi_bufs); + HDfree(mpi_bufs); + mpi_bufs = NULL; + + if (sub_types) { + HDassert(sub_types); + + for (i = 0; i < (int)count; i++) + if (sub_types_created[i]) + MPI_Type_free(&sub_types[i]); + + HDfree(sub_types); + sub_types = NULL; + HDfree(sub_types_created); + sub_types_created = NULL; + } + + /* some numeric conversions */ + if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, mpi_off) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0") + } + else { + /* setup for null participation in the collective operation. 
 */
+        *buf_type  = MPI_BYTE;
+        *file_type = MPI_BYTE;
+
+        /* Set non-NULL pointer for I/O operation */
+        mpi_bufs_base->vp = unused;
+
+        /* MPI count to read */
+        *size_i = 0;
+
+        /* some numeric conversions */
+        if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, mpi_off) < 0)
+            HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0")
+    }
+
+done:
+    /* free sorted vectors if they exist */
+    if (!*vector_was_sorted)
+        if (s_types) {
+            HDfree(s_types);
+            s_types = NULL;
+        }
+
+    /* Clean up on error */
+    if (ret_value < 0) {
+        if (mpi_block_lengths) {
+            HDfree(mpi_block_lengths);
+            mpi_block_lengths = NULL;
+        }
+
+        if (mpi_displacements) {
+            HDfree(mpi_displacements);
+            mpi_displacements = NULL;
+        }
+
+        if (mpi_bufs) {
+            HDfree(mpi_bufs);
+            mpi_bufs = NULL;
+        }
+
+        if (sub_types) {
+            HDassert(sub_types_created);
+
+            for (i = 0; i < (int)count; i++)
+                if (sub_types_created[i])
+                    MPI_Type_free(&sub_types[i]);
+
+            HDfree(sub_types);
+            sub_types = NULL;
+            HDfree(sub_types_created);
+            sub_types_created = NULL;
+        }
+    }
+
+    /* Make sure we cleaned up */
+    HDassert(!mpi_block_lengths);
+    HDassert(!mpi_displacements);
+    HDassert(!mpi_bufs);
+    HDassert(!sub_types);
+    HDassert(!sub_types_created);
+
+    FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD__mpio_vector_build_types() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD__mpio_read_vector()
+ *
+ * Purpose:     The behaviour of this function depends on the value of
+ *              the io_xfer_mode obtained from the context.
+ *
+ *              If it is H5FD_MPIO_COLLECTIVE, this is a collective
+ *              operation, which allows us to use MPI_File_set_view, and
+ *              then perform the entire vector read in a single MPI call.
+ *
+ *              Do this (if count is positive), by constructing memory
+ *              and file derived types from the supplied vector, using
+ *              file type to set the file view, and then reading
+ *              the memory type from file. 
Note that this read is + * either independent or collective depending on the + * value of mpio_coll_opt -- again obtained from the context. + * + * If count is zero, participate in the collective read + * (if so configured) with an empty read. + * + * Finally, set the file view back to its default state. + * + * In contrast, if io_xfer_mode is H5FD_MPIO_INDEPENDENT, + * this call is independent, and thus we cannot use + * MPI_File_set_view(). + * + * In this case, simply walk the vector, and issue an + * independent read for each entry. + * + * Return: Success: SUCCEED. + * Failure: FAIL. + * + * Programmer: John Mainzer + * March 15, 2021 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD__mpio_read_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count, H5FD_mem_t types[], + haddr_t addrs[], size_t sizes[], void *bufs[]) +{ + H5FD_mpio_t * file = (H5FD_mpio_t *)_file; + hbool_t vector_was_sorted = TRUE; + haddr_t * s_addrs = NULL; + size_t * s_sizes = NULL; + void ** s_bufs = NULL; + char unused = 0; /* Unused, except for non-NULL pointer value */ + void * mpi_bufs_base = NULL; + MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */ + hbool_t buf_type_created = FALSE; + MPI_Datatype file_type = MPI_BYTE; /* MPI description of the selection in file */ + hbool_t file_type_created = FALSE; + int i; + int mpi_code; /* MPI return code */ + MPI_Offset mpi_off = 0; + MPI_Status mpi_stat; /* Status from I/O operation */ + H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */ + H5FD_mpio_collective_opt_t coll_opt_mode; /* whether we are doing collective or independent I/O */ + int size_i; +#if MPI_VERSION >= 3 + MPI_Count bytes_read = 0; /* Number of bytes read in */ + MPI_Count type_size; /* MPI datatype used for I/O's size */ + MPI_Count io_size; /* Actual number of bytes requested */ + MPI_Count n; +#else + int bytes_read = 0; /* Number of bytes read in */ + int type_size; /* 
MPI datatype used for I/O's size */ + int io_size; /* Actual number of bytes requested */ + int n; +#endif + hbool_t rank0_bcast = FALSE; /* If read-with-rank0-and-bcast flag was used */ +#ifdef H5FDmpio_DEBUG + hbool_t H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file)); + hbool_t H5FD_mpio_debug_r_flag = (H5FD_mpio_debug_flags_s[(int)'r'] && H5FD_MPIO_TRACE_THIS_RANK(file)); +#endif + herr_t ret_value = SUCCEED; + + FUNC_ENTER_STATIC + +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_t_flag) + HDfprintf(stderr, "%s: (%d) Entering\n", __func__, file->mpi_rank); +#endif + + /* Sanity checks */ + HDassert(file); + HDassert(H5FD_MPIO == file->pub.driver_id); + HDassert((types) || (count == 0)); + HDassert((addrs) || (count == 0)); + HDassert((sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* verify that the first elements of the sizes and types arrays are + * valid. + */ + HDassert((count == 0) || (sizes[0] != 0)); + HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST)); + + /* Get the transfer mode from the API context + * + * This flag is set to H5FD_MPIO_COLLECTIVE if the API call is + * collective, and to H5FD_MPIO_INDEPENDENT if it is not. + * + * While this doesn't mean that we are actually about to do a collective + * read, it does mean that all ranks are here, so we can use MPI_File_set_view(). + */ + if (H5CX_get_io_xfer_mode(&xfer_mode) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode") + + if (xfer_mode == H5FD_MPIO_COLLECTIVE) { + /* Build MPI types, etc. 
*/ + if (H5FD__mpio_vector_build_types(count, types, addrs, sizes, (H5_flexible_const_ptr_t *)bufs, + &s_addrs, &s_sizes, (H5_flexible_const_ptr_t **)&s_bufs, + &vector_was_sorted, &mpi_off, + (H5_flexible_const_ptr_t *)&mpi_bufs_base, &size_i, &buf_type, + &buf_type_created, &file_type, &file_type_created, &unused) < 0) + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't build MPI datatypes for I/O") + + /* free sorted addrs vector if it exists */ + if (!vector_was_sorted) + if (s_addrs) { + HDfree(s_addrs); + s_addrs = NULL; + } + + /* Portably initialize MPI status variable */ + HDmemset(&mpi_stat, 0, sizeof(mpi_stat)); + +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_r_flag) + HDfprintf(stdout, "%s: mpi_off = %ld size_i = %d\n", __func__, (long)mpi_off, size_i); +#endif + + /* Setup the file view. */ + if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, mpi_off, MPI_BYTE, file_type, + H5FD_mpi_native_g, file->info))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) + + /* Reset mpi_off to 0 since the view now starts at the data offset */ + if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, &mpi_off) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0") + + /* Get the collective_opt property to check whether the application wants to do IO individually. + */ + if (H5CX_get_mpio_coll_opt(&coll_opt_mode) < 0) + + HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O collective_op property") + + /* Read the data. 
*/ +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_r_flag) + HDfprintf(stdout, "%s: using MPIO collective mode\n", __func__); +#endif + if (coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO) { +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_r_flag) + HDfprintf(stdout, "%s: doing MPI collective IO\n", __func__); +#endif + /* Check whether we should read from rank 0 and broadcast to other ranks */ + if (H5CX_get_mpio_rank0_bcast()) { +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_r_flag) + HDfprintf(stdout, "%s: doing read-rank0-and-MPI_Bcast\n", __func__); +#endif + /* Indicate path we've taken */ + rank0_bcast = TRUE; + + /* Read on rank 0 Bcast to other ranks */ + if (file->mpi_rank == 0) + if (MPI_SUCCESS != (mpi_code = MPI_File_read_at(file->f, mpi_off, mpi_bufs_base, size_i, + buf_type, &mpi_stat))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at_all failed", mpi_code) + if (MPI_SUCCESS != (mpi_code = MPI_Bcast(mpi_bufs_base, size_i, buf_type, 0, file->comm))) + HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_code) + } /* end if */ + else if (MPI_SUCCESS != (mpi_code = MPI_File_read_at_all(file->f, mpi_off, mpi_bufs_base, size_i, + buf_type, &mpi_stat))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at_all failed", mpi_code) + } /* end if */ + else if (size_i > 0) { +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_r_flag) + HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__); +#endif + + if (MPI_SUCCESS != + (mpi_code = MPI_File_read_at(file->f, mpi_off, mpi_bufs_base, size_i, buf_type, &mpi_stat))) + + HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code) + + } /* end else */ + + /* Reset the file view */ + if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, + H5FD_mpi_native_g, file->info))) + HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code) + + /* Only retrieve bytes read if this rank _actually_ participated in I/O */ + if (!rank0_bcast || (rank0_bcast && file->mpi_rank == 0)) { + /* How many bytes were actually read? 
*/ +#if MPI_VERSION >= 3 + if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, buf_type, &bytes_read))) +#else + if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read))) +#endif + HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) + } /* end if */ + + /* If the rank0-bcast feature was used, broadcast the # of bytes read to + * other ranks, which didn't perform any I/O. + */ + /* NOTE: This could be optimized further to be combined with the broadcast + * of the data. (QAK - 2019/1/2) + * Or have rank 0 clear the unread parts of the buffer prior to + * the bcast. (NAF - 2021/9/15) + */ + if (rank0_bcast) +#if MPI_VERSION >= 3 + if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_COUNT, 0, file->comm)) +#else + if (MPI_SUCCESS != MPI_Bcast(&bytes_read, 1, MPI_INT, 0, file->comm)) +#endif + HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", 0) + + /* Get the type's size */ +#if MPI_VERSION >= 3 + if (MPI_SUCCESS != (mpi_code = MPI_Type_size_x(buf_type, &type_size))) +#else + if (MPI_SUCCESS != (mpi_code = MPI_Type_size(buf_type, &type_size))) +#endif + HMPI_GOTO_ERROR(FAIL, "MPI_Type_size failed", mpi_code) + + /* Compute the actual number of bytes requested */ + io_size = type_size * size_i; + + /* Check for read failure */ + if (bytes_read < 0 || bytes_read > io_size) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file read failed") + + /* Check for incomplete read */ + n = io_size - bytes_read; + if (n > 0) { + i = (int)count - 1; + + /* Iterate over sorted array in reverse, filling in zeroes to + * sections of the buffers that were not read to */ + do { + HDassert(i >= 0); + +#if MPI_VERSION >= 3 + io_size = MIN(n, (MPI_Count)s_sizes[i]); + bytes_read = (MPI_Count)s_sizes[i] - io_size; +#else + io_size = MIN(n, (int)s_sizes[i]); + bytes_read = (int)s_sizes[i] - io_size; +#endif + HDassert(bytes_read >= 0); + + HDmemset((char *)s_bufs[i] + bytes_read, 0, (size_t)io_size); + + n -= io_size; + i--; + } while (n > 0); + } + } + else if 
(count > 0) {
+        haddr_t max_addr   = HADDR_MAX;
+        hbool_t fixed_size = FALSE;
+        size_t  size;
+
+        /* The read is part of an independent operation. As a result,
+         * we can't use MPI_File_set_view() (since it is a collective operation),
+         * and thus we can't use the above code to construct the MPI datatypes.
+         * In the future, we could write code to detect when a contiguous slab
+         * in the file selection spans multiple vector elements and construct a
+         * memory datatype to match this larger block in the file, but for now
+         * just read in each element of the vector in a separate
+         * MPI_File_read_at() call.
+         *
+         * We could also just detect the case when the entire file selection is
+         * contiguous, which would allow us to use
+         * H5FD__mpio_vector_build_types() to construct the memory datatype.
+         */
+
+#ifdef H5FDmpio_DEBUG
+        if (H5FD_mpio_debug_r_flag)
+            HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__);
+#endif
+
+        /* Loop over vector elements */
+        for (i = 0; i < (int)count; i++) {
+            /* Convert address to mpi offset */
+            if (H5FD_mpi_haddr_to_MPIOff(addrs[i], &mpi_off) < 0)
+                HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
+
+            /* Calculate I/O size */
+            if (!fixed_size) {
+                if (sizes[i] == 0) {
+                    fixed_size = TRUE;
+                    size       = sizes[i - 1];
+                }
+                else {
+                    size = sizes[i];
+                }
+            }
+            size_i = (int)size;
+
+            if (size != (size_t)size_i) {
+                /* If HERE, then we need to work around the integer size limit
+                 * of 2GB. The input size_t size variable cannot fit into an integer,
+                 * but we can get around that limitation by creating a different datatype
+                 * and then setting the integer size (or element count) to 1 when using
+                 * the derived_type.
+ */ + + if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &buf_type) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype") + + buf_type_created = TRUE; + size_i = 1; + } + + /* Check if we actually need to do I/O */ + if (addrs[i] < max_addr) { + /* Portably initialize MPI status variable */ + HDmemset(&mpi_stat, 0, sizeof(mpi_stat)); + + /* Issue read */ + if (MPI_SUCCESS != + (mpi_code = MPI_File_read_at(file->f, mpi_off, bufs[i], size_i, buf_type, &mpi_stat))) + + HMPI_GOTO_ERROR(FAIL, "MPI_File_read_at failed", mpi_code) + + /* How many bytes were actually read? */ +#if MPI_VERSION >= 3 + if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&mpi_stat, MPI_BYTE, &bytes_read))) +#else + if (MPI_SUCCESS != (mpi_code = MPI_Get_elements(&mpi_stat, MPI_BYTE, &bytes_read))) +#endif + HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements failed", mpi_code) + + /* Compute the actual number of bytes requested */ +#if MPI_VERSION >= 3 + io_size = (MPI_Count)size; +#else + io_size = (int)size; +#endif + + /* Check for read failure */ + if (bytes_read < 0 || bytes_read > io_size) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "file read failed") + + /* + * If we didn't read the entire I/O, fill in zeroes beyond end of + * the physical MPI file and don't issue any more reads at higher + * addresses. 
+                 */
+                if ((n = (io_size - bytes_read)) > 0) {
+                    HDmemset((char *)bufs[i] + bytes_read, 0, (size_t)n);
+                    max_addr = addrs[i] + (haddr_t)bytes_read;
+                }
+            }
+            else {
+                /* Read is past the max address, fill in zeroes */
+                HDmemset((char *)bufs[i], 0, size);
+            }
+        }
+    }
+
+done:
+    if (buf_type_created) {
+        MPI_Type_free(&buf_type);
+    }
+
+    if (file_type_created) {
+        MPI_Type_free(&file_type);
+    }
+
+    /* free sorted vectors if they exist */
+    if (!vector_was_sorted) {
+        if (s_addrs) {
+            HDfree(s_addrs);
+            s_addrs = NULL;
+        }
+        if (s_sizes) {
+            HDfree(s_sizes);
+            s_sizes = NULL;
+        }
+        if (s_bufs) {
+            HDfree(s_bufs);
+            s_bufs = NULL;
+        }
+    }
+
+#ifdef H5FDmpio_DEBUG
+    if (H5FD_mpio_debug_t_flag)
+        HDfprintf(stdout, "%s: Leaving, proc %d: ret_value = %d\n", __func__, file->mpi_rank, ret_value);
+#endif
+
+    FUNC_LEAVE_NOAPI(ret_value)
+
+} /* end H5FD__mpio_read_vector() */
+
+/*-------------------------------------------------------------------------
+ * Function:    H5FD__mpio_write_vector
+ *
+ * Purpose:     The behaviour of this function depends on the value of
+ *              the io_xfer_mode obtained from the context.
+ *
+ *              If it is H5FD_MPIO_COLLECTIVE, this is a collective
+ *              operation, which allows us to use MPI_File_set_view, and
+ *              then perform the entire vector write in a single MPI call.
+ *
+ *              Do this (if count is positive), by constructing memory
+ *              and file derived types from the supplied vector, using
+ *              file type to set the file view, and then writing the
+ *              memory type to file. Note that this write is
+ *              either independent or collective depending on the
+ *              value of mpio_coll_opt -- again obtained from the context.
+ *
+ *              If count is zero, participate in the collective write
+ *              (if so configured) with an empty write.
+ *
+ *              Finally, set the file view back to its default state.
+ *
+ *              In contrast, if io_xfer_mode is H5FD_MPIO_INDEPENDENT,
+ *              this call is independent, and thus we cannot use
+ *              MPI_File_set_view().
+ * + * In this case, simply walk the vector, and issue an + * independent write for each entry. + * + * Return: Success: SUCCEED. + * Failure: FAIL. + * + * Programmer: John Mainzer + * March 15, 2021 + * + *------------------------------------------------------------------------- + */ +static herr_t +H5FD__mpio_write_vector(H5FD_t *_file, hid_t H5_ATTR_UNUSED dxpl_id, uint32_t count, H5FD_mem_t types[], + haddr_t addrs[], size_t sizes[], const void *bufs[]) +{ + H5FD_mpio_t * file = (H5FD_mpio_t *)_file; + hbool_t vector_was_sorted = TRUE; + haddr_t * s_addrs = NULL; + size_t * s_sizes = NULL; + const void ** s_bufs = NULL; + char unused = 0; /* Unused, except for non-NULL pointer value */ + const void * mpi_bufs_base = NULL; + MPI_Datatype buf_type = MPI_BYTE; /* MPI description of the selection in memory */ + hbool_t buf_type_created = FALSE; + MPI_Datatype file_type = MPI_BYTE; /* MPI description of the selection in file */ + hbool_t file_type_created = FALSE; + int i; + int mpi_code; /* MPI return code */ + MPI_Offset mpi_off = 0; + MPI_Status mpi_stat; /* Status from I/O operation */ + H5FD_mpio_xfer_t xfer_mode; /* I/O transfer mode */ + H5FD_mpio_collective_opt_t coll_opt_mode; /* whether we are doing collective or independent I/O */ + int size_i; +#ifdef H5FDmpio_DEBUG + hbool_t H5FD_mpio_debug_t_flag = (H5FD_mpio_debug_flags_s[(int)'t'] && H5FD_MPIO_TRACE_THIS_RANK(file)); + hbool_t H5FD_mpio_debug_w_flag = (H5FD_mpio_debug_flags_s[(int)'w'] && H5FD_MPIO_TRACE_THIS_RANK(file)); +#endif + haddr_t max_addr = 0; + herr_t ret_value = SUCCEED; + + FUNC_ENTER_STATIC + +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_t_flag) + HDfprintf(stderr, "%s: (%d) Entering\n", __func__, file->mpi_rank); +#endif + + /* Sanity checks */ + HDassert(file); + HDassert(H5FD_MPIO == file->pub.driver_id); + HDassert((types) || (count == 0)); + HDassert((addrs) || (count == 0)); + HDassert((sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* verify that the first 
elements of the sizes and types arrays are
+     * valid.
+     */
+    HDassert((count == 0) || (sizes[0] != 0));
+    HDassert((count == 0) || (types[0] != H5FD_MEM_NOLIST));
+
+    /* Verify that no data is written when between MPI_Barrier()s during file flush */
+
+    HDassert(!H5CX_get_mpi_file_flushing());
+
+    /* Get the transfer mode from the API context
+     *
+     * This flag is set to H5FD_MPIO_COLLECTIVE if the API call is
+     * collective, and to H5FD_MPIO_INDEPENDENT if it is not.
+     *
+     * While this doesn't mean that we are actually about to do a collective
+     * write, it does mean that all ranks are here, so we can use MPI_File_set_view().
+     */
+    if (H5CX_get_io_xfer_mode(&xfer_mode) < 0)
+        HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O transfer mode")
+
+    if (xfer_mode == H5FD_MPIO_COLLECTIVE) {
+        /* Build MPI types, etc. */
+        if (H5FD__mpio_vector_build_types(count, types, addrs, sizes, (H5_flexible_const_ptr_t *)bufs,
+                                          &s_addrs, &s_sizes, (H5_flexible_const_ptr_t **)&s_bufs,
+                                          &vector_was_sorted, &mpi_off,
+                                          (H5_flexible_const_ptr_t *)&mpi_bufs_base, &size_i, &buf_type,
+                                          &buf_type_created, &file_type, &file_type_created, &unused) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't build MPI datatypes for I/O")
+
+        /* Compute max addr written to */
+        if (count > 0)
+            max_addr = s_addrs[count - 1] + (haddr_t)(s_sizes[count - 1]);
+
+        /* free sorted vectors if they exist */
+        if (!vector_was_sorted) {
+            if (s_addrs) {
+                HDfree(s_addrs);
+                s_addrs = NULL;
+            }
+            if (s_sizes) {
+                HDfree(s_sizes);
+                s_sizes = NULL;
+            }
+            if (s_bufs) {
+                HDfree(s_bufs);
+                s_bufs = NULL;
+            }
+        }
+
+        /* Portably initialize MPI status variable */
+        HDmemset(&mpi_stat, 0, sizeof(MPI_Status));
+
+#ifdef H5FDmpio_DEBUG
+        if (H5FD_mpio_debug_w_flag)
+            HDfprintf(stdout, "%s: mpi_off = %ld size_i = %d\n", __func__, (long)mpi_off, size_i);
+#endif
+
+        /* Setup the file view. */
+        if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, mpi_off, MPI_BYTE, file_type,
+                                                         H5FD_mpi_native_g, file->info)))
+            HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
+
+        /* Reset mpi_off to 0 since the view now starts at the data offset */
+        if (H5FD_mpi_haddr_to_MPIOff((haddr_t)0, &mpi_off) < 0)
+            HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't set MPI off to 0")
+
+        /* Get the collective_opt property to check whether the application wants to do IO individually.
+         */
+        if (H5CX_get_mpio_coll_opt(&coll_opt_mode) < 0)
+            HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI-I/O collective_op property")
+
+        /* Write the data. */
+#ifdef H5FDmpio_DEBUG
+        if (H5FD_mpio_debug_w_flag)
+            HDfprintf(stdout, "%s: using MPIO collective mode\n", __func__);
+#endif
+
+        if (coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO) {
+#ifdef H5FDmpio_DEBUG
+            if (H5FD_mpio_debug_w_flag)
+                HDfprintf(stdout, "%s: doing MPI collective IO\n", __func__);
+#endif
+
+            if (MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(file->f, mpi_off, mpi_bufs_base, size_i,
+                                                                 buf_type, &mpi_stat)))
+                HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code)
+        } /* end if */
+        else if (size_i > 0) {
+#ifdef H5FDmpio_DEBUG
+            if (H5FD_mpio_debug_w_flag)
+                HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__);
+#endif
+
+            if (MPI_SUCCESS !=
+                (mpi_code = MPI_File_write_at(file->f, mpi_off, mpi_bufs_base, size_i, buf_type, &mpi_stat)))
+                HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at failed", mpi_code)
+        } /* end else */
+
+        /* Reset the file view */
+        if (MPI_SUCCESS != (mpi_code = MPI_File_set_view(file->f, (MPI_Offset)0, MPI_BYTE, MPI_BYTE,
+                                                         H5FD_mpi_native_g, file->info)))
+            HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
+    }
+    else if (count > 0) {
+        hbool_t fixed_size = FALSE;
+        size_t  size;
+
+        /* The write is part of an independent operation. As a result,
+         * we can't use MPI_File_set_view() (since it is a collective operation),
+         * and thus we can't use the above code to construct the MPI datatypes.
+         * In the future, we could write code to detect when a contiguous slab
+         * in the file selection spans multiple vector elements and construct a
+         * memory datatype to match this larger block in the file, but for now
+         * just write each element of the vector in a separate
+         * MPI_File_write_at() call.
+         *
+         * We could also just detect the case when the entire file selection is
+         * contiguous, which would allow us to use
+         * H5FD__mpio_vector_build_types() to construct the memory datatype.
+         */
+
+#ifdef H5FDmpio_DEBUG
+        if (H5FD_mpio_debug_w_flag)
+            HDfprintf(stdout, "%s: doing MPI independent IO\n", __func__);
+#endif
+
+        /* Loop over vector elements */
+        for (i = 0; i < (int)count; i++) {
+            /* Convert address to mpi offset */
+            if (H5FD_mpi_haddr_to_MPIOff(addrs[i], &mpi_off) < 0)
+                HGOTO_ERROR(H5E_INTERNAL, H5E_BADRANGE, FAIL, "can't convert from haddr to MPI off")
+
+            /* Calculate I/O size */
+            if (!fixed_size) {
+                if (sizes[i] == 0) {
+                    fixed_size = TRUE;
+                    size       = sizes[i - 1];
+                }
+                else {
+                    size = sizes[i];
+                }
+            }
+            size_i = (int)size;
+
+            if (size != (size_t)size_i) {
+                /* If HERE, then we need to work around the integer size limit
+                 * of 2GB. The input size_t size variable cannot fit into an integer,
+                 * but we can get around that limitation by creating a different datatype
+                 * and then setting the integer size (or element count) to 1 when using
+                 * the derived_type.
+ */ + + if (H5_mpio_create_large_type(size, 0, MPI_BYTE, &buf_type) < 0) + HGOTO_ERROR(H5E_INTERNAL, H5E_CANTGET, FAIL, "can't create MPI-I/O datatype") + + buf_type_created = TRUE; + size_i = 1; + } + + /* Perform write */ + if (MPI_SUCCESS != + (mpi_code = MPI_File_write_at(file->f, mpi_off, bufs[i], size_i, buf_type, &mpi_stat))) + + HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at failed", mpi_code) + + /* Check if this is the highest address written to so far */ + if (addrs[i] + size > max_addr) + max_addr = addrs[i] + size; + } + } + + /* Each process will keep track of its perceived EOF value locally, and + * ultimately we will reduce this value to the maximum amongst all + * processes, but until then keep the actual eof at HADDR_UNDEF just in + * case something bad happens before that point. (rather have a value + * we know is wrong sitting around rather than one that could only + * potentially be wrong.) + */ + file->eof = HADDR_UNDEF; + + /* check to see if the local eof has changed been extended, and update if so */ + if (max_addr > file->local_eof) + file->local_eof = max_addr; + +done: + if (buf_type_created) + MPI_Type_free(&buf_type); + + if (file_type_created) + MPI_Type_free(&file_type); + + /* Cleanup on error */ + if (ret_value < 0 && !vector_was_sorted) { + if (s_addrs) { + HDfree(s_addrs); + s_addrs = NULL; + } + if (s_sizes) { + HDfree(s_sizes); + s_sizes = NULL; + } + if (s_bufs) { + HDfree(s_bufs); + s_bufs = NULL; + } + } + + /* Make sure we cleaned up */ + HDassert(vector_was_sorted || !s_addrs); + HDassert(vector_was_sorted || !s_sizes); + HDassert(vector_was_sorted || !s_bufs); + +#ifdef H5FDmpio_DEBUG + if (H5FD_mpio_debug_t_flag) + HDfprintf(stdout, "%s: Leaving, proc %d: ret_value = %d\n", __func__, file->mpi_rank, ret_value); +#endif + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5FD__mpio_write_vector() */ + /*------------------------------------------------------------------------- * Function: H5FD__mpio_flush * diff --git a/src/H5FDmulti.c 
b/src/H5FDmulti.c index d9a6ce982a..20c538f061 100644 --- a/src/H5FDmulti.c +++ b/src/H5FDmulti.c @@ -176,6 +176,7 @@ static herr_t H5FD_multi_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, c /* The class struct */ static const H5FD_class_t H5FD_multi_g = { + H5FD_CLASS_VERSION, /* struct version */ H5_VFD_MULTI, /* value */ "multi", /* name */ HADDR_MAX, /* maxaddr */ @@ -204,6 +205,10 @@ static const H5FD_class_t H5FD_multi_g = { H5FD_multi_get_handle, /* get_handle */ H5FD_multi_read, /* read */ H5FD_multi_write, /* write */ + NULL, /*read_vector */ + NULL, /*write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ H5FD_multi_flush, /* flush */ H5FD_multi_truncate, /* truncate */ H5FD_multi_lock, /* lock */ diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h index 61b4c60dc4..bcbc69335c 100644 --- a/src/H5FDprivate.h +++ b/src/H5FDprivate.h @@ -24,6 +24,7 @@ /* Private headers needed by this file */ #include "H5Pprivate.h" /* Property lists */ +#include "H5Sprivate.h" /* Dataspaces */ /* * The MPI drivers are needed because there are @@ -94,6 +95,9 @@ typedef enum H5FD_get_driver_kind_t { H5FD_GET_DRIVER_BY_VALUE /* Value field is set */ } H5FD_get_driver_kind_t; +/* Forward declarations for prototype arguments */ +struct H5S_t; + /*****************************/ /* Library Private Variables */ /*****************************/ @@ -140,6 +144,22 @@ H5_DLL herr_t H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags) H5_DLL herr_t H5FD_get_fs_type_map(const H5FD_t *file, H5FD_mem_t *type_map); H5_DLL herr_t H5FD_read(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/); H5_DLL herr_t H5FD_write(H5FD_t *file, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf); +H5_DLL herr_t H5FD_read_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], + size_t sizes[], void *bufs[] /* out */); +H5_DLL herr_t H5FD_write_vector(H5FD_t *file, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], + 
size_t sizes[], const void *bufs[] /* out */); +H5_DLL herr_t H5FD_read_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, struct H5S_t **mem_spaces, + struct H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[], + void *bufs[] /* out */); +H5_DLL herr_t H5FD_write_selection(H5FD_t *file, H5FD_mem_t type, uint32_t count, struct H5S_t **mem_spaces, + struct H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[], + const void *bufs[]); +H5_DLL herr_t H5FD_read_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[], + hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], + void *bufs[] /* out */); +H5_DLL herr_t H5FD_write_selection_id(H5FD_t *file, H5FD_mem_t type, uint32_t count, hid_t mem_space_ids[], + hid_t file_space_ids[], haddr_t offsets[], size_t element_sizes[], + const void *bufs[]); H5_DLL herr_t H5FD_flush(H5FD_t *file, hbool_t closing); H5_DLL herr_t H5FD_truncate(H5FD_t *file, hbool_t closing); H5_DLL herr_t H5FD_lock(H5FD_t *file, hbool_t rw); @@ -152,6 +172,10 @@ H5_DLL herr_t H5FD_set_base_addr(H5FD_t *file, haddr_t base_addr); H5_DLL haddr_t H5FD_get_base_addr(const H5FD_t *file); H5_DLL herr_t H5FD_set_paged_aggr(H5FD_t *file, hbool_t paged); +H5_DLL herr_t H5FD_sort_vector_io_req(hbool_t *vector_was_sorted, uint32_t count, H5FD_mem_t types[], + haddr_t addrs[], size_t sizes[], H5_flexible_const_ptr_t bufs[], + H5FD_mem_t **s_types_ptr, haddr_t **s_addrs_ptr, size_t **s_sizes_ptr, + H5_flexible_const_ptr_t **s_bufs_ptr); H5_DLL herr_t H5FD_init(void); /* Function prototypes for MPI based VFDs*/ diff --git a/src/H5FDros3.c b/src/H5FDros3.c index 922dac5e08..fcce76d1e0 100644 --- a/src/H5FDros3.c +++ b/src/H5FDros3.c @@ -237,6 +237,7 @@ static herr_t H5FD__ros3_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing static herr_t H5FD__ros3_validate_config(const H5FD_ros3_fapl_t *fa); static const H5FD_class_t H5FD_ros3_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_ROS3_VALUE, /* 
value */ "ros3", /* name */ MAXADDR, /* maxaddr */ @@ -265,6 +266,10 @@ static const H5FD_class_t H5FD_ros3_g = { H5FD__ros3_get_handle, /* get_handle */ H5FD__ros3_read, /* read */ H5FD__ros3_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ NULL, /* flush */ H5FD__ros3_truncate, /* truncate */ NULL, /* lock */ diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c index 46f5fd4dbd..cc41707035 100644 --- a/src/H5FDsec2.c +++ b/src/H5FDsec2.c @@ -143,6 +143,7 @@ static herr_t H5FD__sec2_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flags, c void **output); static const H5FD_class_t H5FD_sec2_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_SEC2_VALUE, /* value */ "sec2", /* name */ MAXADDR, /* maxaddr */ @@ -171,6 +172,10 @@ static const H5FD_class_t H5FD_sec2_g = { H5FD__sec2_get_handle, /* get_handle */ H5FD__sec2_read, /* read */ H5FD__sec2_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ NULL, /* flush */ H5FD__sec2_truncate, /* truncate */ H5FD__sec2_lock, /* lock */ diff --git a/src/H5FDsplitter.c b/src/H5FDsplitter.c index 509c18e663..124c54f8aa 100644 --- a/src/H5FDsplitter.c +++ b/src/H5FDsplitter.c @@ -138,6 +138,7 @@ static herr_t H5FD__splitter_ctl(H5FD_t *_file, uint64_t op_code, uint64_t flag void **output); static const H5FD_class_t H5FD_splitter_g = { + H5FD_CLASS_VERSION, /* struct version */ H5FD_SPLITTER_VALUE, /* value */ "splitter", /* name */ MAXADDR, /* maxaddr */ @@ -166,6 +167,10 @@ static const H5FD_class_t H5FD_splitter_g = { H5FD__splitter_get_handle, /* get_handle */ H5FD__splitter_read, /* read */ H5FD__splitter_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ H5FD__splitter_flush, /* flush */ H5FD__splitter_truncate, /* truncate */ H5FD__splitter_lock, /* lock */ diff --git a/src/H5FDstdio.c 
b/src/H5FDstdio.c index 122379a0f5..6624685d70 100644 --- a/src/H5FDstdio.c +++ b/src/H5FDstdio.c @@ -183,41 +183,46 @@ static herr_t H5FD_stdio_unlock(H5FD_t *_file); static herr_t H5FD_stdio_delete(const char *filename, hid_t fapl_id); static const H5FD_class_t H5FD_stdio_g = { - H5_VFD_STDIO, /* value */ - "stdio", /* name */ - MAXADDR, /* maxaddr */ - H5F_CLOSE_WEAK, /* fc_degree */ - H5FD_stdio_term, /* terminate */ - NULL, /* sb_size */ - NULL, /* sb_encode */ - NULL, /* sb_decode */ - 0, /* fapl_size */ - NULL, /* fapl_get */ - NULL, /* fapl_copy */ - NULL, /* fapl_free */ - 0, /* dxpl_size */ - NULL, /* dxpl_copy */ - NULL, /* dxpl_free */ - H5FD_stdio_open, /* open */ - H5FD_stdio_close, /* close */ - H5FD_stdio_cmp, /* cmp */ - H5FD_stdio_query, /* query */ - NULL, /* get_type_map */ - H5FD_stdio_alloc, /* alloc */ - NULL, /* free */ - H5FD_stdio_get_eoa, /* get_eoa */ - H5FD_stdio_set_eoa, /* set_eoa */ - H5FD_stdio_get_eof, /* get_eof */ - H5FD_stdio_get_handle, /* get_handle */ - H5FD_stdio_read, /* read */ - H5FD_stdio_write, /* write */ - H5FD_stdio_flush, /* flush */ - H5FD_stdio_truncate, /* truncate */ - H5FD_stdio_lock, /* lock */ - H5FD_stdio_unlock, /* unlock */ - H5FD_stdio_delete, /* del */ - NULL, /* ctl */ - H5FD_FLMAP_DICHOTOMY /* fl_map */ + H5FD_CLASS_VERSION, /* struct version */ + H5_VFD_STDIO, /* value */ + "stdio", /* name */ + MAXADDR, /* maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ + H5FD_stdio_term, /* terminate */ + NULL, /* sb_size */ + NULL, /* sb_encode */ + NULL, /* sb_decode */ + 0, /* fapl_size */ + NULL, /* fapl_get */ + NULL, /* fapl_copy */ + NULL, /* fapl_free */ + 0, /* dxpl_size */ + NULL, /* dxpl_copy */ + NULL, /* dxpl_free */ + H5FD_stdio_open, /* open */ + H5FD_stdio_close, /* close */ + H5FD_stdio_cmp, /* cmp */ + H5FD_stdio_query, /* query */ + NULL, /* get_type_map */ + H5FD_stdio_alloc, /* alloc */ + NULL, /* free */ + H5FD_stdio_get_eoa, /* get_eoa */ + H5FD_stdio_set_eoa, /* set_eoa */ + H5FD_stdio_get_eof, 
/* get_eof */ + H5FD_stdio_get_handle, /* get_handle */ + H5FD_stdio_read, /* read */ + H5FD_stdio_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ + H5FD_stdio_flush, /* flush */ + H5FD_stdio_truncate, /* truncate */ + H5FD_stdio_lock, /* lock */ + H5FD_stdio_unlock, /* unlock */ + H5FD_stdio_delete, /* del */ + NULL, /* ctl */ + H5FD_FLMAP_DICHOTOMY /* fl_map */ }; /*------------------------------------------------------------------------- diff --git a/src/H5Fio.c b/src/H5Fio.c index 5a9d2c17c8..53fec97f85 100644 --- a/src/H5Fio.c +++ b/src/H5Fio.c @@ -233,11 +233,100 @@ H5F_block_write(H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size, const void /* Pass through page buffer layer */ if (H5PB_write(f->shared, map_type, addr, size, buf) < 0) HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "write through page buffer failed") - done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5F_block_write() */ +/*------------------------------------------------------------------------- + * Function: H5F_shared_select_read + * + * Purpose: Reads some data from a file/server/etc into a buffer. + * The location of the data is defined by the mem_spaces and + * file_spaces dataspace arrays, along with the offsets + * array. The addresses is relative to the base address for + * the file. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * May 3 2021 + * + *------------------------------------------------------------------------- + */ +herr_t +H5F_shared_select_read(H5F_shared_t *f_sh, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces, + H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[], void *bufs[] /* out */) +{ + H5FD_mem_t map_type; /* Mapped memory type */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(f_sh); + HDassert((mem_spaces) || (count == 0)); + HDassert((file_spaces) || (count == 0)); + HDassert((offsets) || (count == 0)); + HDassert((element_sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* Treat global heap as raw data */ + map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type; + + /* Pass down to file driver layer (bypass page buffer for now) */ + if (H5FD_read_selection(f_sh->lf, map_type, count, mem_spaces, file_spaces, offsets, element_sizes, + bufs) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "selection read through file driver failed") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5F_shared_select_read() */ + +/*------------------------------------------------------------------------- + * Function: H5F_shared_select_write + * + * Purpose: Writes some data from a buffer to a file/server/etc. + * The location of the data is defined by the mem_spaces and + * file_spaces dataspace arrays, along with the offsets + * array. The addresses is relative to the base address for + * the file. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * May 4 2021 + * + *------------------------------------------------------------------------- + */ +herr_t +H5F_shared_select_write(H5F_shared_t *f_sh, H5FD_mem_t type, uint32_t count, H5S_t **mem_spaces, + H5S_t **file_spaces, haddr_t offsets[], size_t element_sizes[], const void *bufs[]) +{ + H5FD_mem_t map_type; /* Mapped memory type */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI(FAIL) + + /* Sanity checks */ + HDassert(f_sh); + HDassert((mem_spaces) || (count == 0)); + HDassert((file_spaces) || (count == 0)); + HDassert((offsets) || (count == 0)); + HDassert((element_sizes) || (count == 0)); + HDassert((bufs) || (count == 0)); + + /* Treat global heap as raw data */ + map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type; + + /* Pass down to file driver layer (bypass page buffer for now) */ + if (H5FD_write_selection(f_sh->lf, map_type, count, mem_spaces, file_spaces, offsets, element_sizes, + bufs) < 0) + HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "selection write through file driver failed") + +done: + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5F_shared_select_write() */ + /*------------------------------------------------------------------------- * Function: H5F_flush_tagged_metadata * diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h index 67e153e83f..629aee1444 100644 --- a/src/H5Fprivate.h +++ b/src/H5Fprivate.h @@ -758,6 +758,7 @@ struct H5O_loc_t; struct H5HG_heap_t; struct H5VL_class_t; struct H5P_genplist_t; +struct H5S_t; /* Forward declarations for anonymous H5F objects */ @@ -923,6 +924,14 @@ H5_DLL herr_t H5F_shared_block_write(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_ const void *buf); H5_DLL herr_t H5F_block_write(H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf); +/* Functions that operate on selections of elements in the file */ +H5_DLL herr_t H5F_shared_select_read(H5F_shared_t *f_sh, H5FD_mem_t type, 
uint32_t count, + struct H5S_t **mem_spaces, struct H5S_t **file_spaces, haddr_t offsets[], + size_t element_sizes[], void *bufs[] /* out */); +H5_DLL herr_t H5F_shared_select_write(H5F_shared_t *f_sh, H5FD_mem_t type, uint32_t count, + struct H5S_t **mem_spaces, struct H5S_t **file_spaces, + haddr_t offsets[], size_t element_sizes[], const void *bufs[]); + /* Functions that flush or evict */ H5_DLL herr_t H5F_flush_tagged_metadata(H5F_t *f, haddr_t tag); H5_DLL herr_t H5F_evict_tagged_metadata(H5F_t *f, haddr_t tag); diff --git a/src/H5Gprivate.h b/src/H5Gprivate.h index 4cf462323f..d1725f6670 100644 --- a/src/H5Gprivate.h +++ b/src/H5Gprivate.h @@ -130,7 +130,7 @@ typedef enum { typedef int H5G_own_loc_t; /* Structure to store information about the name an object was opened with */ -typedef struct { +typedef struct H5G_name_t { H5RS_str_t *full_path_r; /* Path to object, as seen from root of current file mounting hierarchy */ H5RS_str_t *user_path_r; /* Path to object, as opened by user */ unsigned obj_hidden; /* Whether the object is visible in group hier. */ diff --git a/src/H5PB.c b/src/H5PB.c index 9ab87b09ed..ce8336ba60 100644 --- a/src/H5PB.c +++ b/src/H5PB.c @@ -1302,6 +1302,75 @@ done: FUNC_LEAVE_NOAPI(ret_value) } /* end H5PB_write() */ +/*------------------------------------------------------------------------- + * Function: H5PB_enabled + * + * Purpose: Check if the page buffer may be enabled for the specified + * file and data access type. 
+ * + * Return: Non-negative on success/Negative on failure + * + * Programmer: Neil Fortner + * + *------------------------------------------------------------------------- + */ +herr_t +H5PB_enabled(H5F_shared_t *f_sh, H5FD_mem_t type, hbool_t *enabled) +{ + H5PB_t *page_buf; /* Page buffering info for this file */ + hbool_t bypass_pb = FALSE; /* Whether to bypass page buffering */ + herr_t ret_value = SUCCEED; /* Return value */ + + FUNC_ENTER_NOAPI_NOERR + + /* Sanity checks */ + HDassert(f_sh); + + /* Get pointer to page buffer info for this file */ + page_buf = f_sh->page_buf; + +#ifdef H5_HAVE_PARALLEL + if (H5F_SHARED_HAS_FEATURE(f_sh, H5FD_FEAT_HAS_MPI)) { +#if 1 + bypass_pb = TRUE; +#else + /* MSC - why this stopped working ? */ + int mpi_size; + + if ((mpi_size = H5F_shared_mpi_get_size(f_sh)) < 0) + HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "can't retrieve MPI communicator size") + if (1 != mpi_size) + bypass_pb = TRUE; +#endif + } /* end if */ +#endif + + /* If page buffering is disabled, or if this is a parallel raw data access, + * bypass page buffering. Note that page buffering may still be disabled for + * large metadata access or large non-parallel raw data access, but this + * function doesn't take I/O size into account so if it returns TRUE the + * page buffer may still be disabled for some I/O. If it returns FALSE it is + * always disabled for this access type. 
+ */ + if (NULL == page_buf || (bypass_pb && H5FD_MEM_DRAW == type)) { + /* Update statistics, since wherever this function is called, if it + * returns FALSE, the calling function performs I/O avoiding the page + * buffer layer */ + if (page_buf) { + HDassert(type == H5FD_MEM_DRAW); + page_buf->bypasses[1]++; + } /* end if */ + + /* Page buffer is disabled, at least for this data access type */ + *enabled = FALSE; + } /* end if */ + else + /* Page buffer may be enabled */ + *enabled = TRUE; + + FUNC_LEAVE_NOAPI(ret_value) +} /* end H5PB_enabled() */ + /*------------------------------------------------------------------------- * Function: H5PB__insert_entry() * diff --git a/src/H5PBprivate.h b/src/H5PBprivate.h index e0197bfed2..f5d4536e6c 100644 --- a/src/H5PBprivate.h +++ b/src/H5PBprivate.h @@ -91,6 +91,7 @@ H5_DLL herr_t H5PB_update_entry(H5PB_t *page_buf, haddr_t addr, size_t size, con H5_DLL herr_t H5PB_remove_entry(const H5F_shared_t *f_sh, haddr_t addr); H5_DLL herr_t H5PB_read(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr, size_t size, void *buf /*out*/); H5_DLL herr_t H5PB_write(H5F_shared_t *f_sh, H5FD_mem_t type, haddr_t addr, size_t size, const void *buf); +H5_DLL herr_t H5PB_enabled(H5F_shared_t *f_sh, H5FD_mem_t type, hbool_t *enabled); /* Statistics routines */ H5_DLL herr_t H5PB_reset_stats(H5PB_t *page_buf); diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h index 6624096bf7..9731379ae2 100644 --- a/src/H5Tprivate.h +++ b/src/H5Tprivate.h @@ -101,6 +101,8 @@ typedef struct H5T_subset_info_t { } H5T_subset_info_t; /* Forward declarations for prototype arguments */ +struct H5G_loc_t; +struct H5G_name_t; struct H5O_shared_t; /* The native endianness of the platform */ @@ -120,14 +122,14 @@ H5_DLL size_t H5T_get_size(const H5T_t *dt); H5_DLL hbool_t H5T_get_force_conv(const H5T_t *dt); H5_DLL int H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset); H5_DLL herr_t H5T_encode(H5T_t *obj, unsigned char *buf, size_t *nalloc); -H5_DLL H5T_t * 
H5T_decode(size_t buf_size, const unsigned char *buf); -H5_DLL herr_t H5T_debug(const H5T_t *dt, FILE *stream); -H5_DLL struct H5O_loc_t *H5T_oloc(H5T_t *dt); -H5_DLL H5G_name_t *H5T_nameof(const H5T_t *dt); -H5_DLL htri_t H5T_is_immutable(const H5T_t *dt); -H5_DLL htri_t H5T_is_named(const H5T_t *dt); -H5_DLL herr_t H5T_convert_committed_datatype(H5T_t *dt, H5F_t *f); -H5_DLL htri_t H5T_is_relocatable(const H5T_t *dt); +H5_DLL H5T_t * H5T_decode(size_t buf_size, const unsigned char *buf); +H5_DLL herr_t H5T_debug(const H5T_t *dt, FILE *stream); +H5_DLL struct H5O_loc_t * H5T_oloc(H5T_t *dt); +H5_DLL struct H5G_name_t *H5T_nameof(const H5T_t *dt); +H5_DLL htri_t H5T_is_immutable(const H5T_t *dt); +H5_DLL htri_t H5T_is_named(const H5T_t *dt); +H5_DLL herr_t H5T_convert_committed_datatype(H5T_t *dt, H5F_t *f); +H5_DLL htri_t H5T_is_relocatable(const H5T_t *dt); H5_DLL H5T_path_t *H5T_path_find(const H5T_t *src, const H5T_t *dst); H5_DLL hbool_t H5T_path_noop(const H5T_path_t *p); H5_DLL H5T_bkg_t H5T_path_bkg(const H5T_path_t *p); @@ -159,7 +161,7 @@ H5_DLL herr_t H5T_invoke_vol_optional(H5T_t *dt, H5VL_optional_args_t *args, hi H5_DLL H5R_type_t H5T_get_ref_type(const H5T_t *dt); /* Operations on named datatypes */ -H5_DLL H5T_t *H5T_open(const H5G_loc_t *loc); +H5_DLL H5T_t *H5T_open(const struct H5G_loc_t *loc); H5_DLL int H5T_link(const H5T_t *type, int adjust); H5_DLL herr_t H5T_update_shared(H5T_t *type); diff --git a/src/H5private.h b/src/H5private.h index d4c055e2fd..a83ecf0c61 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -1979,6 +1979,11 @@ extern hbool_t H5_libterm_g; /* Is the library being shutdown? 
*/ #endif /* H5_HAVE_THREADSAFE */ +/* Extern global to determine if we should use selection I/O if available (this + * variable should be removed once selection I/O performs as well as the + * previous scalar I/O implementation */ +extern hbool_t H5_use_selection_io_g; + #ifdef H5_HAVE_CODESTACK /* Include required function stack header */ @@ -2473,6 +2478,16 @@ H5_DLL herr_t H5CX_pop(hbool_t update_dxpl_props); #define HDcompile_assert(e) do { typedef struct { unsigned int b: (e); } x; } while(0) */ +/* Private typedefs */ + +/* Union for const/non-const pointer for use by functions that manipulate + * pointers but do not write to their targets or return pointers to const + * specified locations. This helps us avoid compiler warnings. */ +typedef union { + void * vp; + const void *cvp; +} H5_flexible_const_ptr_t; + /* Private functions, not part of the publicly documented API */ H5_DLL herr_t H5_init_library(void); H5_DLL void H5_term_library(void); diff --git a/test/h5test.c b/test/h5test.c index 0528623896..ac15043833 100644 --- a/test/h5test.c +++ b/test/h5test.c @@ -1773,41 +1773,46 @@ dummy_vfd_write(H5FD_t H5_ATTR_UNUSED *_file, H5FD_mem_t H5_ATTR_UNUSED type, hi /* Dummy VFD with the minimum parameters to make a VFD that can be registered */ #define DUMMY_VFD_VALUE (H5FD_class_value_t)155 static const H5FD_class_t H5FD_dummy_g = { - DUMMY_VFD_VALUE, /* value */ - "dummy", /* name */ - 1, /* maxaddr */ - H5F_CLOSE_WEAK, /* fc_degree */ - NULL, /* terminate */ - NULL, /* sb_size */ - NULL, /* sb_encode */ - NULL, /* sb_decode */ - 0, /* fapl_size */ - NULL, /* fapl_get */ - NULL, /* fapl_copy */ - NULL, /* fapl_free */ - 0, /* dxpl_size */ - NULL, /* dxpl_copy */ - NULL, /* dxpl_free */ - dummy_vfd_open, /* open */ - dummy_vfd_close, /* close */ - NULL, /* cmp */ - NULL, /* query */ - NULL, /* get_type_map */ - NULL, /* alloc */ - NULL, /* free */ - dummy_vfd_get_eoa, /* get_eoa */ - dummy_vfd_set_eoa, /* set_eoa */ - dummy_vfd_get_eof, /* get_eof */ - NULL, 
/* get_handle */ - dummy_vfd_read, /* read */ - dummy_vfd_write, /* write */ - NULL, /* flush */ - NULL, /* truncate */ - NULL, /* lock */ - NULL, /* unlock */ - NULL, /* del */ - NULL, /* ctl */ - H5FD_FLMAP_DICHOTOMY /* fl_map */ + H5FD_CLASS_VERSION, /* struct version */ + DUMMY_VFD_VALUE, /* value */ + "dummy", /* name */ + 1, /* maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ + NULL, /* terminate */ + NULL, /* sb_size */ + NULL, /* sb_encode */ + NULL, /* sb_decode */ + 0, /* fapl_size */ + NULL, /* fapl_get */ + NULL, /* fapl_copy */ + NULL, /* fapl_free */ + 0, /* dxpl_size */ + NULL, /* dxpl_copy */ + NULL, /* dxpl_free */ + dummy_vfd_open, /* open */ + dummy_vfd_close, /* close */ + NULL, /* cmp */ + NULL, /* query */ + NULL, /* get_type_map */ + NULL, /* alloc */ + NULL, /* free */ + dummy_vfd_get_eoa, /* get_eoa */ + dummy_vfd_set_eoa, /* set_eoa */ + dummy_vfd_get_eof, /* get_eof */ + NULL, /* get_handle */ + dummy_vfd_read, /* read */ + dummy_vfd_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ + NULL, /* flush */ + NULL, /* truncate */ + NULL, /* lock */ + NULL, /* unlock */ + NULL, /* del */ + NULL, /* ctl */ + H5FD_FLMAP_DICHOTOMY /* fl_map */ }; /*------------------------------------------------------------------------- diff --git a/test/null_vfd_plugin.c b/test/null_vfd_plugin.c index f41da00f05..ca59939801 100644 --- a/test/null_vfd_plugin.c +++ b/test/null_vfd_plugin.c @@ -35,41 +35,46 @@ static herr_t H5FD_null_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr); static haddr_t H5FD_null_get_eof(const H5FD_t *_file, H5FD_mem_t type); static const H5FD_class_t H5FD_null_g = { - NULL_VFD_VALUE, /* value */ - NULL_VFD_NAME, /* name */ - 1, /* maxaddr */ - H5F_CLOSE_WEAK, /* fc_degree */ - NULL, /* terminate */ - NULL, /* sb_size */ - NULL, /* sb_encode */ - NULL, /* sb_decode */ - 0, /* fapl_size */ - NULL, /* fapl_get */ - NULL, /* fapl_copy */ - NULL, /* fapl_free 
*/ - 0, /* dxpl_size */ - NULL, /* dxpl_copy */ - NULL, /* dxpl_free */ - H5FD_null_open, /* open */ - H5FD_null_close, /* close */ - NULL, /* cmp */ - NULL, /* query */ - NULL, /* get_type_map */ - NULL, /* alloc */ - NULL, /* free */ - H5FD_null_get_eoa, /* get_eoa */ - H5FD_null_set_eoa, /* set_eoa */ - H5FD_null_get_eof, /* get_eof */ - NULL, /* get_handle */ - H5FD_null_read, /* read */ - H5FD_null_write, /* write */ - NULL, /* flush */ - NULL, /* truncate */ - NULL, /* lock */ - NULL, /* unlock */ - NULL, /* del */ - NULL, /* ctl */ - H5FD_FLMAP_DICHOTOMY /* fl_map */ + H5FD_CLASS_VERSION, /* struct version */ + NULL_VFD_VALUE, /* value */ + NULL_VFD_NAME, /* name */ + 1, /* maxaddr */ + H5F_CLOSE_WEAK, /* fc_degree */ + NULL, /* terminate */ + NULL, /* sb_size */ + NULL, /* sb_encode */ + NULL, /* sb_decode */ + 0, /* fapl_size */ + NULL, /* fapl_get */ + NULL, /* fapl_copy */ + NULL, /* fapl_free */ + 0, /* dxpl_size */ + NULL, /* dxpl_copy */ + NULL, /* dxpl_free */ + H5FD_null_open, /* open */ + H5FD_null_close, /* close */ + NULL, /* cmp */ + NULL, /* query */ + NULL, /* get_type_map */ + NULL, /* alloc */ + NULL, /* free */ + H5FD_null_get_eoa, /* get_eoa */ + H5FD_null_set_eoa, /* set_eoa */ + H5FD_null_get_eof, /* get_eof */ + NULL, /* get_handle */ + H5FD_null_read, /* read */ + H5FD_null_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ + NULL, /* flush */ + NULL, /* truncate */ + NULL, /* lock */ + NULL, /* unlock */ + NULL, /* del */ + NULL, /* ctl */ + H5FD_FLMAP_DICHOTOMY /* fl_map */ }; static H5FD_t * diff --git a/test/vfd.c b/test/vfd.c index 4f28766843..74889fd5bc 100644 --- a/test/vfd.c +++ b/test/vfd.c @@ -86,6 +86,60 @@ static int __k; HDprintf((__k % 4 == 0) ? 
" %02X" : " %02X", (unsigned char)(buf)[__k]); \ } /* end #define HEXPRINT() */ +/* Macro SET_SIZE() + * + * Helper macro to track the sizes of entries in a vector + * I/O call when stepping through the vector incrementally. + * Assuming that bool_size_fixed is initialized to FALSE + * before the scan, this macro will detect the sizes array + * optimization for the case in which all remaining entries + * are of the same size, and set size_value accordingly. + * + * JRM -- 3/11/21 + */ +#define SET_SIZE(bool_size_fixed, sizes_array, size_value, idx) \ + do { \ + if (!(bool_size_fixed)) { \ + \ + if ((sizes_array)[idx] == 0) { \ + \ + HDassert((idx) > 0); \ + (bool_size_fixed) = TRUE; \ + } \ + else { \ + \ + (size_value) = (sizes_array)[idx]; \ + } \ + } \ + } while (FALSE) + +/* Macro SET_TYPE() + * + * Helper macro to track the types of entries in a vector + * I/O call when stepping through the vector incrementally. + * Assuming that bool_type_fixed is initialized to FALSE + * before the scan, this macro will detect the types array + * optimization for the case in which all remaining entries + * are of the same type, and set type_value accordingly. + * + * JRM -- 3/11/21 + */ +#define SET_TYPE(bool_type_fixed, types_array, type_value, idx) \ + do { \ + if (!(bool_type_fixed)) { \ + \ + if ((types_array)[idx] == H5FD_MEM_NOLIST) { \ + \ + HDassert((idx) > 0); \ + (bool_type_fixed) = TRUE; \ + } \ + else { \ + \ + (type_value) = (types_array)[idx]; \ + } \ + } \ + } while (FALSE) + /* Helper structure to pass around dataset information. */ struct splitter_dataset_def { @@ -3420,6 +3474,60 @@ error: #undef SPLITTER_TEST_FAULT +/***************************************************************************** + * + * Function setup_rand() + * + * Purpose: Use gettimeofday() to obtain a seed for rand(), print the + * seed to stdout, and then pass it to srand(). + * + * This is a version of the same routine in + * testpar/t_cache.c modified for use in serial tests. 
+ * + * Return: void. + * + * Programmer: JRM -- 6/20/20 + * + *****************************************************************************/ +static void +setup_rand(void) +{ + hbool_t use_predefined_seed = FALSE; + unsigned predefined_seed = 18669; + unsigned seed; + struct timeval tv; + + if (use_predefined_seed) { + + seed = predefined_seed; + + HDfprintf(stdout, "\n%s: predefined_seed = %d.\n\n", __func__, seed); + HDfflush(stdout); + + HDsrand(seed); + } + else { + + if (HDgettimeofday(&tv, NULL) != 0) { + + HDfprintf(stdout, "\n%s: gettimeofday() failed -- srand() not called.\n\n", __func__); + HDfflush(stdout); + } + else { + + seed = (unsigned)tv.tv_usec; + + HDfprintf(stdout, "\n%s: seed = %d.\n\n", __func__, seed); + HDfflush(stdout); + + HDsrand(seed); + } + } + + return; + +} /* setup_rand() */ + /* * Callback implementations for ctl feature testing VFD */ @@ -3488,6 +3596,7 @@ H5FD__ctl_test_vfd_ctl(H5FD_t H5_ATTR_UNUSED *_file, uint64_t op_code, uint64_t /* Minimal VFD for ctl feature tests */ static const H5FD_class_t H5FD_ctl_test_vfd_g = { + H5FD_CLASS_VERSION, /* struct version */ (H5FD_class_value_t)201, /* value */ "ctl_test_vfd", /* name */ HADDR_MAX, /* maxaddr */ @@ -3516,6 +3625,10 @@ static const H5FD_class_t H5FD_ctl_test_vfd_g = { NULL, /* get_handle */ H5FD__ctl_test_vfd_read, /* read */ H5FD__ctl_test_vfd_write, /* write */ + NULL, /* read_vector */ + NULL, /* write_vector */ + NULL, /* read_selection */ + NULL, /* write_selection */ NULL, /* flush */ NULL, /* truncate */ NULL, /* lock */ @@ -3914,6 +4027,1917 @@ error: return -1; } +/*------------------------------------------------------------------------- + * Function: test_vector_io__setup_v + * + * Purpose: Construct and initialize a vector of I/O requests used + * to test vector I/O. Note that while the vectors are + * allocated and initialized, they are not assigned + * base addresses. + * + * All arrays parameters are presumed to be of length + * count. 
+ * + * Return: Return TRUE if successful, and FALSE if any errors + * are encountered. + * + * Programmer: John Mainzer + * 6/21/20 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static hbool_t +test_vector_io__setup_v(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + void *write_bufs[], void *read_bufs[], char base_fill_char) +{ + hbool_t result = TRUE; /* will set to FALSE on failure */ + char fill_char = base_fill_char; + uint32_t i; + uint32_t j; + H5FD_mem_t mem_types[6] = {H5FD_MEM_SUPER, H5FD_MEM_BTREE, H5FD_MEM_DRAW, + H5FD_MEM_GHEAP, H5FD_MEM_LHEAP, H5FD_MEM_OHDR}; + + /* set the arrays of pointers to the write and read buffers to NULL, + * so that we can release memory on failure. + */ + for (i = 0; i < count; i++) { + + write_bufs[i] = NULL; + read_bufs[i] = NULL; + } + + for (i = 0; i < count; i++) { + + types[i] = mem_types[i % 6]; + + addrs[i] = HADDR_UNDEF; + + sizes[i] = (size_t)((rand() & 1023) + 1); + + write_bufs[i] = HDmalloc(sizes[i] + 1); + read_bufs[i] = HDmalloc(sizes[i] + 1); + + if ((NULL == write_bufs[i]) || (NULL == read_bufs[i])) { + + HDfprintf(stderr, "%s: can't malloc read / write bufs.\n", __func__); + result = FALSE; + break; + } + + for (j = 0; j < sizes[i]; j++) { + + ((char *)(write_bufs[i]))[j] = fill_char; + ((char *)(read_bufs[i]))[j] = '\0'; + } + + ((char *)(write_bufs[i]))[sizes[i]] = '\0'; + ((char *)(read_bufs[i]))[sizes[i]] = '\0'; + + fill_char++; + } + + if (!result) { /* free buffers */ + + for (i = 0; i < count; i++) { + + if (write_bufs[i]) { + + HDfree(write_bufs[i]); + write_bufs[i] = NULL; + } + + if (read_bufs[i]) { + + HDfree(read_bufs[i]); + read_bufs[i] = NULL; + } + } + } + + return (result); + +} /* end test_vector_io__setup_v() */ + +/*------------------------------------------------------------------------- + * Function: test_vector_io__setup_fixed_size_v + * + * Purpose: To test the optimization allowing 
short sizes and types + * arrays, construct and initialize a vector of I/O requests + * with each request of the same size and type, and use the + * optimizatin to allow reduced length sizes and types + * vectors. Since the function is supplied with types and + * sizes vectors of length count, simulate shorter vectors + * by initializing the sizes and types vectors to values + * that will cause failure if used. + * + * All arrays parameters are presumed to be of length + * count. Count is presumed to be a power of 2, and at + * least 2. + * + * Return: Return TRUE if successful, and FALSE if any errors + * are encountered. + * + * Programmer: John Mainzer + * 3/10/21 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static hbool_t +test_vector_io__setup_fixed_size_v(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + void *write_bufs[], void *read_bufs[], char base_fill_char) +{ + hbool_t result = TRUE; /* will set to FALSE on failure */ + char fill_char = base_fill_char; + uint32_t fix_point; + uint32_t i; + uint32_t j; + uint32_t k; + H5FD_mem_t mem_types[6] = {H5FD_MEM_SUPER, H5FD_MEM_BTREE, H5FD_MEM_DRAW, + H5FD_MEM_GHEAP, H5FD_MEM_LHEAP, H5FD_MEM_OHDR}; + + /* set the arrays of pointers to the write and read buffers to NULL, + * so that we can release memory on failure. + * + * Set the types[] and sizes[] arrays to invalid / improbable values + * so that use of these values will trigger failures. + */ + for (i = 0; i < count; i++) { + + write_bufs[i] = NULL; + read_bufs[i] = NULL; + types[i] = H5FD_MEM_NTYPES; + sizes[i] = SIZE_MAX; + } + + /* randomly select the point in the vector after which all entries are + * fixed at the same size and type. Observe that 0 <= fix_point < + * count / 2. 
+ */ + fix_point = ((uint32_t)rand() & (count - 1)) / 2; + + HDassert(fix_point < count / 2); + + for (i = 0; i < count; i++) { + + if (i <= fix_point) { + + types[i] = mem_types[i % 6]; + + addrs[i] = HADDR_UNDEF; + + sizes[i] = (size_t)((rand() & 1023) + 1); + + write_bufs[i] = HDmalloc(sizes[i] + 1); + read_bufs[i] = HDmalloc(sizes[i] + 1); + } + else { + + if (i == fix_point + 1) { + + /* set the sentinels that indicate that all remaining + * types and sizes are the same as the previous value. + */ + types[i] = H5FD_MEM_NOLIST; + sizes[i] = 0; + } + + addrs[i] = HADDR_UNDEF; + + write_bufs[i] = HDmalloc(sizes[fix_point] + 1); + read_bufs[i] = HDmalloc(sizes[fix_point] + 1); + } + + if ((NULL == write_bufs[i]) || (NULL == read_bufs[i])) { + + HDfprintf(stderr, "%s: can't malloc read / write bufs.\n", __func__); + result = FALSE; + break; + } + + /* need to avoid examining sizes beyond the fix_point */ + k = MIN(i, fix_point); + + for (j = 0; j < sizes[k]; j++) { + + ((char *)(write_bufs[i]))[j] = fill_char; + ((char *)(read_bufs[i]))[j] = '\0'; + } + + ((char *)(write_bufs[i]))[sizes[k]] = '\0'; + ((char *)(read_bufs[i]))[sizes[k]] = '\0'; + + fill_char++; + } + + if (!result) { /* free buffers */ + + for (i = 0; i < count; i++) { + + if (write_bufs[i]) { + + HDfree(write_bufs[i]); + write_bufs[i] = NULL; + } + + if (read_bufs[i]) { + + HDfree(read_bufs[i]); + read_bufs[i] = NULL; + } + } + } + + return (result); + +} /* end test_vector_io__setup_fixed_size_v() */ + +/*------------------------------------------------------------------------- + * Function: test_vector_io__read_v_indiv + * + * Purpose: Read the supplied vector as a sequence of individual + * reads. + * + * All arrays parameters are presumed to be of length + * count. + * + * Return: Return TRUE if successful, and FALSE if any errors + * are encountered. + * + * Programmer: John Mainzer + * 6/21/20 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static hbool_t +test_vector_io__read_v_indiv(H5FD_t *lf, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + void *read_bufs[]) +{ + hbool_t size_fixed = FALSE; + hbool_t type_fixed = FALSE; + hbool_t result = TRUE; /* will set to FALSE on failure */ + hbool_t verbose = FALSE; + uint32_t i; + size_t size = SIZE_MAX; + H5FD_mem_t type = H5FD_MEM_NTYPES; + + for (i = 0; i < count; i++) { + + SET_SIZE(size_fixed, sizes, size, i); + + SET_TYPE(type_fixed, types, type, i); + + if (H5FDread(lf, type, H5P_DEFAULT, addrs[i], size, read_bufs[i]) < 0) { + + if (verbose) { + + HDfprintf(stdout, "%s: H5FDread() failed on entry %d.\n", __func__, i); + } + result = FALSE; + break; + } + } + + return (result); + +} /* end test_vector_io__read_v_indiv() */ + +/*------------------------------------------------------------------------- + * Function: test_vector_io__write_v_indiv + * + * Purpose: Write the supplied vector as a sequence of individual + * writes. + * + * All arrays parameters are presumed to be of length + * count. + * + * Return: Return TRUE if successful, and FALSE if any errors + * are encountered. + * + * Programmer: John Mainzer + * 6/21/20 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static hbool_t +test_vector_io__write_v_indiv(H5FD_t *lf, uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + void *write_bufs[]) +{ + hbool_t size_fixed = FALSE; + hbool_t type_fixed = FALSE; + hbool_t result = TRUE; /* will set to FALSE on failure */ + hbool_t verbose = FALSE; + uint32_t i; + size_t size = SIZE_MAX; + H5FD_mem_t type = H5FD_MEM_NTYPES; + + for (i = 0; i < count; i++) { + + SET_SIZE(size_fixed, sizes, size, i); + + SET_TYPE(type_fixed, types, type, i); + + if (H5FDwrite(lf, type, H5P_DEFAULT, addrs[i], size, write_bufs[i]) < 0) { + + if (verbose) { + + HDfprintf(stdout, "%s: HDwrite() failed on entry %d.\n", __func__, i); + } + result = FALSE; + break; + } + } + + return (result); + +} /* end test_vector_io__write_v_indiv() */ + +/*------------------------------------------------------------------------- + * + * Function: test_vector_io__verify_v + * + * Purpose: Verify that the read and write buffers of the supplied + * vectors are identical. + * + * Return: TRUE if the read and write vectors are identical, and + * FALSE otherwise. + * + * Programmer: John Mainzer + * 6/21/20 + * + * Changes: None. 
+ * + *------------------------------------------------------------------------- + */ + +static hbool_t +test_vector_io__verify_v(uint32_t count, H5FD_mem_t types[], size_t sizes[], void *write_bufs[], + void *read_bufs[], const char *name) +{ + hbool_t size_fixed = FALSE; + hbool_t type_fixed = FALSE; + hbool_t identical = TRUE; + hbool_t verbose = TRUE; + uint32_t i; + size_t j; + uint32_t buf_size; + char * w_buf; + char * r_buf; + const char *mem_type_names[7] = {"H5FD_MEM_DEFAULT", "H5FD_MEM_SUPER", "H5FD_MEM_BTREE", "H5FD_MEM_DRAW", + "H5FD_MEM_GHEAP", "H5FD_MEM_LHEAP", "H5FD_MEM_OHDR"}; + size_t size = SIZE_MAX; + H5FD_mem_t type = H5FD_MEM_NTYPES; + + i = 0; + + while ((i < count) && (identical)) { + + SET_SIZE(size_fixed, sizes, size, i); + + SET_TYPE(type_fixed, types, type, i); + + w_buf = (char *)(write_bufs[i]); + r_buf = (char *)(read_bufs[i]); + + j = 0; + while ((j < size) && (identical)) { + + if (w_buf[j] != r_buf[j]) { + + identical = FALSE; + + if (verbose) { + + HDfprintf(stdout, "\n\nread/write buf mismatch in vector/entry"); + HDfprintf(stdout, "\"%s\"/%u at offset %llu/%llu w/r = %c/%c type = %s\n\n", name, + (unsigned)i, (long long unsigned)j, (long long unsigned)buf_size, w_buf[j], + r_buf[j], mem_type_names[type]); + } + } + j++; + } + i++; + } + + return (identical); + +} /* end test_vector_io__verify_v() */ + +/*------------------------------------------------------------------------- + * + * Function: test_vector_io__dump_test_vectors + * + * Purpose: Print a set of test vectors to stdout. + * Vectors are assumed to be of length count, and + * buffers must be either NULL, or null terminate strings + * of char. + * + * Return: void. + * + * Programmer: John Mainzer + * 6/21/20 + * + * Changes: None. 
+ * + *------------------------------------------------------------------------- + */ + +static void +test_vector_io__dump_test_vectors(uint32_t count, H5FD_mem_t types[], haddr_t addrs[], size_t sizes[], + void *write_bufs[], void *read_bufs[], const char *name) +{ + hbool_t size_fixed = FALSE; + hbool_t type_fixed = FALSE; + uint32_t i; + const char *mem_type_names[7] = {"H5FD_MEM_DEFAULT", "H5FD_MEM_SUPER", "H5FD_MEM_BTREE", "H5FD_MEM_DRAW", + "H5FD_MEM_GHEAP", "H5FD_MEM_LHEAP", "H5FD_MEM_OHDR"}; + size_t size = SIZE_MAX; + H5FD_mem_t type = H5FD_MEM_NTYPES; + + char *w_buf; + char *r_buf; + + HDfprintf(stdout, "\n\nDumping test vector \"%s\" of length %d\n\n", name, count); + + for (i = 0; i < count; i++) { + + SET_SIZE(size_fixed, sizes, size, i); + + SET_TYPE(type_fixed, types, type, i); + + HDassert((H5FD_MEM_DEFAULT <= type) && (type <= H5FD_MEM_OHDR)); + + w_buf = (char *)(write_bufs[i]); + + if (read_bufs) { + + r_buf = (char *)(read_bufs[i]); + } + else { + + r_buf = NULL; + } + + HDfprintf(stdout, "%u: addr/len = %llu/%llu, type = %s, w_buf = \"%s\"\n", (unsigned)i, + (long long unsigned)(addrs[i]), (long long unsigned)(size), mem_type_names[type], w_buf); + + if (r_buf) { + + HDfprintf(stdout, " r_buf = \"%s\"\n", r_buf); + } + } + + return; + +} /* end test_vector_io__dump_test_vectors() */ + +/*------------------------------------------------------------------------- + * Function: test_vector_io + * + * Purpose: Test I/O using the vector I/O VFD public VFD calls. + * + * Test proceeds as follows: + * + * 1) read / write vectors and verify results + * + * 2) write individual / read vector and verify results + * + * 3) write vector / read individual and verify results + * + * 4) Close and then re-open the file, verify data written + * above. + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: John Mainzer + * 6/20/20 + * + * Changes: None. 
+ * + *------------------------------------------------------------------------- + */ +#define VECTOR_LEN 16 + +static herr_t +test_vector_io(const char *vfd_name) +{ + char test_title[80]; + hbool_t size_fixed_0 = FALSE; /* whether remaining entry */ + hbool_t size_fixed_1 = FALSE; /* sizes in vector are fixed. */ + hbool_t size_fixed_2 = FALSE; /* */ + hbool_t type_fixed_0 = FALSE; /* whether remaining entry */ + hbool_t type_fixed_1 = FALSE; /* types in vector are fixed. */ + hbool_t type_fixed_2 = FALSE; /* */ + hbool_t verbose = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + haddr_t eoa; /* file eoa */ + char filename[1024]; /* filename */ + char * buf; /* tmp ptr to buf */ + unsigned flags = 0; /* file open flags */ + H5FD_t * lf; /* VFD struct ptr */ + uint32_t i; /* index */ + uint32_t j; /* index */ + uint32_t count = VECTOR_LEN; /* length of vectors */ + H5FD_mem_t types_0[VECTOR_LEN]; /* types vector */ + H5FD_mem_t types_1[VECTOR_LEN]; /* types vector */ + H5FD_mem_t types_2[VECTOR_LEN]; /* types vector */ + H5FD_mem_t f_types_0[VECTOR_LEN]; /* fixed types vector */ + H5FD_mem_t f_types_1[VECTOR_LEN]; /* fixed types vector */ + H5FD_mem_t f_types_2[VECTOR_LEN]; /* fixed types vector */ + H5FD_mem_t f_type_0 = H5FD_MEM_NTYPES; /* current type for f vector 0 */ + H5FD_mem_t f_type_1 = H5FD_MEM_NTYPES; /* current type for f vector 1 */ + H5FD_mem_t f_type_2 = H5FD_MEM_NTYPES; /* current type for f vector 2 */ + haddr_t addrs_0[VECTOR_LEN]; /* addresses vector */ + haddr_t addrs_1[VECTOR_LEN]; /* addresses vector */ + haddr_t addrs_2[VECTOR_LEN]; /* addresses vector */ + haddr_t f_addrs_0[VECTOR_LEN]; /* fixed addresses vector */ + haddr_t f_addrs_1[VECTOR_LEN]; /* fixed addresses vector */ + haddr_t f_addrs_2[VECTOR_LEN]; /* fixed addresses vector */ + size_t sizes_0[VECTOR_LEN]; /* sizes vector */ + size_t sizes_1[VECTOR_LEN]; /* sizes vector */ + size_t sizes_2[VECTOR_LEN]; /* sizes vector */ + size_t f_sizes_0[VECTOR_LEN]; /* fixed 
sizes vector */ + size_t f_sizes_1[VECTOR_LEN]; /* fixed sizes vector */ + size_t f_sizes_2[VECTOR_LEN]; /* fixed sizes vector */ + size_t f_size_0 = 0; /* current size for f vector 0 */ + size_t f_size_1 = 0; /* current size for f vector 1 */ + size_t f_size_2 = 0; /* current size for f vector 2 */ + void * write_bufs_0[VECTOR_LEN]; /* write bufs vector */ + void * write_bufs_1[VECTOR_LEN]; /* write bufs vector */ + void * write_bufs_2[VECTOR_LEN]; /* write bufs vector */ + void * f_write_bufs_0[VECTOR_LEN]; /* fixed write bufs vector */ + void * f_write_bufs_1[VECTOR_LEN]; /* fixed write bufs vector */ + void * f_write_bufs_2[VECTOR_LEN]; /* fixed write bufs vector */ + void * read_bufs_0[VECTOR_LEN]; /* read bufs vector */ + void * read_bufs_1[VECTOR_LEN]; /* read bufs vector */ + void * read_bufs_2[VECTOR_LEN]; /* read bufs vector */ + void * f_read_bufs_0[VECTOR_LEN]; /* fixed read bufs vector */ + void * f_read_bufs_1[VECTOR_LEN]; /* fixed read bufs vector */ + void * f_read_bufs_2[VECTOR_LEN]; /* fixed read bufs vector */ + + HDsnprintf(test_title, sizeof(test_title), "vector I/O with %s VFD", vfd_name); + + TESTING(test_title); + + /* Set property list and file name for target driver */ + + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + TEST_ERROR; + + if (HDstrcmp(vfd_name, "sec2") == 0) { + + if (H5Pset_fapl_sec2(fapl_id) < 0) + TEST_ERROR; + + h5_fixname(FILENAME[0], fapl_id, filename, sizeof(filename)); + } + else if (HDstrcmp(vfd_name, "stdio") == 0) { + + if (H5Pset_fapl_stdio(fapl_id) < 0) + TEST_ERROR; + + h5_fixname(FILENAME[7], fapl_id, filename, sizeof filename); + } + else { + + HDfprintf(stdout, "un-supported VFD\n"); + TEST_ERROR + } + + /* setup the test vectors -- note that addresses are not set until + * we allocate space via the file driver. 
+ */ + if (!(test_vector_io__setup_v(count, types_0, addrs_0, sizes_0, write_bufs_0, read_bufs_0, 'a') && + test_vector_io__setup_v(count, types_1, addrs_1, sizes_1, write_bufs_1, read_bufs_1, 'e') && + test_vector_io__setup_v(count, types_2, addrs_2, sizes_2, write_bufs_2, read_bufs_2, 'A'))) + TEST_ERROR; + + if (!(test_vector_io__setup_fixed_size_v(count, f_types_0, f_addrs_0, f_sizes_0, f_write_bufs_0, + f_read_bufs_0, 'b') && + test_vector_io__setup_fixed_size_v(count, f_types_1, f_addrs_1, f_sizes_1, f_write_bufs_1, + f_read_bufs_1, 'f') && + test_vector_io__setup_fixed_size_v(count, f_types_2, f_addrs_2, f_sizes_2, f_write_bufs_2, + f_read_bufs_2, 'B'))) + TEST_ERROR; + + flags = H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC; + + if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF))) + TEST_ERROR; + + /* allocate space for the data in the test vectors */ + for (i = 0; i < count; i++) { + + addrs_0[i] = H5FDalloc(lf, types_0[i], H5P_DEFAULT, (hsize_t)(sizes_0[i])); + addrs_1[i] = H5FDalloc(lf, types_1[i], H5P_DEFAULT, (hsize_t)(sizes_1[i])); + addrs_2[i] = H5FDalloc(lf, types_2[i], H5P_DEFAULT, (hsize_t)(sizes_2[i])); + + if ((addrs_0[i] == HADDR_UNDEF) || (addrs_1[i] == HADDR_UNDEF) || (addrs_2[i] == HADDR_UNDEF)) + TEST_ERROR; + + SET_SIZE(size_fixed_0, f_sizes_0, f_size_0, i); + SET_SIZE(size_fixed_1, f_sizes_1, f_size_1, i); + SET_SIZE(size_fixed_2, f_sizes_2, f_size_2, i); + + SET_TYPE(type_fixed_0, f_types_0, f_type_0, i); + SET_TYPE(type_fixed_1, f_types_1, f_type_1, i); + SET_TYPE(type_fixed_2, f_types_2, f_type_2, i); + + f_addrs_0[i] = H5FDalloc(lf, f_type_0, H5P_DEFAULT, (hsize_t)(f_size_0)); + f_addrs_1[i] = H5FDalloc(lf, f_type_1, H5P_DEFAULT, (hsize_t)(f_size_1)); + f_addrs_2[i] = H5FDalloc(lf, f_type_2, H5P_DEFAULT, (hsize_t)(f_size_2)); + + if ((f_addrs_0[i] == HADDR_UNDEF) || (f_addrs_1[i] == HADDR_UNDEF) || (f_addrs_2[i] == HADDR_UNDEF)) + TEST_ERROR; + } + + if (verbose) { + + test_vector_io__dump_test_vectors(count, types_0, 
addrs_0, sizes_0, write_bufs_0, NULL, "zero"); + + test_vector_io__dump_test_vectors(count, types_1, addrs_1, sizes_1, write_bufs_1, NULL, "one"); + + test_vector_io__dump_test_vectors(count, types_2, addrs_2, sizes_2, write_bufs_2, NULL, "two"); + + test_vector_io__dump_test_vectors(count, f_types_0, f_addrs_0, f_sizes_0, f_write_bufs_0, NULL, + "fixed zero"); + + test_vector_io__dump_test_vectors(count, f_types_1, f_addrs_1, f_sizes_1, f_write_bufs_1, NULL, + "fixed one"); + + test_vector_io__dump_test_vectors(count, f_types_2, f_addrs_2, f_sizes_2, f_write_bufs_2, NULL, + "fixed two"); + } + + /* write and then read using vector I/O. First, read/write vector + * of length 1, then of length 2, then remainder of vector + */ + if (H5FDwrite_vector(lf, H5P_DEFAULT, 1, &(types_0[0]), &(addrs_0[0]), &(sizes_0[0]), + &(write_bufs_0[0])) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, 1, &(types_0[0]), &(addrs_0[0]), &(sizes_0[0]), &(read_bufs_0[0])) < + 0) + TEST_ERROR; + + if (H5FDwrite_vector(lf, H5P_DEFAULT, 2, &(types_0[1]), &(addrs_0[1]), &(sizes_0[1]), + &(write_bufs_0[1])) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, 2, &(types_0[1]), &(addrs_0[1]), &(sizes_0[1]), &(read_bufs_0[1])) < + 0) + TEST_ERROR; + + if (H5FDwrite_vector(lf, H5P_DEFAULT, count - 3, &(types_0[3]), &(addrs_0[3]), &(sizes_0[3]), + &(write_bufs_0[3])) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count - 3, &(types_0[3]), &(addrs_0[3]), &(sizes_0[3]), + &(read_bufs_0[3])) < 0) + TEST_ERROR; + + /* for fixed size / type vector, just write and read as single operations */ + if (H5FDwrite_vector(lf, H5P_DEFAULT, count, f_types_0, f_addrs_0, f_sizes_0, f_write_bufs_0) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_0, f_addrs_0, f_sizes_0, f_read_bufs_0) < 0) + TEST_ERROR; + + /* verify that the expected data is read */ + if (!test_vector_io__verify_v(count, types_0, sizes_0, write_bufs_0, read_bufs_0, "zero")) + TEST_ERROR; + + 
if (!test_vector_io__verify_v(count, f_types_0, f_sizes_0, f_write_bufs_0, f_read_bufs_0, "fixed zero")) + TEST_ERROR; + + /* write the contents of a vector individually, and then read it back + * in several vector reads. + */ + if (!test_vector_io__write_v_indiv(lf, count, types_1, addrs_1, sizes_1, write_bufs_1)) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, 1, &(types_1[0]), &(addrs_1[0]), &(sizes_1[0]), &(read_bufs_1[0])) < + 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, 2, &(types_1[1]), &(addrs_1[1]), &(sizes_1[1]), &(read_bufs_1[1])) < + 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count - 3, &(types_1[3]), &(addrs_1[3]), &(sizes_1[3]), + &(read_bufs_1[3])) < 0) + TEST_ERROR; + + /* for fixed size, write individually, and the read back in a single call */ + if (!test_vector_io__write_v_indiv(lf, count, f_types_1, f_addrs_1, f_sizes_1, f_write_bufs_1)) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_1, f_addrs_1, f_sizes_1, f_read_bufs_1) < 0) + TEST_ERROR; + + /* verify that the expected data is read */ + if (!test_vector_io__verify_v(count, types_1, sizes_1, write_bufs_1, read_bufs_1, "one")) + TEST_ERROR; + + if (!test_vector_io__verify_v(count, f_types_1, f_sizes_1, f_write_bufs_1, f_read_bufs_1, "fixed one")) + TEST_ERROR; + + /* Write the contents of a vector as several vector writes, then + * read it back in individual reads. 
+ */ + if (H5FDwrite_vector(lf, H5P_DEFAULT, 1, &(types_2[0]), &(addrs_2[0]), &(sizes_2[0]), + &(write_bufs_2[0])) < 0) + TEST_ERROR; + + if (H5FDwrite_vector(lf, H5P_DEFAULT, 2, &(types_2[1]), &(addrs_2[1]), &(sizes_2[1]), + &(write_bufs_2[1])) < 0) + TEST_ERROR; + + if (H5FDwrite_vector(lf, H5P_DEFAULT, count - 3, &(types_2[3]), &(addrs_2[3]), &(sizes_2[3]), + &(write_bufs_2[3])) < 0) + TEST_ERROR; + + if (!test_vector_io__read_v_indiv(lf, count, types_2, addrs_2, sizes_2, read_bufs_2)) + TEST_ERROR; + + /* for fixed size, write as a single vector, read back individually */ + if (H5FDwrite_vector(lf, H5P_DEFAULT, count, f_types_2, f_addrs_2, f_sizes_2, f_write_bufs_2) < 0) + TEST_ERROR; + + if (!test_vector_io__read_v_indiv(lf, count, f_types_2, f_addrs_2, f_sizes_2, f_read_bufs_2)) + TEST_ERROR; + + /* verify that the expected data is read */ + if (!test_vector_io__verify_v(count, types_2, sizes_2, write_bufs_2, read_bufs_2, "two")) + TEST_ERROR; + + if (!test_vector_io__verify_v(count, f_types_2, f_sizes_2, f_write_bufs_2, f_read_bufs_2, "fixed two")) + TEST_ERROR; + + /* make note of eoa -- needed after we re-open the file */ + if (HADDR_UNDEF == (eoa = H5FDget_eoa(lf, H5FD_MEM_DEFAULT))) + TEST_ERROR; + + /* close the file and then re-open it */ + if (H5FDclose(lf) < 0) + TEST_ERROR; + + flags = H5F_ACC_RDWR; + + if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF))) + TEST_ERROR; + + /* The EOA is set to 0 on open. To avoid errors, we must set it + * to its correct value before we do any reads. + * + * Note: In the context of using the VFD layer without the HDF5 + * library on top, this doesn't make much sense. Consider + * adding an open flag that sets the EOA to the current file + * size. 
+ */ + if (H5FDset_eoa(lf, H5FD_MEM_DEFAULT, eoa) < 0) + TEST_ERROR; + + /* Null the read vectors */ + + size_fixed_0 = FALSE; + size_fixed_1 = FALSE; + size_fixed_2 = FALSE; + + for (i = 0; i < count; i++) { + + buf = read_bufs_0[i]; + for (j = 0; j < sizes_0[i]; j++) { + buf[j] = '\0'; + } + + buf = read_bufs_1[i]; + for (j = 0; j < sizes_1[i]; j++) { + buf[j] = '\0'; + } + + buf = read_bufs_2[i]; + for (j = 0; j < sizes_2[i]; j++) { + buf[j] = '\0'; + } + + SET_SIZE(size_fixed_0, f_sizes_0, f_size_0, i); + SET_SIZE(size_fixed_1, f_sizes_1, f_size_1, i); + SET_SIZE(size_fixed_2, f_sizes_2, f_size_2, i); + + buf = f_read_bufs_0[i]; + for (j = 0; j < f_size_0; j++) { + buf[j] = '\0'; + } + + buf = f_read_bufs_1[i]; + for (j = 0; j < f_size_1; j++) { + buf[j] = '\0'; + } + + buf = f_read_bufs_2[i]; + for (j = 0; j < f_size_2; j++) { + buf[j] = '\0'; + } + } + + /* read the contents of the file */ + if (H5FDread_vector(lf, H5P_DEFAULT, count, types_0, addrs_0, sizes_0, read_bufs_0) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count, types_1, addrs_1, sizes_1, read_bufs_1) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count, types_2, addrs_2, sizes_2, read_bufs_2) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_0, f_addrs_0, f_sizes_0, f_read_bufs_0) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_1, f_addrs_1, f_sizes_1, f_read_bufs_1) < 0) + TEST_ERROR; + + if (H5FDread_vector(lf, H5P_DEFAULT, count, f_types_2, f_addrs_2, f_sizes_2, f_read_bufs_2) < 0) + TEST_ERROR; + + /* verify the contents. 
*/ + if (!test_vector_io__verify_v(count, types_0, sizes_0, write_bufs_0, read_bufs_0, "zero-")) + TEST_ERROR; + + if (!test_vector_io__verify_v(count, types_1, sizes_1, write_bufs_1, read_bufs_1, "one-")) + TEST_ERROR; + + if (!test_vector_io__verify_v(count, types_2, sizes_2, write_bufs_2, read_bufs_2, "two-")) + TEST_ERROR; + + if (!test_vector_io__verify_v(count, f_types_0, f_sizes_0, f_write_bufs_0, f_read_bufs_0, "fixed zero-")) + TEST_ERROR; + + if (!test_vector_io__verify_v(count, f_types_1, f_sizes_1, f_write_bufs_1, f_read_bufs_1, "fixed one-")) + TEST_ERROR; + + if (!test_vector_io__verify_v(count, f_types_2, f_sizes_2, f_write_bufs_2, f_read_bufs_2, "fixed two-")) + TEST_ERROR; + + if (H5FDclose(lf) < 0) + TEST_ERROR; + + h5_delete_test_file(FILENAME[0], fapl_id); + + /* Close the fapl */ + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + + /* discard the read and write buffers */ + + for (i = 0; i < count; i++) { + + HDfree(write_bufs_0[i]); + write_bufs_0[i] = NULL; + + HDfree(write_bufs_1[i]); + write_bufs_1[i] = NULL; + + HDfree(write_bufs_2[i]); + write_bufs_2[i] = NULL; + + HDfree(read_bufs_0[i]); + read_bufs_0[i] = NULL; + + HDfree(read_bufs_1[i]); + read_bufs_1[i] = NULL; + + HDfree(read_bufs_2[i]); + read_bufs_2[i] = NULL; + + HDfree(f_write_bufs_0[i]); + f_write_bufs_0[i] = NULL; + + HDfree(f_write_bufs_1[i]); + f_write_bufs_1[i] = NULL; + + HDfree(f_write_bufs_2[i]); + f_write_bufs_2[i] = NULL; + + HDfree(f_read_bufs_0[i]); + f_read_bufs_0[i] = NULL; + + HDfree(f_read_bufs_1[i]); + f_read_bufs_1[i] = NULL; + + HDfree(f_read_bufs_2[i]); + f_read_bufs_2[i] = NULL; + } + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fapl_id); + H5FDclose(lf); + } + H5E_END_TRY; + return -1; +} /* end test_vector_io() */ + +/*------------------------------------------------------------------------- + * Function: test_selection_io_write + * + * Purpose: Updates write buffers to ensure a unique value is written + * to each element and issues a 
selection write call.
 *
 * Return:      Success:    0
 *              Failure:    -1
 *
 * Programmer:  Neil Fortner
 *              7/1/21
 *
 * Changes:     None.
 *
 *-------------------------------------------------------------------------
 */
/* Array dimensions, used for all selection I/O tests. Currently both must be
 * even. 1-Dimensional arrays have a size of SEL_IO_DIM0 * SEL_IO_DIM1. */
#define SEL_IO_DIM0 8
#define SEL_IO_DIM1 10

static herr_t
test_selection_io_write(H5FD_t *lf, H5FD_mem_t type, uint32_t count, hid_t mem_spaces[], hid_t file_spaces[],
                        haddr_t offsets[], size_t element_sizes[], int *wbufs[])
{
    int i;
    int j;

    /* Bump every element of each write buffer so this write puts fresh,
     * unique values in the file.  Adjacent wbufs[] entries may point at the
     * same buffer, so an entry equal to its predecessor is skipped to avoid
     * double-incrementing shared storage.
     */
    for (i = 0; i < (int)count; i++)
        if (wbufs[i] && (i == 0 || wbufs[i] != wbufs[i - 1]))
            for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++)
                wbufs[i][j] += 2 * SEL_IO_DIM0 * SEL_IO_DIM1;

    /* Issue write call */
    if (H5FDwrite_selection(lf, type, H5P_DEFAULT, count, mem_spaces, file_spaces, offsets, element_sizes,
                            (const void **)wbufs) < 0)
        TEST_ERROR

    return 0;

error:
    return -1;
} /* end test_selection_io_write() */

/*-------------------------------------------------------------------------
 * Function:    test_selection_io_read_verify
 *
 * Purpose:     Issues a selection read call and compares the result to
 *              the arrays provided in erbufs.  If rbufcount is less than
 *              count the last element in erbufs will be repeated to make
 *              up the difference.
 *
 * Return:      Success:    0
 *              Failure:    -1
 *
 * Programmer:  Neil Fortner
 *              7/1/21
 *
 * Changes:     None.
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_selection_io_read_verify(H5FD_t *lf, H5FD_mem_t type, uint32_t count, hid_t mem_spaces[],
                              hid_t file_spaces[], haddr_t offsets[], size_t element_sizes[],
                              uint32_t rbufcount, int *erbufs[], hbool_t shorten_rbufs)
{
    int  rbuf1[SEL_IO_DIM0 * SEL_IO_DIM1];
    int  rbuf2[SEL_IO_DIM0 * SEL_IO_DIM1];
    /* NOTE(review): only 2 read buffers are available, so callers must pass
     * count <= 2 or the fill-in loop below writes past rbufs[] -- confirm
     * against call sites. */
    int *rbufs[2] = {rbuf1, rbuf2};
    int  i;
    int  j;

    /* Initialize read buffer to a sentinel so unwritten elements are visible */
    for (i = 0; i < (int)rbufcount; i++)
        for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++)
            rbufs[i][j] = -1;

    /* Handle elements in count that are not part of rbufcount: either pass
     * NULL (to exercise the shortened-array code path) or repeat the last
     * real buffer */
    for (i = (int)rbufcount; i < (int)count; i++)
        if (shorten_rbufs)
            rbufs[i] = NULL;
        else
            rbufs[i] = rbufs[rbufcount - 1];

    /* Issue read call */
    if (H5FDread_selection(lf, type, H5P_DEFAULT, count, mem_spaces, file_spaces, offsets, element_sizes,
                           (void **)rbufs) < 0)
        TEST_ERROR

    /* Verify result against the expected buffers */
    for (i = 0; i < (int)rbufcount; i++)
        for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++)
            if (rbufs[i][j] != erbufs[i][j]) {
                H5_FAILED()
                AT()
                HDprintf("data read from file does not match expected values at mapping array location %d\n",
                         i);
                HDprintf("expected data: \n");
                /* The dump loops below reuse j, clobbering the outer loop
                 * index -- safe only because we goto error immediately after */
                for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++) {
                    printf("%6d", erbufs[i][j]);
                    if (!((j + 1) % SEL_IO_DIM1))
                        printf("\n");
                }
                HDprintf("read data: \n");
                for (j = 0; j < SEL_IO_DIM0 * SEL_IO_DIM1; j++) {
                    printf("%6d", rbufs[i][j]);
                    if (!((j + 1) % SEL_IO_DIM1))
                        printf("\n");
                }
                goto error;
            }

    return 0;

error:
    return -1;
} /* end test_selection_io_read_verify() */

/*-------------------------------------------------------------------------
 * Function:    test_selection_io
 *
 * Purpose:     Test I/O using the selection I/O VFD public VFD calls.
+ * + * Tests various combinations of 1D, 2D, contiguous, and + * strided selections with different file data types and + * with and without shortened arrays. + * + * Return: Success: 0 + * Failure: -1 + * + * Programmer: Neil Fortner + * 7/1/21 + * + * Changes: None. + * + *------------------------------------------------------------------------- + */ +static herr_t +test_selection_io(const char *vfd_name) +{ + char test_title[80]; + hid_t fapl_id = -1; /* file access property list ID */ + char filename[1024]; /* filename */ + unsigned flags = 0; /* file open flags */ + H5FD_t * lf; /* VFD struct ptr */ + int i; /* index */ + int j; /* index */ + int i2; /* index */ + int j2; /* index */ + hid_t mem_spaces[2] = {H5I_INVALID_HID, H5I_INVALID_HID}; /* memory dataspaces vector */ + hid_t file_spaces[2] = {H5I_INVALID_HID, H5I_INVALID_HID}; /* file dataspaces vector */ + hsize_t dims1[1] = {SEL_IO_DIM0 * SEL_IO_DIM1}; /* 1D dataspace dimensions */ + hsize_t dims2[2] = {SEL_IO_DIM0, SEL_IO_DIM1}; /* 1D dataspace dimensions */ + hsize_t start[2]; /* start for hyperslab */ + hsize_t stride[2]; /* stride for hyperslab */ + hsize_t count[2]; /* count for hyperslab */ + hsize_t block[2]; /* block for hyperslab */ + H5FD_mem_t type; /* file data type */ + haddr_t addrs[2]; /* addresses vector */ + size_t element_sizes[2] = {sizeof(int), sizeof(int)}; /* element sizes vector */ + int wbuf1[SEL_IO_DIM0 * SEL_IO_DIM1]; /* 1D write buffer */ + int wbuf2[SEL_IO_DIM0][SEL_IO_DIM1]; /* 2D write buffer */ + int * wbufs[2] = {wbuf1, wbuf2[0]}; /* Array of write buffers */ + int fbuf1[SEL_IO_DIM0 * SEL_IO_DIM1]; /* 1D file buffer */ + int fbuf2[SEL_IO_DIM0][SEL_IO_DIM1]; /* 2D file buffer */ + int * fbufs[2] = {fbuf1, fbuf2[0]}; /* Array of file buffers */ + int erbuf1[SEL_IO_DIM0 * SEL_IO_DIM1]; /* 1D expected read buffer */ + int erbuf2[SEL_IO_DIM0][SEL_IO_DIM1]; /* 2D expected read buffer */ + int * erbufs[2] = {erbuf1, erbuf2[0]}; /* Array of expected read buffers */ + int 
shorten_element_sizes; /* Whether to shorten the element sizes array */ + + HDsnprintf(test_title, sizeof(test_title), "selection I/O with %s VFD", vfd_name); + + TESTING(test_title); + + /* Set property list and file name for target driver */ + + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) + TEST_ERROR + + if (HDstrcmp(vfd_name, "sec2") == 0) { + + if (H5Pset_fapl_sec2(fapl_id) < 0) + TEST_ERROR + + h5_fixname(FILENAME[0], fapl_id, filename, sizeof(filename)); + } + else if (HDstrcmp(vfd_name, "stdio") == 0) { + + if (H5Pset_fapl_stdio(fapl_id) < 0) + TEST_ERROR + + h5_fixname(FILENAME[7], fapl_id, filename, sizeof filename); + } + else { + + HDfprintf(stdout, "un-supported VFD\n"); + TEST_ERROR + } + + /* Initialize write buffers */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) { + wbuf1[(i * SEL_IO_DIM1) + j] = (i * SEL_IO_DIM1) + j; + wbuf2[i][j] = (i * SEL_IO_DIM1) + j + (SEL_IO_DIM0 * SEL_IO_DIM1); + } + + /* Create dataspaces - location 0 will be 1D and location 1 will be 2D */ + if ((mem_spaces[0] = H5Screate_simple(1, dims1, NULL)) < 0) + TEST_ERROR + if ((mem_spaces[1] = H5Screate_simple(2, dims2, NULL)) < 0) + TEST_ERROR + if ((file_spaces[0] = H5Screate_simple(1, dims1, NULL)) < 0) + TEST_ERROR + if ((file_spaces[1] = H5Screate_simple(2, dims2, NULL)) < 0) + TEST_ERROR + + /* Create file */ + flags = H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC; + + if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF))) + TEST_ERROR; + + /* Loop over memory types */ + for (type = 1; type < H5FD_MEM_NTYPES; type++) { + /* Allocate space for I/O */ + addrs[0] = H5FDalloc(lf, type, H5P_DEFAULT, (hsize_t)(sizeof(int) * SEL_IO_DIM0 * SEL_IO_DIM1)); + addrs[1] = H5FDalloc(lf, type, H5P_DEFAULT, (hsize_t)(sizeof(int) * SEL_IO_DIM0 * SEL_IO_DIM1)); + + /* + * Test 1: Simple 1D contiguous I/O + */ + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes, + (int 
**)&wbufs[0]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0; i < SEL_IO_DIM0 * SEL_IO_DIM1; i++) + fbuf1[i] = wbuf1[i]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&fbufs[0], FALSE) < 0) + TEST_ERROR + + /* + * Test 2: Simple 2D contiguous I/O + */ + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes, + (int **)&wbufs[1]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + fbuf2[i][j] = wbuf2[i][j]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&fbufs[1], FALSE) < 0) + TEST_ERROR + + /* + * Test 3: Strided <> Contiguous 1D I/O + */ + /* SEL_IO_DIM1 must be even */ + HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2); + + /* Strided selection in memory */ + start[0] = 1; + stride[0] = 2; + count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; + block[0] = 1; + if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Contiguous selection in file */ + if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, NULL, count, NULL) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes, + (int **)&wbufs[0]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + fbuf1[i + 1] = wbuf1[(2 * i) + 1]; + + /* Update expected read buf */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++) + erbuf1[i] = -1; + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + erbuf1[(2 * i) + 1] = wbuf1[(2 * i) + 1]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], + element_sizes, 1, (int 
**)&erbufs[0], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[0]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&fbufs[0], FALSE) < 0) + TEST_ERROR + + /* + * Test 4: Contiguous <> Strided 1D I/O + */ + /* SEL_IO_DIM1 must be even */ + HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2); + + /* Contiguous selection in memory */ + start[0] = 1; + stride[0] = 2; + if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, NULL, count, NULL) < 0) + TEST_ERROR + + /* Strided selection in file */ + count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; + block[0] = 1; + if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes, + (int **)&wbufs[0]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + fbuf1[(2 * i) + 1] = wbuf1[i + 1]; + + /* Update expected read buf */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++) + erbuf1[i] = -1; + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + erbuf1[i + 1] = wbuf1[i + 1]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&erbufs[0], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[0]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&fbufs[0], FALSE) < 0) + TEST_ERROR + + /* + * Test 5: Strided <> Strided 1D I/O + */ + /* SEL_IO_DIM1 must be even */ + HDassert(SEL_IO_DIM1 
/ 2 == (SEL_IO_DIM1 + 1) / 2); + + /* Strided selection in memory */ + start[0] = 1; + stride[0] = 2; + count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; + block[0] = 1; + if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection in file */ + start[0] = 0; + if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], element_sizes, + (int **)&wbufs[0]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + fbuf1[2 * i] = wbuf1[(2 * i) + 1]; + + /* Update expected read buf */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++) + erbuf1[i] = -1; + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + erbuf1[(2 * i) + 1] = wbuf1[(2 * i) + 1]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&erbufs[0], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[0]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&fbufs[0], FALSE) < 0) + TEST_ERROR + + /* + * Test 6: Strided <> Contiguous 2D I/O + */ + /* Strided selection in memory */ + start[0] = 1; + start[1] = 0; + stride[0] = 2; + stride[1] = 1; + count[0] = SEL_IO_DIM0 / 2; + count[1] = SEL_IO_DIM1; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Contiguous selection in file */ + if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, NULL, count, NULL) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, 
&mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes, + (int **)&wbufs[1]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0; i < SEL_IO_DIM0 / 2; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + fbuf2[i + 1][j] = wbuf2[(2 * i) + 1][j]; + + /* Update expected read buf */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = -1; + for (i = 0; i < SEL_IO_DIM0 / 2; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[(2 * i) + 1][j] = wbuf2[(2 * i) + 1][j]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&erbufs[1], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[1]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[1]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&fbufs[1], FALSE) < 0) + TEST_ERROR + + /* + * Test 7: Contiguous <> Strided 2D I/O + */ + /* Contiguous selection in memory */ + start[0] = 0; + start[1] = 1; + count[0] = SEL_IO_DIM0; + count[1] = SEL_IO_DIM1 / 2; + if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, NULL, count, NULL) < 0) + TEST_ERROR + + /* Strided selection in file */ + stride[0] = 1; + stride[1] = 2; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes, + (int **)&wbufs[1]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1 / 2; j++) + fbuf2[i][(2 * j) + 1] = wbuf2[i][j + 1]; + + /* Update expected read buf */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = -1; + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < 
SEL_IO_DIM1 / 2; j++) + erbuf2[i][j + 1] = wbuf2[i][j + 1]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&erbufs[1], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[1]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[1]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&fbufs[1], FALSE) < 0) + TEST_ERROR + + /* + * Test 8: Strided <> Strided 2D I/O + */ + /* SEL_IO_DIM0 and SEL_IO_DIM1 must be even */ + HDassert(SEL_IO_DIM0 / 2 == (SEL_IO_DIM0 + 1) / 2); + HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2); + + /* Strided selection (across dim 1) in memory */ + start[0] = 0; + start[1] = 1; + stride[0] = 1; + stride[1] = 2; + count[0] = SEL_IO_DIM0; + count[1] = SEL_IO_DIM1 / 2; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection (across dim 0) in file */ + start[0] = 1; + start[1] = 0; + stride[0] = 2; + stride[1] = 1; + count[0] = SEL_IO_DIM0 / 2; + count[1] = SEL_IO_DIM1; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], element_sizes, + (int **)&wbufs[1]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0, i2 = 1, j2 = 0; i < SEL_IO_DIM0; i++) + for (j = 1; j < SEL_IO_DIM1; j += 2) { + fbuf2[i2][j2] = wbuf2[i][j]; + if (++j2 == SEL_IO_DIM1) { + i2 += 2; + j2 = 0; + } + } + + /* Update expected read buf */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = -1; + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 1; j < SEL_IO_DIM1; j += 2) + 
erbuf2[i][j] = wbuf2[i][j]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&erbufs[1], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[1]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[1]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&fbufs[1], FALSE) < 0) + TEST_ERROR + + /* + * Test 9: Strided 1D <> Strided 2D I/O + */ + /* Strided selection in memory */ + start[0] = 1; + stride[0] = 2; + count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; + block[0] = 1; + if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection (across dim 1) in file */ + start[0] = 0; + start[1] = 1; + stride[0] = 1; + stride[1] = 2; + count[0] = SEL_IO_DIM0; + count[1] = SEL_IO_DIM1 / 2; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[0], &file_spaces[1], &addrs[1], element_sizes, + (int **)&wbufs[0]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 1, i2 = 0, j2 = 1; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i += 2) { + fbuf2[i2][j2] = wbuf1[i]; + j2 += 2; + if (j2 == SEL_IO_DIM1) { + i2++; + j2 = 1; + } + } + + /* Update expected read buf */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++) + erbuf1[i] = -1; + for (i = 1; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i += 2) + erbuf1[i] = wbuf1[i]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&erbufs[0], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[1]) < 0) + 
TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[0], &file_spaces[1], &addrs[1], + element_sizes, 1, (int **)&fbufs[1], FALSE) < 0) + TEST_ERROR + + /* + * Test 10: Strided 2D <> Strided 1D I/O + */ + /* Strided selection (across dim 0) in memory */ + start[0] = 0; + start[1] = 0; + stride[0] = 2; + stride[1] = 1; + count[0] = SEL_IO_DIM0 / 2; + count[1] = SEL_IO_DIM1; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection in file */ + start[0] = 0; + stride[0] = 2; + count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; + block[0] = 1; + if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 1, &mem_spaces[1], &file_spaces[0], &addrs[0], element_sizes, + (int **)&wbufs[1]) < 0) + TEST_ERROR + + /* Update file buf */ + for (i = 0, i2 = 0; i < SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) { + fbuf1[i2] = wbuf2[i][j]; + i2 += 2; + } + + /* Update expected read buf */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = -1; + for (i = 0; i < SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = wbuf2[i][j]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&erbufs[1], FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[1]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[0]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 1, &mem_spaces[1], &file_spaces[0], &addrs[0], + element_sizes, 1, (int **)&fbufs[0], FALSE) < 0) + TEST_ERROR + + /* Run tests with full and partial element sizes array */ + for (shorten_element_sizes = 0; 
shorten_element_sizes <= 1; shorten_element_sizes++) { + /* + * Test 11: Strided <> Strided 1D and 2D I/O + */ + /* SEL_IO_DIM1 must be even */ + HDassert(SEL_IO_DIM1 / 2 == (SEL_IO_DIM1 + 1) / 2); + + /* Strided selection in memory (1D) */ + start[0] = 0; + stride[0] = 2; + count[0] = (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; + block[0] = 1; + if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection in file (1D) */ + start[0] = 1; + if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection (across dim 0) in memory (2D) */ + start[0] = 1; + start[1] = 0; + stride[0] = 2; + stride[1] = 1; + count[0] = SEL_IO_DIM0 / 2; + count[1] = SEL_IO_DIM1; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection (across dim 1) in file (2D) */ + start[0] = 0; + start[1] = 1; + stride[0] = 1; + stride[1] = 2; + count[0] = SEL_IO_DIM0; + count[1] = SEL_IO_DIM1 / 2; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Issue write call */ + if (test_selection_io_write(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, + (int **)wbufs) < 0) + TEST_ERROR + + /* Update file bufs */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + fbuf1[(2 * i) + 1] = wbuf1[2 * i]; + for (i = 1, i2 = 0, j2 = 1; i < SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) { + fbuf2[i2][j2] = wbuf2[i][j]; + j2 += 2; + if (j2 >= SEL_IO_DIM1) { + i2++; + j2 = 1; + } + } + + /* Update expected read bufs */ + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1); i++) + erbuf1[i] = -1; + for (i = 0; i < (SEL_IO_DIM0 * SEL_IO_DIM1) / 2; i++) + erbuf1[2 * i] = wbuf1[2 * i]; + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = -1; + for (i = 1; i < 
SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = wbuf2[i][j]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 2, + (int **)erbufs, FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(mem_spaces[1]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[1]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 2, + (int **)fbufs, FALSE) < 0) + TEST_ERROR + + /* + * Test 12: Strided <> Strided 2D I/O, 2 different selections in the same memory buffer + */ + /* Switch mem and file spaces to both be 2D */ + if (H5Sset_extent_simple(mem_spaces[0], 2, dims2, NULL) < 0) + TEST_ERROR + if (H5Sset_extent_simple(file_spaces[0], 2, dims2, NULL) < 0) + TEST_ERROR + + /* Strided selection in memory (1st) */ + start[0] = 0; + start[1] = 0; + stride[0] = 2; + stride[1] = 1; + count[0] = SEL_IO_DIM0 / 2; + count[1] = SEL_IO_DIM1; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(mem_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection (across dim 0) in memory (2nd) */ + start[0] = 1; + if (H5Sselect_hyperslab(mem_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection in file (1st) */ + start[0] = 0; + start[1] = 0; + stride[0] = 1; + stride[1] = 2; + count[0] = SEL_IO_DIM0; + count[1] = SEL_IO_DIM1 / 2; + block[0] = 1; + block[1] = 1; + if (H5Sselect_hyperslab(file_spaces[0], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Strided selection (across dim 1) in file (2nd) */ + start[0] = 0; + start[1] = 1; + stride[0] = 1; + stride[1] = 2; + count[0] = SEL_IO_DIM0; + count[1] = SEL_IO_DIM1 / 2; + block[0] = 1; + block[1] = 1; + if 
(H5Sselect_hyperslab(file_spaces[1], H5S_SELECT_SET, start, stride, count, block) < 0) + TEST_ERROR + + /* Use the same memory buffer for both selections */ + wbufs[0] = wbuf2[0]; + + /* Shorten wbuf array */ + if (shorten_element_sizes) + wbufs[1] = NULL; + else + wbufs[1] = wbufs[0]; + + /* Issue write call */ + if (test_selection_io_write(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, + (int **)wbufs) < 0) + TEST_ERROR + + /* Update file bufs - need to reuse 1D array so data stays consistent, so use math to + * find 1D index into 2D array */ + for (i = 0, i2 = 0, j2 = 0; i < SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) { + fbuf1[(i2 * SEL_IO_DIM1) + j2] = wbuf2[i][j]; + j2 += 2; + if (j2 >= SEL_IO_DIM1) { + i2++; + j2 = 0; + } + } + for (i = 1, i2 = 0, j2 = 1; i < SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) { + fbuf2[i2][j2] = wbuf2[i][j]; + j2 += 2; + if (j2 >= SEL_IO_DIM1) { + i2++; + j2 = 1; + } + } + + /* Update expected read buf */ + for (i = 0; i < SEL_IO_DIM0; i++) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = -1; + for (i = 0; i < SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = wbuf2[i][j]; + for (i = 1; i < SEL_IO_DIM0; i += 2) + for (j = 0; j < SEL_IO_DIM1; j++) + erbuf2[i][j] = wbuf2[i][j]; + + /* Read and verify */ + if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 1, + (int **)&erbufs[1], shorten_element_sizes ? 
TRUE : FALSE) < 0) + TEST_ERROR + + /* Reset selections */ + if (H5Sselect_all(mem_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[0]) < 0) + TEST_ERROR + if (H5Sselect_all(mem_spaces[1]) < 0) + TEST_ERROR + if (H5Sselect_all(file_spaces[1]) < 0) + TEST_ERROR + + /* Read entire file buffer and verify */ + if (test_selection_io_read_verify(lf, type, 2, mem_spaces, file_spaces, addrs, element_sizes, 2, + (int **)fbufs, FALSE) < 0) + TEST_ERROR + + /* Reset first spaces to 1D */ + if (H5Sset_extent_simple(mem_spaces[0], 1, dims1, NULL) < 0) + TEST_ERROR + if (H5Sset_extent_simple(file_spaces[0], 1, dims1, NULL) < 0) + TEST_ERROR + + /* Reset write buffer array */ + wbufs[0] = wbuf1; + wbufs[1] = wbuf2[0]; + + /* Change to shortened element sizes array */ + element_sizes[1] = 0; + } + + /* Reset element sizes array */ + element_sizes[1] = element_sizes[0]; + } + + /* + * Cleanup + */ + /* Close file */ + if (H5FDclose(lf) < 0) + TEST_ERROR; + + h5_delete_test_file(FILENAME[0], fapl_id); + + /* Close the fapl */ + if (H5Pclose(fapl_id) < 0) + TEST_ERROR; + + /* Close dataspaces */ + for (i = 0; i < 2; i++) { + if (H5Sclose(mem_spaces[i]) < 0) + TEST_ERROR + if (H5Sclose(file_spaces[i]) < 0) + TEST_ERROR + } + + PASSED(); + return 0; + +error: + H5E_BEGIN_TRY + { + H5Pclose(fapl_id); + H5FDclose(lf); + for (i = 0; i < 2; i++) { + H5Sclose(mem_spaces[i]); + H5Sclose(file_spaces[i]); + } + } + H5E_END_TRY; + return -1; +} /* end test_selection_io() */ + /*------------------------------------------------------------------------- * Function: main * @@ -3943,6 +5967,8 @@ main(void) HDprintf("Testing basic Virtual File Driver functionality.\n"); + setup_rand(); + nerrors += test_sec2() < 0 ? 1 : 0; nerrors += test_core() < 0 ? 1 : 0; nerrors += test_direct() < 0 ? 1 : 0; @@ -3956,6 +5982,10 @@ main(void) nerrors += test_windows() < 0 ? 1 : 0; nerrors += test_ros3() < 0 ? 1 : 0; nerrors += test_splitter() < 0 ? 1 : 0; + nerrors += test_vector_io("sec2") < 0 ? 
1 : 0; + nerrors += test_vector_io("stdio") < 0 ? 1 : 0; + nerrors += test_selection_io("sec2") < 0 ? 1 : 0; + nerrors += test_selection_io("stdio") < 0 ? 1 : 0; nerrors += test_ctl() < 0 ? 1 : 0; if (nerrors) { diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index ff4446ce97..32f4a0f46e 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -89,6 +89,7 @@ set (H5P_TESTS t_shapesame t_filters_parallel t_2Gio + t_vfd ) foreach (h5_testp ${H5P_TESTS}) diff --git a/testpar/Makefile.am b/testpar/Makefile.am index 6a8cc2b2ec..cbde0c1e68 100644 --- a/testpar/Makefile.am +++ b/testpar/Makefile.am @@ -30,7 +30,7 @@ check_SCRIPTS = $(TEST_SCRIPT_PARA) # Test programs. These are our main targets. # -TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio +TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio t_vfd # t_pflush1 and t_pflush2 are used by testpflush.sh check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2 diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c index 104460a85e..20efaa10fb 100644 --- a/testpar/t_coll_chunk.c +++ b/testpar/t_coll_chunk.c @@ -832,7 +832,10 @@ coll_chunktest(const char *filename, int chunk_factor, int select_factor, int ap VRFY((status >= 0), "dataset write succeeded"); #ifdef H5_HAVE_INSTRUMENTED_LIBRARY - if (facc_type == FACC_MPIO) { + /* Only check chunk optimization mode if selection I/O is not being used - + * selection I/O bypasses this IO mode decision - it's effectively always + * multi chunk currently */ + if (facc_type == FACC_MPIO && !H5_use_selection_io_g) { switch (api_option) { case API_LINK_HARD: status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value); diff --git a/testpar/t_dset.c b/testpar/t_dset.c index 8616beface..51e72bda08 100644 --- a/testpar/t_dset.c +++ b/testpar/t_dset.c @@ -3351,32 
+3351,38 @@ actual_io_mode_tests(void) int mpi_size = -1; MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); - test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); + /* Only run these tests if selection I/O is not being used - selection I/O + * bypasses this IO mode decision - it's effectively always multi chunk + * currently */ + if (!H5_use_selection_io_g) { + test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); - /* - * Test multi-chunk-io via proc_num threshold - */ - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); + /* + * Test multi-chunk-io via proc_num threshold + */ + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); - /* The Multi Chunk Mixed test requires at least three processes. */ - if (mpi_size > 2) - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); - else - HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); + /* The Multi Chunk Mixed test requires at least three processes. 
*/ + if (mpi_size > 2) + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); + else + HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n"); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); + test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); - /* - * Test multi-chunk-io via setting direct property - */ - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); + /* + * Test multi-chunk-io via setting direct property + */ + test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); + test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); - test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); + test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); + test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); + + test_actual_io_mode(TEST_ACTUAL_IO_RESET); + } - test_actual_io_mode(TEST_ACTUAL_IO_RESET); return; } diff --git a/testpar/t_vfd.c b/testpar/t_vfd.c new file mode 100644 index 0000000000..2072afe8d7 --- /dev/null +++ b/testpar/t_vfd.c @@ -0,0 +1,4055 @@ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Copyright by The HDF Group. * + * All rights reserved. * + * * + * This file is part of HDF5. The full HDF5 copyright notice, including * + * terms governing use, modification, and redistribution, is contained in * + * the COPYING file, which can be found at the root of the source code * + * distribution tree, or in https://www.hdfgroup.org/licenses. * + * If you do not have access to either file, you may request a copy from * + * help@hdfgroup.org. * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Programmer: John Mainzer + * + * This file is a catchall for parallel VFD tests. + */ + +#include "testphdf5.h" + +/* Must be a power of 2. 
Reducing it below 1024 may cause problems */ +#define INTS_PER_RANK 1024 + +/* global variable declarations: */ + +hbool_t pass = TRUE; /* set to FALSE on error */ +const char *failure_mssg = NULL; + +const char *FILENAMES[] = {"mpio_vfd_test_file_0", /*0*/ + "mpio_vfd_test_file_1", /*1*/ + "mpio_vfd_test_file_2", /*2*/ + "mpio_vfd_test_file_3", /*3*/ + "mpio_vfd_test_file_4", /*4*/ + "mpio_vfd_test_file_5", /*5*/ + NULL}; + +/* File Test Images + * + * Pointers to dynamically allocated buffers of size + * INTS_PER_RANK * sizeof(int32_t) * mpi_size(). These + * buffers are used to put the test file in a known + * state, and to test if the test file contains the + * expected data. + */ + +int32_t *increasing_fi_buf = NULL; +int32_t *decreasing_fi_buf = NULL; +int32_t *negative_fi_buf = NULL; +int32_t *zero_fi_buf = NULL; +int32_t *read_fi_buf = NULL; + +/* local utility function declarations */ + +static unsigned alloc_and_init_file_images(int mpi_size); +static void free_file_images(void); +static void setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name, haddr_t eoa, + H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr); +static void takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fapl_id_ptr, + hid_t *dxpl_id_ptr); + +/* test functions */ +static unsigned vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_read_test_4(int file_name_id, int 
mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); + +static unsigned vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); +static unsigned vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name); + +/****************************************************************************/ +/***************************** Utility Functions ****************************/ +/****************************************************************************/ + +/*------------------------------------------------------------------------- + * Function: alloc_and_init_file_images + * + * Purpose: Allocate and initialize the global buffers used to construct, + * load and verify test file contents. + * + * Return: void + * + * Programmer: John Mainzer + * 3/25/26 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +alloc_and_init_file_images(int mpi_size) +{ + const char *fcn_name = "alloc_and_init_file_images()"; + int cp = 0; + int buf_len; + size_t buf_size; + int i; + hbool_t show_progress = FALSE; + + pass = TRUE; + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* allocate the file image buffers */ + if (pass) { + + buf_len = INTS_PER_RANK * mpi_size; + buf_size = sizeof(int32_t) * (size_t)INTS_PER_RANK * (size_t)mpi_size; + + increasing_fi_buf = (int32_t *)HDmalloc(buf_size); + decreasing_fi_buf = (int32_t *)HDmalloc(buf_size); + negative_fi_buf = (int32_t *)HDmalloc(buf_size); + zero_fi_buf = (int32_t *)HDmalloc(buf_size); + read_fi_buf = (int32_t *)HDmalloc(buf_size); + + if ((!increasing_fi_buf) || (!decreasing_fi_buf) || (!negative_fi_buf) || (!zero_fi_buf) || + (!read_fi_buf)) { + + pass = FALSE; + failure_mssg = "Can't allocate one or more file image buffers."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* initialize the file image buffers */ + if (pass) { + + for (i = 0; i < buf_len; i++) { + + increasing_fi_buf[i] = i; + decreasing_fi_buf[i] = buf_len - i; + negative_fi_buf[i] = -i; + zero_fi_buf[i] = 0; + read_fi_buf[i] = 0; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* discard file image buffers if there was an error */ + if (!pass) { + + free_file_images(); + } + + return !pass; + +} /* alloc_and_init_file_images() */ + +/*------------------------------------------------------------------------- + * Function: free_file_images + * + * Purpose: Deallocate any global file image buffers that exist, and + * set their associated pointers to NULL. + * + * Return: void + * + * Programmer: John Mainzer + * 1/25/17 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static void +free_file_images(void) +{ + if (increasing_fi_buf) { + + HDfree(increasing_fi_buf); + increasing_fi_buf = NULL; + } + + if (decreasing_fi_buf) { + + HDfree(decreasing_fi_buf); + decreasing_fi_buf = NULL; + } + + if (negative_fi_buf) { + + HDfree(negative_fi_buf); + negative_fi_buf = NULL; + } + + if (zero_fi_buf) { + + HDfree(zero_fi_buf); + zero_fi_buf = NULL; + } + + if (read_fi_buf) { + + HDfree(read_fi_buf); + read_fi_buf = NULL; + } + + return; + +} /* free_file_images() */ + +/*------------------------------------------------------------------------- + * Function: setup_vfd_test_file + * + * Purpose: Create / open the specified test file with the specified + * VFD, and set the EOA to the specified value. + * + * Setup the dxpl for subsequent I/O via the target VFD. + * + * Return a pointer to the instance of H5FD_t created on + * file open in *lf_ptr, and the FAPL and DXPL ids in + * *fapl_id_ptr and *dxpl_id_ptr. Similarly, copy the + * "fixed" file name into file_name on exit. + * + * Return: void + * + * Programmer: John Mainzer + * 3/25/26 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static void +setup_vfd_test_file(int file_name_id, char *file_name, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name, haddr_t eoa, + H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr) +{ + const char *fcn_name = "setup_vfd_test_file()"; + char filename[512]; + int cp = 0; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + unsigned flags = 0; /* file open flags */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + + HDassert(vfd_name); + HDassert(lf_ptr); + HDassert(fapl_id_ptr); + HDassert(dxpl_id_ptr); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup fapl for target VFD */ + if (pass) { + + if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) { + + pass = FALSE; + failure_mssg = "Can't create fapl."; + } + } + + if (pass) { + + if (strcmp(vfd_name, "mpio") == 0) { + + if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) { + + pass = FALSE; + failure_mssg = "Can't set mpio fapl."; + } + } + else { + + pass = FALSE; + failure_mssg = "un-supported VFD"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* setup the file name */ + if (pass) { + + if (h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT, filename, sizeof(filename)) == NULL) { + + pass = FALSE; + failure_mssg = "h5_fixname() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Open the VFD test file with the specified VFD. 
*/ + + if (pass) { + + flags = H5F_ACC_RDWR | H5F_ACC_CREAT | H5F_ACC_TRUNC; + + if (NULL == (lf = H5FDopen(filename, flags, fapl_id, HADDR_UNDEF))) { + + pass = FALSE; + failure_mssg = "H5FDopen() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* set eoa as specified */ + + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + if (H5FDset_eoa(lf, H5FD_MEM_DEFAULT, eoa) < 0) { + + pass = FALSE; + failure_mssg = "H5FDset_eoa() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { /* setup dxpl */ + + dxpl_id = H5Pcreate(H5P_DATASET_XFER); + + if (dxpl_id < 0) { + + pass = FALSE; + failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + if (H5Pset_dxpl_mpio(dxpl_id, xfer_mode) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_dxpl_mpio() failed."; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + if (pass) { + + if (H5Pset_dxpl_mpio_collective_opt(dxpl_id, coll_opt_mode) < 0) { + + pass = FALSE; + failure_mssg = "H5Pset_dxpl_mpio() failed."; + } + } + + if (pass) { /* setup pointers with return values */ + + strncpy(file_name, filename, 512); + *lf_ptr = lf; + *fapl_id_ptr = fapl_id; + *dxpl_id_ptr = dxpl_id; + } + else { /* tidy up from failure as possible */ + + if (lf) + H5FDclose(lf); + + if (fapl_id != -1) + H5Pclose(fapl_id); + + if (dxpl_id != -1) + H5Pclose(dxpl_id); + } + + return; + +} /* setup_vfd_test_file() */ + +/*------------------------------------------------------------------------- + * Function: takedown_vfd_test_file + * + * Purpose: Close and delete the specified test file. Close the + * FAPL & DXPL. 
+ * + * Return: void + * + * Programmer: John Mainzer + * 3/25/26 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static void +takedown_vfd_test_file(int mpi_rank, char *filename, H5FD_t **lf_ptr, hid_t *fapl_id_ptr, hid_t *dxpl_id_ptr) +{ + const char *fcn_name = "takedown_vfd_test_file()"; + int cp = 0; + hbool_t show_progress = FALSE; + + HDassert(lf_ptr); + HDassert(fapl_id_ptr); + HDassert(dxpl_id_ptr); + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Close the test file if it is open, regardless of the value of pass. + * This should let the test program shut down more cleanly. + */ + + if (*lf_ptr) { + + if (H5FDclose(*lf_ptr) < 0) { + + pass = FALSE; + failure_mssg = "H5FDclose() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On rank 0, delete the test file. + */ + + if (pass) { + + /* wait for everyone to close the file */ + MPI_Barrier(MPI_COMM_WORLD); + + if ((mpi_rank == 0) && (HDremove(filename) < 0)) { + + pass = FALSE; + failure_mssg = "HDremove() failed.\n"; + } + + /* wait for the file delete to complete */ + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Close the fapl */ + if (H5Pclose(*fapl_id_ptr) < 0) { + + pass = FALSE; + failure_mssg = "can't close fapl.\n"; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* Close the dxpl */ + if (H5Pclose(*dxpl_id_ptr) < 0) { + + pass = FALSE; + failure_mssg = "can't close dxpl.\n"; + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + return; + +} /* takedown_vfd_test_file() */ + +/****************************************************************************/ +/******************************* Test Functions 
*****************************/ +/****************************************************************************/ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_1() + * + * Purpose: Simple vector read test: + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire increasing_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) On each rank, zero the read buffer, and then read + * INTS_PER_RANK * sizeof(int32) bytes from the file + * starting at offset mpi_rank * INTS_PER_RANK * + * sizeof(int32_t) in both the file and read_fi_buf. + * Do this with a vector read containing a single + * element. + * + * Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * increasing_fi_buf. + * + * 5) Barrier + * + * 6) Close the test file. + * + * 7) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_1()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + void * bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector read test 1 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector read test 1 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector read test 1 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire increasing_fi_buf to + * the file. 
+ */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, zero the read buffer, and then read + * INTS_PER_RANK * sizeof(int32) bytes from the file + * starting at offset mpi_rank * INTS_PER_RANK * + * sizeof(int32_t) in both the file and read_fi_buf. + * Do this with a vector read containing a single + * element. + * + * Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * increasing_fi_buf. 
+ */ + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + read_fi_buf[i] = 0; + } + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed.\n"; + } + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + if ((i < mpi_rank * INTS_PER_RANK) || (i >= (mpi_rank + 1) * INTS_PER_RANK)) { + + if (read_fi_buf[i] != 0) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (1).\n"; + break; + } + } + else { + + if (read_fi_buf[i] != increasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (2).\n"; + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_1() */ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_2() + * + * Purpose: Simple vector read test with only half of ranks + * participating in each vector read. 
+ * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire decreasing_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) On each rank, zero the read buffer. + * + * 5) On even ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. Do this with a vector read containing + * a single element. + * + * Odd ranks perform an empty read. + * + * 6) Barrier. + * + * 7) On odd ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. Do this with a vector read containing + * a single element. + * + * Even ranks perform an empty read. + * + * 8) Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * decreasing_fi_buf. + * + * 9) Barrier + * + * 10) Close the test file. + * + * 11) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_2()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + void * bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector read test 2 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector read test 2 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector read test 2 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire decreasing_fi_buf to + * the file. 
+ */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)decreasing_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, zero the read buffer. */ + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + read_fi_buf[i] = 0; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) On even ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. Do this with a vector read containing + * a single element. + * + * Odd ranks perform an empty read. + */ + if (pass) { + + if (mpi_rank % 2 == 0) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK])); + } + else { + + count = 0; + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) On odd ranks, read INTS_PER_RANK * sizeof(int32) + * bytes from the file starting at offset mpi_rank * + * INTS_PER_RANK * sizeof(int32_t) in both the file and + * read_fi_buf. 
Do this with a vector read containing + * a single element. + * + * Even ranks perform an empty read. + */ + if (pass) { + + if (mpi_rank % 2 == 1) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[mpi_rank * INTS_PER_RANK])); + } + else { + + count = 0; + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Verify that read_fi_buf contains zeros for all + * indices less than mpi_rank * INTS_PER_RANK, or + * greater than or equal to (mpi_rank + 1) * INTS_PER_RANK. + * For all other indices, read_fi_buf should equal + * decreasing_fi_buf. + */ + + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + if ((i < mpi_rank * INTS_PER_RANK) || (i >= (mpi_rank + 1) * INTS_PER_RANK)) { + + if (read_fi_buf[i] != 0) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (1).\n"; + break; + } + } + else { + + if (read_fi_buf[i] != decreasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "Unexpected value in read_fi_buf (2).\n"; + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 9) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 10) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. 
+ */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_2() */ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_3() + * + * Purpose: Verify that vector read works with multiple entries in + * the vector in each read, and that read buffers need not + * be in increasing (memory) address order. + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Using rank zero, write the entire negative_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) On each rank, zero the four read buffers. + * + * 5) On each rank, do a vector read from the file, with + * each rank's vector having four elements, with each + * element reading INTS_PER_RANK / 4 * sizeof(int32) + * bytes, and the reads starting at address: + * + * (mpi_rank * INTS_PER_RANK) * sizeof(int32_t) + * + * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 4) * + * sizeof(int32_t) + * + * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 2) * + * sizeof(int32_t) + * + * (mpi_rank * INTS_PER_RANK + 3 * INTS_PER_RANK / 4) * + * sizeof(int32_t) + * + * On even ranks, the targets of the reads should be + * buf_0, buf_1, buf_2, and buf_3 respectively. + * + * On odd ranks, the targets of the reads should be + * buf_3, buf_2, buf_1, and buf_0 respectively. + * + * This has the effect of ensuring that on at least + * some ranks, the read buffers are not in increasing + * address order. + * + * 6) Verify that buf_0, buf_1, buf_2, and buf_3 contain + * the expected data. Note that this will be different + * on even vs. odd ranks. 
+ * + * 7) Barrier. + * + * 8) Close the test file. + * + * 9) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_3()"; + char test_title[120]; + char filename[512]; + int32_t buf_0[(INTS_PER_RANK / 4) + 1]; + int32_t buf_1[(INTS_PER_RANK / 4) + 1]; + int32_t buf_2[(INTS_PER_RANK / 4) + 1]; + int32_t buf_3[(INTS_PER_RANK / 4) + 1]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + uint32_t count; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + void * bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector read test 3 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector read test 3 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector read test 3 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, 
filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire negative_fi_buf to + * the file. + */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)negative_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, zero the four read buffers. */ + if (pass) { + + for (i = 0; i <= INTS_PER_RANK / 4; i++) { + + buf_0[i] = 0; + buf_1[i] = 0; + buf_2[i] = 0; + buf_3[i] = 0; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) On each rank, do a vector read from the file, with + * each rank's vector having four elements, with each + * element reading INTS_PER_RANK / 4 * sizeof(int32) + * bytes, and the reads starting at address: + * + * (mpi_rank * INTS_PER_RANK) * sizeof(int32_t) + * + * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 4) * + * sizeof(int32_t) + * + * (mpi_rank * INTS_PER_RANK + INTS_PER_RANK / 2) * + * sizeof(int32_t) + * + * (mpi_rank * INTS_PER_RANK + 3 * INTS_PER_RANK / 4) * + * sizeof(int32_t) + * + * On even ranks, the targets of the reads should be + * buf_0, buf_1, buf_2, and buf_3 respectively. + * + * On odd ranks, the targets of the reads should be + * buf_3, buf_2, buf_1, and buf_0 respectively. + * + * This has the effect of ensuring that on at least + * some ranks, the read buffers are not in increasing + * address order. 
+ */ + if (pass) { + + haddr_t base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + count = 4; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr; + sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = base_addr + ((haddr_t)(INTS_PER_RANK / 4) * (haddr_t)(sizeof(int32_t))); + sizes[1] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t); + + types[2] = H5FD_MEM_DRAW; + addrs[2] = base_addr + ((haddr_t)(INTS_PER_RANK / 2) * (haddr_t)(sizeof(int32_t))); + sizes[2] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t); + + types[3] = H5FD_MEM_DRAW; + addrs[3] = base_addr + ((haddr_t)(3 * INTS_PER_RANK / 4) * (haddr_t)(sizeof(int32_t))); + sizes[3] = (size_t)INTS_PER_RANK / 4 * sizeof(int32_t); + + if (mpi_rank % 2 == 0) { + + bufs[0] = (void *)(&(buf_0[0])); + bufs[1] = (void *)(buf_1); + bufs[2] = (void *)(buf_2); + bufs[3] = (void *)(buf_3); + } + else { + + bufs[0] = (void *)(&(buf_3[0])); + bufs[1] = (void *)(buf_2); + bufs[2] = (void *)(buf_1); + bufs[3] = (void *)(buf_0); + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Verify that buf_0, buf_1, buf_2, and buf_3 contain + * the expected data. Note that this will be different + * on even vs. odd ranks. 
+ */ + if (pass) { + + int base_index = mpi_rank * INTS_PER_RANK; + + for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) { + + if (((mpi_rank % 2 == 0) && (buf_0[i] != negative_fi_buf[base_index + i])) || + ((mpi_rank % 2 == 1) && (buf_3[i] != negative_fi_buf[base_index + i]))) { + + pass = FALSE; + failure_mssg = "Unexpected value in buf (1).\n"; + } + } + + base_index += INTS_PER_RANK / 4; + + for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) { + + if (((mpi_rank % 2 == 0) && (buf_1[i] != negative_fi_buf[base_index + i])) || + ((mpi_rank % 2 == 1) && (buf_2[i] != negative_fi_buf[base_index + i]))) { + + pass = FALSE; + failure_mssg = "Unexpected value in buf (2).\n"; + } + } + + base_index += INTS_PER_RANK / 4; + + for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) { + + if (((mpi_rank % 2 == 0) && (buf_2[i] != negative_fi_buf[base_index + i])) || + ((mpi_rank % 2 == 1) && (buf_1[i] != negative_fi_buf[base_index + i]))) { + + pass = FALSE; + failure_mssg = "Unexpected value in buf (3).\n"; + } + } + + base_index += INTS_PER_RANK / 4; + + for (i = 0; ((pass) && (i < INTS_PER_RANK / 4)); i++) { + + if (((mpi_rank % 2 == 0) && (buf_3[i] != negative_fi_buf[base_index + i])) || + ((mpi_rank % 2 == 1) && (buf_0[i] != negative_fi_buf[base_index + i]))) { + + pass = FALSE; + failure_mssg = "Unexpected value in buf (4).\n"; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. 
+ */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_3() */ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_4() + * + * Purpose: Test vector I/O reads with vectors of different lengths + * and entry sizes across the ranks. Vectors are not, in + * general, sorted in increasing address order. Further, + * reads are not, in general, contiguous. + * + * 1) Open the test file with the specified VFD, set the eoa. + * and setup the DXPL. + * + * 2) Using rank zero, write the entire increasing_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) Set all cells of read_fi_buf to zero. + * + * 5) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). 
+ * + * Setup a vector read between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * as follows: + * + * if ( rank % 4 == 0 ) construct a vector that reads: + * + * INTS_PER_RANK / 4 * sizeof(int32_t) bytes + * starting at base_addr + INTS_PER_RANK / 2 * + * sizeof(int32_t), + * + * INTS_PER_RANK / 8 * sizeof(int32_t) bytes + * starting at base_addr + INTS_PER_RANK / 4 * + * sizeof(int32_t), and + * + * INTS_PER_RANK / 16 * sizeof(int32_t) bytes + * starting at base_addr + INTS_PER_RANK / 16 * + * sizeof(int32_t) + * + * to the equivalent locations in read_fi_buf + * + * if ( rank % 4 == 1 ) construct a vector that reads: + * + * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) + * bytes starting at base_addr + sizeof(int32_t), and + * + * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) bytes + * starting at base_addr + (INTS_PER_RANK / 2 + 1) * + * sizeof(int32_t). + * + * to the equivalent locations in read_fi_buf + * + * if ( rank % 4 == 2 ) construct a vector that reads: + * + * sizeof(int32_t) bytes starting at base_index + + * (INTS_PER_RANK / 2) * sizeof int32_t. + * + * to the equivalent locations in read_fi_buf + * + * if ( rank % 4 == 3 ) construct and read the empty vector + * + * 6) On each rank, verify that read_fi_buf contains + * the expected values -- that is the matching values from + * increasing_fi_buf wherever there was a read, and zero + * otherwise. + * + * 7) Barrier. + * + * 8) Close the test file. + * + * 9) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_4()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + haddr_t base_addr; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + int k; + int base_index; + uint32_t count = 0; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + void * bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector read test 4 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector read test 4 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector read test 4 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire increasing_fi_buf to + * the file. 
+ */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Set all cells of read_fi_buf to zero. */ + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + read_fi_buf[i] = 0; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector read between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * as follows: + */ + if (pass) { + + base_index = mpi_rank * INTS_PER_RANK; + base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t); + + if ((mpi_rank % 4) == 0) { + + /* if ( rank % 4 == 0 ) construct a vector that reads: + * + * INTS_PER_RANK / 4 * sizeof(int32_t) bytes + * starting at base_addr + INTS_PER_RANK / 2 * + * sizeof(int32_t), + * + * INTS_PER_RANK / 8 * sizeof(int32_t) bytes + * starting at base_addr + INTS_PER_RANK / 4 * + * sizeof(int32_t), and + * + * INTS_PER_RANK / 16 * sizeof(int32_t) bytes + * starting at base_addr + INTS_PER_RANK / 16 * + * sizeof(int32_t) + * + * to the equivalent locations in read_fi_buf + */ + + count = 3; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t)); + sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2)])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 4) 
* sizeof(int32_t)); + sizes[1] = (size_t)(INTS_PER_RANK / 8) * sizeof(int32_t); + bufs[1] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 4)])); + + types[2] = H5FD_MEM_DRAW; + addrs[2] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 16) * sizeof(int32_t)); + sizes[2] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t); + bufs[2] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 16)])); + } + else if ((mpi_rank % 4) == 1) { + + /* if ( rank % 4 == 1 ) construct a vector that reads: + * + * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) + * bytes starting at base_addr + sizeof(int32_t), and + * + * ((INTS_PER_RANK / 2) - 2) * sizeof(int32_t) bytes + * starting at base_addr + (INTS_PER_RANK / 2 + 1) * + * sizeof(int32_t). + * + * to the equivalent locations in read_fi_buf + */ + count = 2; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr + (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[base_index + 1])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = base_addr + (haddr_t)((size_t)((INTS_PER_RANK / 2) + 1) * sizeof(int32_t)); + sizes[1] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t); + bufs[1] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2) + 1])); + } + else if ((mpi_rank % 4) == 2) { + + /* if ( rank % 4 == 2 ) construct a vector that reads: + * + * sizeof(int32_t) bytes starting at base_index + + * (INTS_PER_RANK / 2) * sizeof int32_t. 
+ * + * to the equivalent locations in read_fi_buf + */ + count = 1; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t)); + sizes[0] = sizeof(int32_t); + bufs[0] = (void *)(&(read_fi_buf[base_index + (INTS_PER_RANK / 2)])); + } + else if ((mpi_rank % 4) == 3) { + + /* if ( rank % 4 == 3 ) construct and read the empty vector */ + + count = 0; + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed (1).\n"; + } + } + + /* 6) On each rank, verify that read_fi_buf contains the + * the expected values -- that is the matching values from + * increasing_fi_buf where ever there was a read, and zero + * otherwise. + */ + if (pass) { + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; +#if 1 + for (j = base_index; j < base_index + INTS_PER_RANK; j++) { + + k = j - base_index; +#else + for (k = 0; k < INTS_PER_RANK; k++) { + + j = k + base_index; +#endif + + if (i == mpi_rank) { + + switch (i % 4) { + + case 0: + if (((INTS_PER_RANK / 2) <= k) && (k < (3 * (INTS_PER_RANK / 4)))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.1)"; + HDfprintf(stdout, "\nread_fi_buf[%d] = %d, increasing_fi_buf[%d] = %d\n", + j, read_fi_buf[j], j, increasing_fi_buf[j]); + } + } + else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.2)"; + } + } + else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.3)"; + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.4)"; + } + } + break; + + case 1: + if ((1 <= k) && 
(k <= ((INTS_PER_RANK / 2) - 2))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.1)"; + } + } + else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.2)"; + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.3)"; + } + } + break; + + case 2: + if (k == INTS_PER_RANK / 2) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3.1)"; + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3.2)"; + } + } + break; + + case 3: + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (4)"; + } + break; + + default: + HDassert(FALSE); /* should be un-reachable */ + break; + } + } + else if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (5)"; + } + } /* end for loop */ + } /* end for loop */ + } /* end if */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. 
+ */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_4() */ + +/*------------------------------------------------------------------------- + * Function: vector_read_test_5() + * + * Purpose: Test correct management of the sizes[] array optimization, + * where, if sizes[i] == 0, we use sizes[i - 1] as the value + * of size[j], for j >= i. + * + * 1) Open the test file with the specified VFD, set the eoa. + * and setup the DXPL. + * + * 2) Using rank zero, write the entire increasing_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) Set all cells of read_fi_buf to zero. + * + * 5) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector read between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * that reads every 16th integer located in that + * that range starting at base_addr. Use a sizes[] + * array of length 2, with sizes[0] set to sizeof(int32_t), + * and sizes[1] = 0. + * + * Read the integers into the corresponding locations in + * read_fi_buf. + * + * 6) On each rank, verify that read_fi_buf contains the + * the expected values -- that is the matching values from + * increasing_fi_buf where ever there was a read, and zero + * otherwise. + * + * 7) Barrier. + * + * 8) Close the test file. + * + * 9) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_read_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_read_test_5()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + haddr_t base_addr; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + int base_index; + uint32_t count = 0; + H5FD_mem_t types[(INTS_PER_RANK / 16) + 1]; + haddr_t addrs[(INTS_PER_RANK / 16) + 1]; + size_t sizes[2]; + void * bufs[(INTS_PER_RANK / 16) + 1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector read test 5 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector read test 5 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector read test 5 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Using rank zero, write the entire increasing_fi_buf to + * the file. 
+ */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (mpi_rank == 0) { + + if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)increasing_fi_buf) < + 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite() on rank 0 failed.\n"; + } + } + } + + /* 3) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Set all cells of read_fi_buf to zero. */ + if (pass) { + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + read_fi_buf[i] = 0; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector read between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * that reads every 16th integer located in that + * that range starting at base_addr. Use a sizes[] + * array of length 2, with sizes[0] set to sizeof(int32_t), + * and sizes[1] = 0. + * + * Read the integers into the corresponding locations in + * read_fi_buf. 
+ */ + if (pass) { + + base_index = (mpi_rank * INTS_PER_RANK); + base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t); + + count = INTS_PER_RANK / 16; + sizes[0] = sizeof(int32_t); + sizes[1] = 0; + + for (i = 0; i < INTS_PER_RANK / 16; i++) { + + types[i] = H5FD_MEM_DRAW; + addrs[i] = base_addr + ((haddr_t)(16 * i) * (haddr_t)sizeof(int32_t)); + bufs[i] = (void *)(&(read_fi_buf[base_index + (i * 16)])); + } + + if (H5FDread_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On each rank, verify that read_fi_buf contains the + * the expected values -- that is the matching values from + * increasing_fi_buf where ever there was a read, and zero + * otherwise. + */ + if (pass) { + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + INTS_PER_RANK; j++) { + + if ((i == mpi_rank) && (j % 16 == 0)) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + } + } + else if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + } + } /* end for loop */ + } /* end for loop */ + } /* end if */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. 
+ */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_read_test_5() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_1() + * + * Purpose: Simple vector write test: + * + * 1) Open the test file with the specified VFD, set the eoa, + * and setup the DXPL. + * + * 2) Write the entire increasing_fi_buf to the file, with + * exactly one buffer per vector per rank. Use either + * independent or collective I/O as specified. + * + * 3) Barrier + * + * 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf. Report failure + * if any differences are detected. + * + * 5) Close the test file. + * + * 6) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_1(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_1()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + void * bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector write test 1 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector write test 1 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector write test 1 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Write the entire increasing_fi_buf to the file, with + * exactly one buffer per vector per rank. Use either + * independent or collective I/O as specified. 
+ */ + + if (pass) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf. Report failure + * if any differences are detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; i < mpi_size * INTS_PER_RANK; i++) { + + if (read_fi_buf[i] != increasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file"; + break; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. 
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_write_test_1() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_2()
+ *
+ * Purpose: Test vector I/O writes in which only some ranks participate.
+ * Depending on the collective parameter, these writes will
+ * be either collective or independent.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) Write the odd blocks of the increasing_fi_buf to the file,
+ * with the odd ranks writing the odd blocks, and the even
+ * ranks writing an empty vector.
+ *
+ * Here, a "block" of the increasing_fi_buf is a sequence
+ * of integers in increasing_fi_buf of length INTS_PER_RANK,
+ * and with start index a multiple of INTS_PER_RANK.
+ *
+ * 3) Write the even blocks of the negative_fi_buf to the file,
+ * with the even ranks writing the even blocks, and the odd
+ * ranks writing an empty vector.
+ *
+ * 4) Barrier
+ *
+ * 5) On each rank, read the entire file into the read_fi_buf,
+ * and compare against increasing_fi_buf and negative_fi_buf
+ * as appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 6) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/28/21
+ *
+ * Modifications:
+ *
+ * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_2(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_2()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + uint32_t count; + H5FD_mem_t types[1]; + haddr_t addrs[1]; + size_t sizes[1]; + void * bufs[1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector write test 2 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector write test 2 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector write test 2 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Write the odd blocks of the increasing_fi_buf to the file, + * with the odd ranks writing the odd blocks, and the even + * ranks writing an empty vector. 
+ * + * Here, a "block" of the increasing_fi_buf is a sequence + * of integers in increasing_fi_buf of length INTS_PER_RANK, + * and with start index a multiple of INTS_PER_RANK. + */ + if (pass) { + + if (mpi_rank % 2 == 1) { /* odd ranks */ + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + else { /* even ranks */ + + if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (2).\n"; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Write the even blocks of the negative_fi_buf to the file, + * with the even ranks writing the even blocks, and the odd + * ranks writing an empty vector. 
+ */ + if (pass) { + + if (mpi_rank % 2 == 1) { /* odd ranks */ + + if (H5FDwrite_vector(lf, dxpl_id, 0, NULL, NULL, NULL, NULL) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (3).\n"; + } + } + else { /* even ranks */ + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(negative_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (4).\n"; + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) Barrier + */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf. Report failure + * if any differences are detected. 
+ */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + if (i % 2 == 1) { /* odd block */ + + for (j = i * INTS_PER_RANK; ((pass) && (j < (i + 1) * INTS_PER_RANK)); j++) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file"; + break; + } + } + } + else { /* even block */ + + for (j = i * INTS_PER_RANK; ((pass) && (j < (i + 1) * INTS_PER_RANK)); j++) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file"; + break; + } + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_2() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_3() + * + * Purpose: Test vector I/O writes with vectors of multiple entries. + * For now, keep the vectors sorted in increasing address + * order. + * + * 1) Open the test file with the specified VFD, and set + * the eoa. + * + * 2) For each rank, construct a vector with base address + * (mpi_rank * INTS_PER_RANK) and writing all bytes from + * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1. 
+ * Draw equal parts from increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf. + * + * Write to file. + * + * 3) Barrier + * + * 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. + * + * 5) Close the test file. On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/31/21 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_3(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_3()"; + char test_title[120]; + char filename[512]; + haddr_t base_addr; + int base_index; + int ints_per_write; + size_t bytes_per_write; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + uint32_t count; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + void * bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector write test 3 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector write test 3 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector write test 3 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + 
HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) For each rank, construct a vector with base address + * (mpi_rank * INTS_PER_RANK) and writing all bytes from + * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1. + * Draw equal parts from increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf. + * + * Write to file. + */ + if (pass) { + + count = 4; + + base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + ints_per_write = INTS_PER_RANK / 4; + bytes_per_write = (size_t)(ints_per_write) * sizeof(int32_t); + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr; + sizes[0] = bytes_per_write; + bufs[0] = (void *)(&(increasing_fi_buf[mpi_rank * INTS_PER_RANK])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = addrs[0] + (haddr_t)(bytes_per_write); + sizes[1] = bytes_per_write; + bufs[1] = (void *)(&(decreasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 4)])); + + types[2] = H5FD_MEM_DRAW; + addrs[2] = addrs[1] + (haddr_t)(bytes_per_write); + sizes[2] = bytes_per_write; + bufs[2] = (void *)(&(negative_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 2)])); + + types[3] = H5FD_MEM_DRAW; + addrs[3] = addrs[2] + (haddr_t)(bytes_per_write); + sizes[3] = bytes_per_write; + bufs[3] = (void *)(&(zero_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))])); + +#if 0 /* JRM */ + HDfprintf(stdout, "addrs = { %lld, %lld, %lld, %lld}\n", + (long long)addrs[0], (long long)addrs[1], (long long)addrs[2], (long long)addrs[3]); + HDfprintf(stdout, "sizes = { %lld, %lld, %lld, 
%lld}\n", + (long long)sizes[0], (long long)sizes[1], (long long)sizes[2], (long long)sizes[3]); + HDfprintf(stdout, "bufs = { 0x%llx, 0x%llx, 0x%llx, 0x%llx}\n", + (unsigned long long)bufs[0], (unsigned long long)bufs[1], + (unsigned long long)bufs[2], (unsigned long long)bufs[3]); +#endif /* JRM */ + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from 
file (3)";
+ break;
+ }
+ }
+
+ base_index += (INTS_PER_RANK / 4);
+
+ for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) {
+
+ if (read_fi_buf[j] != zero_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (4)";
+ break;
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_write_test_3() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_4()
+ *
+ * Purpose: Test vector I/O writes with vectors of multiple entries.
+ * For now, keep the vectors sorted in increasing address
+ * order.
+ *
+ * This test differs from vector_write_test_3() in the order
+ * in which the file image buffers appear in the vector
+ * write. This guarantees that at least one of these
+ * tests will present buffers with non-increasing addresses
+ * in RAM.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) For each rank, construct a vector with base address
+ * (mpi_rank * INTS_PER_RANK) and writing all bytes from
+ * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1.
+ * Draw equal parts from zero_fi_buf, negative_fi_buf,
+ * decreasing_fi_buf, and increasing_fi_buf.
+ *
+ * Write to file. 
+ * + * 3) Barrier + * + * 4) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, negative_fi_buf, + * decreasing_fi_buf, and increasing_fi_buf as + * appropriate. Report failure if any differences are + * detected. + * + * 5) Close the test file. On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/31/21 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_4(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_4()"; + char test_title[120]; + char filename[512]; + haddr_t base_addr; + int base_index; + int ints_per_write; + size_t bytes_per_write; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + uint32_t count; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + void * bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector write test 4 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector write test 4 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector write test 4 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, 
and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) For each rank, construct a vector with base address + * (mpi_rank * INTS_PER_RANK) and writing all bytes from + * that address to ((mpi_rank + 1) * INTS_PER_RANK) - 1. + * Draw equal parts from increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf. + * + * Write to file. + */ + if (pass) { + + count = 4; + + base_addr = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + ints_per_write = INTS_PER_RANK / 4; + bytes_per_write = (size_t)(ints_per_write) * sizeof(int32_t); + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr; + sizes[0] = bytes_per_write; + bufs[0] = (void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK])); + + types[1] = H5FD_MEM_DRAW; + addrs[1] = addrs[0] + (haddr_t)(bytes_per_write); + sizes[1] = bytes_per_write; + bufs[1] = (void *)(&(negative_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 4)])); + + types[2] = H5FD_MEM_DRAW; + addrs[2] = addrs[1] + (haddr_t)(bytes_per_write); + sizes[2] = bytes_per_write; + bufs[2] = (void *)(&(decreasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (INTS_PER_RANK / 2)])); + + types[3] = H5FD_MEM_DRAW; + addrs[3] = addrs[2] + (haddr_t)(bytes_per_write); + sizes[3] = bytes_per_write; + bufs[3] = (void *)(&(increasing_fi_buf[(mpi_rank * INTS_PER_RANK) + (3 * (INTS_PER_RANK / 4))])); + +#if 0 /* JRM */ + HDfprintf(stdout, "addrs = { %lld, %lld, %lld, %lld}\n", + (long long)addrs[0], (long long)addrs[1], (long long)addrs[2], (long long)addrs[3]); + HDfprintf(stdout, "sizes = { %lld, %lld, %lld, %lld}\n", + (long long)sizes[0], (long long)sizes[1], (long long)sizes[2], (long long)sizes[3]); + HDfprintf(stdout, "bufs = { 0x%llx, 0x%llx, 
0x%llx, 0x%llx}\n", + (unsigned long long)bufs[0], (unsigned long long)bufs[1], + (unsigned long long)bufs[2], (unsigned long long)bufs[3]); +#endif /* JRM */ + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. + */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != zero_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3)"; + break; + } + } + + base_index += (INTS_PER_RANK / 4); + + for (j = base_index; j < base_index + (INTS_PER_RANK / 4); j++) { + + if 
(read_fi_buf[j] != increasing_fi_buf[j]) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected data read from file (4)";
+ break;
+ }
+ }
+ }
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) Close the test file and delete it (on rank 0 only).
+ * Close FAPL and DXPL.
+ */
+
+ if (pass) {
+
+ takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* report results */
+ if (mpi_rank == 0) {
+
+ if (pass) {
+
+ PASSED();
+ }
+ else {
+
+ H5_FAILED();
+
+ if (show_progress) {
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg);
+ }
+ }
+ }
+
+ return (!pass);
+
+} /* vector_write_test_4() */
+
+/*-------------------------------------------------------------------------
+ * Function: vector_write_test_5()
+ *
+ * Purpose: Test vector I/O writes with vectors of different lengths
+ * and entry sizes across the ranks. Vectors are not, in
+ * general, sorted in increasing address order. Further,
+ * writes are not, in general, contiguous.
+ *
+ * 1) Open the test file with the specified VFD, and set
+ * the eoa.
+ *
+ * 2) Set the test file in a known state by writing zeros
+ * to all bytes in the test file. Since we have already
+ * tested this, do this via a vector write of zero_fi_buf.
+ *
+ * 3) Barrier
+ *
+ * 4) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t). 
+ *
+ * Setup a vector write between base_addr and
+ * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1
+ * as follows:
+ *
+ * if ( rank % 4 == 0 ) construct a vector that writes:
+ *
+ * negative_fi_buf starting at base_index +
+ * INTS_PER_RANK / 2 and running for INTS_PER_RANK / 4
+ * entries,
+ *
+ * decreasing_fi_buf starting at base_index +
+ * INTS_PER_RANK / 4 and running for INTS_PER_RANK / 8
+ * entries, and
+ *
+ * increasing_fi_buf starting at base_index +
+ * INTS_PER_RANK / 16 and running for INTS_PER_RANK / 16
+ * entries
+ *
+ * to the equivalent locations in the file.
+ *
+ * if ( rank % 4 == 1 ) construct a vector that writes:
+ *
+ * increasing_fi_buf starting at base_index + 1 and
+ * running for (INTS_PER_RANK / 2) - 2 entries, and
+ *
+ * decreasing_fi_buf starting at base_index +
+ * INTS_PER_RANK / 2 + 1 and running for (INTS_PER_RANK / 2)
+ * - 2 entries
+ *
+ * if ( rank % 4 == 2 ) construct a vector that writes:
+ *
+ * negative_fi_buf starting at base_index +
+ * INTS_PER_RANK / 2 and running for one entry.
+ *
+ * if ( rank % 4 == 3 ) construct and write the empty vector
+ *
+ * 5) Barrier
+ *
+ * 6) On each rank, read the entire file into the read_fi_buf,
+ * and compare against zero_fi_buf, negative_fi_buf,
+ * decreasing_fi_buf, and increasing_fi_buf as
+ * appropriate. Report failure if any differences are
+ * detected.
+ *
+ * 7) Close the test file. On rank 0, delete the test file.
+ *
+ * Return: FALSE on success, TRUE if any errors are detected.
+ *
+ * Programmer: John Mainzer
+ * 3/31/21
+ *
+ * Modifications:
+ *
+ * None. 
+ * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_5(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_5()"; + char test_title[120]; + char filename[512]; + haddr_t base_addr; + int base_index; + haddr_t eoa; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int j; + int k; + uint32_t count; + H5FD_mem_t types[4]; + haddr_t addrs[4]; + size_t sizes[4]; + void * bufs[4]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector write test 5 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector write test 5 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector write test 5 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set the eoa, and setup the dxpl */ + if (pass) { + + eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + + setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf, + &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 2) Set the test file in a known state by writing zeros + * to all bytes in the test file. 
Since we have already + * tested this, do this via a vector write of zero_fi_buf. + */ + if (pass) { + + count = 1; + types[0] = H5FD_MEM_DRAW; + addrs[0] = (haddr_t)mpi_rank * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t)); + sizes[0] = (size_t)INTS_PER_RANK * sizeof(int32_t); + bufs[0] = (void *)(&(zero_fi_buf[mpi_rank * INTS_PER_RANK])); + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed.\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 3) Barrier + */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 4) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector write between base_addr and + * base_addr + INTS_PER_RANK * sizeof(int32_t) - 1 + * as follows: + */ + if (pass) { + + base_index = mpi_rank * INTS_PER_RANK; + base_addr = (haddr_t)((size_t)base_index * sizeof(int32_t)); + + if ((mpi_rank % 4) == 0) { + + /* if ( rank % 4 == 0 ) construct a vector that writes: + * + * negative_fi_buf starting at base_index + + * INTS_PER_RANK / 2 and running for INTS_PER_RANK / 4 + * entries, + * + * decreasing_fi_buf starting at base_index + + * INTS_PER_RANK / 4 and running for INTS_PER_RANK / 8 + * entries, and + * + * increasing_fi_buf starting at base_index + + * INTS_PER_RANK / 16 and running for INTS_PER_RANK / 16 + * entries + * + * to the equivalent locations in the file. 
+ */
+ count = 3;
+
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t));
+ sizes[0] = (size_t)(INTS_PER_RANK / 4) * sizeof(int32_t);
+ bufs[0] = (void *)(&(negative_fi_buf[base_index + (INTS_PER_RANK / 2)]));
+
+ types[1] = H5FD_MEM_DRAW;
+ addrs[1] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 4) * sizeof(int32_t));
+ sizes[1] = (size_t)(INTS_PER_RANK / 8) * sizeof(int32_t);
+ bufs[1] = (void *)(&(decreasing_fi_buf[base_index + (INTS_PER_RANK / 4)]));
+
+ types[2] = H5FD_MEM_DRAW;
+ addrs[2] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 16) * sizeof(int32_t));
+ sizes[2] = (size_t)(INTS_PER_RANK / 16) * sizeof(int32_t);
+ bufs[2] = (void *)(&(increasing_fi_buf[base_index + (INTS_PER_RANK / 16)]));
+ }
+ else if ((mpi_rank % 4) == 1) {
+
+ /* if ( rank % 4 == 1 ) construct a vector that writes:
+ *
+ * increasing_fi_buf starting at base_index + 1 and
+ * running for (INTS_PER_RANK / 2) - 2 entries, and
+ *
+ * decreasing_fi_buf starting at base_addr +
+ * INTS_PER_RANK / 2 + 1 and running for (INTS_PER_RANK / 2)
+ * - 2 entries
+ *
+ * to the equivalent locations in the file.
+ */
+ count = 2;
+
+ types[0] = H5FD_MEM_DRAW;
+ addrs[0] = base_addr + (haddr_t)(sizeof(int32_t));
+ sizes[0] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+ bufs[0] = (void *)(&(increasing_fi_buf[base_index + 1]));
+
+ types[1] = H5FD_MEM_DRAW;
+ addrs[1] = base_addr + (haddr_t)((size_t)((INTS_PER_RANK / 2) + 1) * sizeof(int32_t));
+ sizes[1] = (size_t)((INTS_PER_RANK / 2) - 2) * sizeof(int32_t);
+ bufs[1] = (void *)(&(decreasing_fi_buf[base_index + (INTS_PER_RANK / 2) + 1]));
+ }
+ else if ((mpi_rank % 4) == 2) {
+
+ /* if ( rank % 4 == 2 ) construct a vector that writes:
+ *
+ * negative_fi_buf starting at base_index +
+ * INTS_PER_RANK / 2 and running for one entry.
+ *
+ * to the equivalent location in the file. 
+ */ + count = 1; + + types[0] = H5FD_MEM_DRAW; + addrs[0] = base_addr + (haddr_t)((size_t)(INTS_PER_RANK / 2) * sizeof(int32_t)); + sizes[0] = sizeof(int32_t); + bufs[0] = (void *)(&(negative_fi_buf[base_index + (INTS_PER_RANK / 2)])); + } + else if ((mpi_rank % 4) == 3) { + + /* if ( rank % 4 == 3 ) construct and write the empty vector */ + + count = 0; + } + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On each rank, read the entire file into the read_fi_buf, + * and compare against increasing_fi_buf, + * decreasing_fi_buf, negative_fi_buf, and zero_fi_buf as + * appropriate. Report failure if any differences are + * detected. 
+ */ + + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size)); i++) { + + base_index = i * INTS_PER_RANK; + + for (j = base_index; j < base_index + INTS_PER_RANK; j++) { + + k = j - base_index; + + switch (i % 4) { + + case 0: + if (((INTS_PER_RANK / 2) <= k) && (k < (3 * (INTS_PER_RANK / 4)))) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.1)"; + } + } + else if (((INTS_PER_RANK / 4) <= k) && (k < (3 * (INTS_PER_RANK / 8)))) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.2)"; + } + } + else if (((INTS_PER_RANK / 16) <= k) && (k < (INTS_PER_RANK / 8))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.3)"; + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1.4)"; + } + } + break; + + case 1: + if ((1 <= k) && (k <= ((INTS_PER_RANK / 2) - 2))) { + + if (read_fi_buf[j] != increasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.1)"; + } + } + else if ((((INTS_PER_RANK / 2) + 1) <= k) && (k <= (INTS_PER_RANK - 2))) { + + if (read_fi_buf[j] != decreasing_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.2)"; + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2.3)"; + } + } + break; + + case 2: + if (k == INTS_PER_RANK / 2) { + + if (read_fi_buf[j] != negative_fi_buf[j]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (3.1)"; + } + } + else { + + if (read_fi_buf[j] != 0) { + + pass = FALSE; + 
failure_mssg = "unexpected data read from file (3.2)"; + } + } + break; + + case 3: + if (read_fi_buf[j] != 0) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (4)"; + } + break; + + default: + HDassert(FALSE); /* should be un-reachable */ + break; + } + } + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_5() */ + +/*------------------------------------------------------------------------- + * Function: vector_write_test_6() + * + * Purpose: Test correct management of the sizes[] array optimization, + * where, if sizes[i] == 0, we use sizes[i - 1] as the value + * of size[j], for j >= i. + * + * 1) Open the test file with the specified VFD, set the eoa. + * and setup the DXPL. + * + * 2) Using rank zero, write the entire zero_fi_buf to + * the file. + * + * 3) Barrier + * + * 4) For each rank, define base_index equal to: + * + * mpi_rank * INTS_PER_RANK + * + * and define base_addr equal to + * + * base_index * sizeof(int32_t). + * + * Setup a vector write from increasing_fi_buf between + * base_addr and base_addr + INTS_PER_RANK * + * sizeof(int32_t) - 1 that writes every 16th integer + * located in that range starting at base_addr. + * Use a sizes[] array of length 2, with sizes[0] set + * to sizeof(int32_t), and sizes[1] = 0. + * + * Write the integers into the corresponding locations in + * the file. 
+ * + * 5) Barrier + * + * 6) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, and increasing_fi_buf + * as appropriate. Report failure if any differences are + * detected. + * + * 7) Barrier. + * + * 8) Close the test file. + * + * 9) On rank 0, delete the test file. + * + * Return: FALSE on success, TRUE if any errors are detected. + * + * Programmer: John Mainzer + * 3/26/21 + * + * Modifications: + * + * None. + * + *------------------------------------------------------------------------- + */ + +static unsigned +vector_write_test_6(int file_name_id, int mpi_rank, int mpi_size, H5FD_mpio_xfer_t xfer_mode, + H5FD_mpio_collective_opt_t coll_opt_mode, const char *vfd_name) +{ + const char *fcn_name = "vector_write_test_6()"; + char test_title[120]; + char filename[512]; + haddr_t eoa; + haddr_t base_addr; + hbool_t show_progress = FALSE; + hid_t fapl_id = -1; /* file access property list ID */ + hid_t dxpl_id = -1; /* data access property list ID */ + H5FD_t * lf = NULL; /* VFD struct ptr */ + int cp = 0; + int i; + int base_index; + uint32_t count = 0; + H5FD_mem_t types[(INTS_PER_RANK / 16) + 1]; + haddr_t addrs[(INTS_PER_RANK / 16) + 1]; + size_t sizes[2]; + void * bufs[(INTS_PER_RANK / 16) + 1]; + + pass = TRUE; + + if (mpi_rank == 0) { + + if (xfer_mode == H5FD_MPIO_INDEPENDENT) { + + sprintf(test_title, "parallel vector write test 6 -- %s / independent", vfd_name); + } + else if (coll_opt_mode == H5FD_MPIO_INDIVIDUAL_IO) { + + sprintf(test_title, "parallel vector write test 6 -- %s / col op / ind I/O", vfd_name); + } + else { + + HDassert(coll_opt_mode == H5FD_MPIO_COLLECTIVE_IO); + + sprintf(test_title, "parallel vector write test 6 -- %s / col op / col I/O", vfd_name); + } + + TESTING(test_title); + } + + show_progress = ((show_progress) && (mpi_rank == 0)); + + if (show_progress) + HDfprintf(stdout, "\n%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 1) Open the test file with the specified VFD, set 
the eoa, and setup the dxpl */
+ if (pass) {
+
+ eoa = (haddr_t)mpi_size * (haddr_t)INTS_PER_RANK * (haddr_t)(sizeof(int32_t));
+
+ setup_vfd_test_file(file_name_id, filename, mpi_size, xfer_mode, coll_opt_mode, vfd_name, eoa, &lf,
+ &fapl_id, &dxpl_id);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 2) Using rank zero, write the entire zero_fi_buf to
+ * the file.
+ */
+ if (pass) {
+
+ size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t);
+
+ if (mpi_rank == 0) {
+
+ if (H5FDwrite(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)zero_fi_buf) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5FDwrite() on rank 0 failed.\n";
+ }
+ }
+ }
+
+ /* 3) Barrier */
+
+ if (pass) {
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ if (show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 4) For each rank, define base_index equal to:
+ *
+ * mpi_rank * INTS_PER_RANK
+ *
+ * and define base_addr equal to
+ *
+ * base_index * sizeof(int32_t).
+ *
+ * Setup a vector write from increasing_fi_buf between
+ * base_addr and base_addr + INTS_PER_RANK *
+ * sizeof(int32_t) - 1 that writes every 16th integer
+ * located in that range starting at base_addr.
+ * Use a sizes[] array of length 2, with sizes[0] set
+ * to sizeof(int32_t), and sizes[1] = 0.
+ *
+ * Write the integers into the corresponding locations in
+ * the file. 
+ */ + if (pass) { + + base_index = (mpi_rank * INTS_PER_RANK); + base_addr = (haddr_t)base_index * (haddr_t)sizeof(int32_t); + + count = INTS_PER_RANK / 16; + sizes[0] = sizeof(int32_t); + sizes[1] = 0; + + for (i = 0; i < INTS_PER_RANK / 16; i++) { + + types[i] = H5FD_MEM_DRAW; + addrs[i] = base_addr + ((haddr_t)(16 * i) * (haddr_t)sizeof(int32_t)); + bufs[i] = (void *)(&(increasing_fi_buf[base_index + (i * 16)])); + } + + if (H5FDwrite_vector(lf, dxpl_id, count, types, addrs, sizes, bufs) < 0) { + + pass = FALSE; + failure_mssg = "H5FDwrite_vector() failed (1).\n"; + } + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 5) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 6) On each rank, read the entire file into the read_fi_buf, + * and compare against zero_fi_buf, and increasing_fi_buf + * as appropriate. Report failure if any differences are + * detected. 
+ */ + if (pass) { + + size_t image_size = (size_t)mpi_size * (size_t)INTS_PER_RANK * sizeof(int32_t); + + if (H5FDread(lf, H5FD_MEM_DRAW, H5P_DEFAULT, (haddr_t)0, image_size, (void *)read_fi_buf) < 0) { + + pass = FALSE; + failure_mssg = "H5FDread() failed.\n"; + } + + for (i = 0; ((pass) && (i < mpi_size * INTS_PER_RANK)); i++) { + + if (i % 16 == 0) { + + if (read_fi_buf[i] != increasing_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (1)"; + } + } + else if (read_fi_buf[i] != zero_fi_buf[i]) { + + pass = FALSE; + failure_mssg = "unexpected data read from file (2)"; + } + } + } /* end if */ + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 7) Barrier */ + + if (pass) { + + MPI_Barrier(MPI_COMM_WORLD); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* 8) Close the test file and delete it (on rank 0 only). + * Close FAPL and DXPL. + */ + + if (pass) { + + takedown_vfd_test_file(mpi_rank, filename, &lf, &fapl_id, &dxpl_id); + } + + if (show_progress) + HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass); + + /* report results */ + if (mpi_rank == 0) { + + if (pass) { + + PASSED(); + } + else { + + H5_FAILED(); + + if (show_progress) { + HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", fcn_name, failure_mssg); + } + } + } + + return (!pass); + +} /* vector_write_test_6() */ + +/*------------------------------------------------------------------------- + * Function: main + * + * Purpose: Run parallel VFD tests. 
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: John Mainzer
+ * 3/26/21
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(int argc, char **argv)
+{
+ unsigned nerrs = 0;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size;
+ int mpi_rank;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Attempt to turn off atexit post processing so that in case errors
+ * occur during the test and the process is aborted, it will not hang
+ * in the atexit post processing. If it does, it may try to make MPI
+ * calls which may not work.
+ */
+ if (H5dont_atexit() < 0)
+ HDprintf("%d:Failed to turn off atexit processing. Continue.\n", mpi_rank);
+
+ H5open();
+
+ if (mpi_rank == 0) {
+ HDprintf("=========================================\n");
+ HDprintf("Parallel virtual file driver (VFD) tests\n");
+ HDprintf(" mpi_size = %d\n", mpi_size);
+ HDprintf("=========================================\n");
+ }
+
+ if (mpi_size < 2) {
+ if (mpi_rank == 0)
+ HDprintf(" Need at least 2 processes. Exiting.\n");
+ goto finish;
+ }
+
+ alloc_and_init_file_images(mpi_size);
+
+ if (!pass) {
+
+ HDprintf("\nAllocation and initialize of file image buffers failed. 
Test aborted.\n"); + } + + MPI_Barrier(MPI_COMM_WORLD); + + // sleep(60); + + nerrs += + vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += vector_read_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_1(0, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_1(0, 
mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_2(1, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_3(2, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_4(3, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_5(4, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + + nerrs += + vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_INDEPENDENT, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDIVIDUAL_IO, "mpio"); + nerrs += + vector_write_test_6(5, mpi_rank, mpi_size, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_COLLECTIVE_IO, "mpio"); + +finish: + + /* make sure all processes are finished before final report, cleanup + * and exit. 
+ */ + MPI_Barrier(MPI_COMM_WORLD); + + if (mpi_rank == 0) { /* only process 0 reports */ + HDprintf("===================================\n"); + if (nerrs > 0) + HDprintf("***parallel vfd tests detected %d failures***\n", nerrs); + else + HDprintf("parallel vfd tests finished with no failures\n"); + HDprintf("===================================\n"); + } + + /* discard the file image buffers */ + free_file_images(); + + /* close HDF5 library */ + H5close(); + + /* MPI_Finalize must be called AFTER H5close which may use MPI calls */ + MPI_Finalize(); + + /* cannot just return (nerrs) because exit code is limited to 1byte */ + return (nerrs > 0); + +} /* main() */