[svn-r13997] Moved H5D_istore_lock and H5D_istore_unlock from H5D_istore_writevv and H5D_istore_readvv to H5D_chunk_write and H5D_chunk_read, to avoid frequent locking and unlocking and to improve performance.

Tested with h5committest on THG machines.
Raymond Lu 2007-07-21 14:50:43 -05:00
parent 774ee2fbf0
commit 7f2a3a97c3
9 changed files with 304 additions and 179 deletions
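
The core of the change: H5D_istore_readvv and H5D_istore_writevv used to lock the chunk cache and unlock it again on every vector I/O call. After this commit, H5D_chunk_read and H5D_chunk_write lock each chunk once, hand the chunk address and the (possibly NULL) cached chunk pointer down through the I/O callbacks, and unlock the chunk only after all I/O on it has finished. A condensed sketch of the new per-chunk read pattern, taken from the H5D_chunk_read hunk below (error handling, strip mining and datatype conversion omitted):

    /* Sketch: one lock/unlock per chunk instead of one per readvv call */
    while(chunk_node) {
        H5D_chunk_info_t *chunk_info = H5SL_item(chunk_node);
        haddr_t chunk_addr = H5D_istore_get_addr(io_info, &udata);
        void *chunk = NULL;

        /* Lock the chunk in the cache once, unless the cache is bypassed */
        if(H5D_istore_if_load(dataset, chunk_addr))
            chunk = H5D_istore_lock(io_info, &udata, FALSE, &idx_hint);

        /* All vector I/O on this chunk reuses the already-locked chunk */
        (io_info->ops.read)(io_info, chunk_info->chunk_points,
            H5T_get_size(dataset->shared->type), chunk_info->fspace,
            chunk_info->mspace, chunk_addr, chunk, buf);

        /* Unlock once, after all I/O on the chunk is done */
        if(chunk)
            H5D_istore_unlock(io_info, FALSE, idx_hint, chunk,
                chunk_info->chunk_points * H5T_get_size(dataset->shared->type));

        chunk_node = H5SL_next(chunk_node);
    }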

src/H5Dcompact.c

@ -139,7 +139,7 @@ ssize_t
H5D_compact_readvv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
void *buf)
haddr_t UNUSED addr, void UNUSED *pointer/*in*/, void *buf)
{
ssize_t ret_value; /* Return value */
@ -181,7 +181,7 @@ ssize_t
H5D_compact_writevv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
const void *buf)
haddr_t UNUSED addr, void UNUSED *pointer/*in*/, const void *buf)
{
ssize_t ret_value; /* Return value */

src/H5Dcontig.c

@ -359,8 +359,8 @@ H5D_contig_write(H5D_t *dset, const H5D_dxpl_cache_t *dxpl_cache,
assert (buf);
H5D_BUILD_IO_INFO(&io_info,dset,dxpl_cache,dxpl_id,store);
if(H5D_contig_writevv(&io_info, (size_t)1, &dset_curr_seq, &dset_len,
&dset_off, (size_t)1, &mem_curr_seq, &mem_len, &mem_off, buf) < 0)
if(H5D_contig_writevv(&io_info, (size_t)1, &dset_curr_seq, &dset_len, &dset_off,
(size_t)1, &mem_curr_seq, &mem_len, &mem_off, (haddr_t)0, NULL, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vector write failed")
done:
@ -390,7 +390,7 @@ ssize_t
H5D_contig_readvv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *_buf)
haddr_t UNUSED address, void UNUSED *pointer, void *_buf)
{
H5F_t *file = io_info->dset->oloc.file; /* File for dataset */
H5D_rdcdc_t *dset_contig=&(io_info->dset->shared->cache.contig); /* Cached information about contiguous data */
@ -655,7 +655,7 @@ ssize_t
H5D_contig_writevv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *_buf)
haddr_t UNUSED address, void UNUSED *pointer, const void *_buf)
{
H5F_t *file = io_info->dset->oloc.file; /* File for dataset */
H5D_rdcdc_t *dset_contig=&(io_info->dset->shared->cache.contig); /* Cached information about contiguous data */

src/H5Defl.c

@ -256,7 +256,7 @@ ssize_t
H5D_efl_readvv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *_buf)
haddr_t UNUSED address, void UNUSED *pointer/*in*/, void *_buf)
{
const H5O_efl_t *efl=&(io_info->store->efl); /* Pointer to efl info */
unsigned char *buf; /* Pointer to buffer to write */
@ -340,7 +340,7 @@ ssize_t
H5D_efl_writevv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *_buf)
haddr_t UNUSED address, void UNUSED *pointer/*in*/, const void *_buf)
{
const H5O_efl_t *efl=&(io_info->store->efl); /* Pointer to efl info */
const unsigned char *buf; /* Pointer to buffer to write */

src/H5Dio.c

@ -821,10 +821,10 @@ H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
}
else
#endif
{
{
if((io_info->ops.read)(io_info, (size_t)nelmts,
H5T_get_size(dataset->shared->type), file_space, mem_space,
(haddr_t)0, buf/*out*/) < 0)
(haddr_t)0, NULL, buf/*out*/) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "contiguous read failed ");
}
@ -926,7 +926,7 @@ H5D_contig_read(H5D_io_info_t *io_info, hsize_t nelmts,
|| (dataset->shared->layout.type == H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|| dataset->shared->dcpl_cache.efl.nused > 0 ||
dataset->shared->layout.type == H5D_COMPACT);
n = H5D_select_fgath(io_info, file_space, &file_iter, smine_nelmts, tconv_buf/*out*/);
n = H5D_select_fgath(io_info, file_space, &file_iter, smine_nelmts, (haddr_t)0, NULL, tconv_buf/*out*/);
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[1].gath_timer), &timer);
@ -1096,7 +1096,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
{
if((io_info->ops.write)(io_info, (size_t)nelmts,
H5T_get_size(dataset->shared->type), file_space, mem_space,
(haddr_t)0, buf/*out*/) < 0)
(haddr_t)0, NULL, buf/*out*/) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "contiguous write failed ")
}
@ -1221,9 +1221,8 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
n = H5D_select_fgath(io_info,
file_space, &bkg_iter, smine_nelmts,
bkg_buf/*out*/);
n = H5D_select_fgath(io_info, file_space, &bkg_iter, smine_nelmts,
(haddr_t)0, NULL, bkg_buf/*out*/);
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].bkg_timer), &timer);
@ -1253,7 +1252,7 @@ H5D_contig_write(H5D_io_info_t *io_info, hsize_t nelmts,
H5_timer_begin(&timer);
#endif
status = H5D_select_fscat(io_info, file_space, &file_iter, smine_nelmts,
tconv_buf);
(haddr_t)0, NULL, tconv_buf);
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].scat_timer), &timer);
io_info->stats->stats[0].scat_nbytes += smine_nelmts * dst_type_size;
@ -1297,6 +1296,12 @@ done:
* Programmer: Raymond Lu
* Thursday, April 10, 2003
*
* Modification:
* Raymond Lu
* 20 July 2007
* Moved H5D_istore_lock and H5D_istore_unlock to this level
* from H5D_istore_readvv to avoid frequent lock and unlock
* and to improve performance.
*-------------------------------------------------------------------------
*/
static herr_t
@ -1320,6 +1325,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
size_t request_nelmts; /*requested strip mine */
hsize_t smine_start; /*strip mine start loc */
size_t n, smine_nelmts; /*elements per strip */
size_t accessed_bytes; /*total accessed size in a chunk */
H5S_sel_iter_t mem_iter; /*memory selection iteration info*/
hbool_t mem_iter_init = FALSE; /*memory selection iteration info has been initialized */
H5S_sel_iter_t bkg_iter; /*background iteration info*/
@ -1330,6 +1336,10 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
uint8_t *tconv_buf = NULL; /*datatype conv buffer */
uint8_t *bkg_buf = NULL; /*background buffer */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
void *chunk = NULL;
haddr_t chunk_addr; /* Chunk address on disk */
H5D_istore_ud1_t udata; /*B-tree pass-through */
unsigned idx_hint=0; /* Cache index hint */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_read)
@ -1383,11 +1393,27 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
/* Load the chunk into cache and lock it. */
chunk_addr = H5D_istore_get_addr(io_info, &udata);
if (H5D_istore_if_load(dataset, chunk_addr)) {
if(NULL == (chunk = H5D_istore_lock(io_info, &udata, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
} else
chunk = NULL;
/* Perform the actual read operation */
if((io_info->ops.read)(io_info, chunk_info->chunk_points,
H5T_get_size(dataset->shared->type), chunk_info->fspace,
chunk_info->mspace, (haddr_t)0, buf) < 0)
chunk_info->mspace, chunk_addr, chunk, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, " chunked read failed")
/* Release the cache lock on the chunk. */
if (H5D_istore_if_load(dataset, chunk_addr)) {
accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
if(H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
}
/* Advance to next chunk in list */
chunk_node = H5SL_next(chunk_node);
@ -1492,6 +1518,15 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
/* Load the chunk into cache and lock it. */
chunk_addr = H5D_istore_get_addr(io_info, &udata);
if (H5D_istore_if_load(dataset, chunk_addr)) {
if(NULL == (chunk = H5D_istore_lock(io_info, &udata, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
} else
chunk = NULL;
for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) {
/* Go figure out how many elements to read from the file */
assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(chunk_info->chunk_points-smine_start));
@ -1509,7 +1544,14 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
HDassert(((dataset->shared->layout.type == H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|| (dataset->shared->layout.type == H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|| dataset->shared->dcpl_cache.efl.nused > 0 || dataset->shared->layout.type == H5D_COMPACT);
n = H5D_select_fgath(io_info, chunk_info->fspace, &file_iter, smine_nelmts, tconv_buf/*out*/);
if(chunk) {
n = H5D_select_mgath(chunk, chunk_info->fspace, &file_iter,
smine_nelmts, dxpl_cache, tconv_buf/*out*/);
} else {
n = H5D_select_fgath(io_info, chunk_info->fspace, &file_iter, smine_nelmts,
(haddr_t)0, NULL, tconv_buf/*out*/);
}
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[1].gath_timer), &timer);
@ -1577,6 +1619,13 @@ H5D_chunk_read(H5D_io_info_t *io_info, hsize_t nelmts,
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "scatter failed")
} /* end for */
/* Release the cache lock on the chunk. */
if (H5D_istore_if_load(dataset, chunk_addr)) {
accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
if(H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
}
/* Release selection iterators */
if(file_iter_init) {
if(H5S_SELECT_ITER_RELEASE(&file_iter) < 0)
@ -1636,6 +1685,12 @@ done:
* Programmer: Raymond Lu
* Thursday, April 10, 2003
*
* Modification:
* Raymond Lu
* 20 July 2007
* Moved H5D_istore_lock and H5D_istore_unlock to this level
* from H5D_istore_writevv to avoid frequent lock and unlock
* and to improve performance.
*-------------------------------------------------------------------------
*/
static herr_t
@ -1657,6 +1712,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
size_t max_type_size; /* Size of largest source/destination type */
size_t target_size; /*desired buffer size */
size_t request_nelmts; /*requested strip mine */
size_t accessed_bytes; /*total accessed size in a chunk */
hsize_t smine_start; /*strip mine start loc */
size_t n, smine_nelmts; /*elements per strip */
H5S_sel_iter_t mem_iter; /*memory selection iteration info*/
@ -1669,7 +1725,11 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
uint8_t *tconv_buf = NULL; /*datatype conv buffer */
uint8_t *bkg_buf = NULL; /*background buffer */
H5D_storage_t store; /*union of EFL and chunk pointer in file space */
void *chunk = NULL;
haddr_t chunk_addr; /* Chunk address on disk */
H5D_istore_ud1_t udata; /*B-tree pass-through */
unsigned idx_hint=0; /* Cache index hint */
hbool_t relax=TRUE; /* Whether whole chunk is selected */
herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_write)
@ -1681,6 +1741,16 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
/* Set dataset storage for I/O info */
io_info->store=&store;
#ifdef H5_HAVE_PARALLEL
/* Additional sanity checks when operating in parallel */
if(IS_H5FD_MPI(dataset->oloc.file)) {
if (chunk_addr==HADDR_UNDEF)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk")
if (dataset->shared->dcpl_cache.pline.nused>0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot write to chunked storage with filters in parallel")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
/*
* If there is no type conversion then write directly from the
* application's buffer. This saves at least one mem-to-mem copy.
@ -1718,12 +1788,33 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
/* Load the chunk into cache. But if the whole chunk is written,
* simply allocate space instead of loading the chunk. */
chunk_addr = H5D_istore_get_addr(io_info, &udata);
if (H5D_istore_if_load(dataset, chunk_addr)) {
accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
if(accessed_bytes != dataset->shared->layout.u.chunk.size)
relax=FALSE;
if(NULL == (chunk = H5D_istore_lock(io_info, &udata, relax, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
} else
chunk = NULL;
/* Perform the actual write operation */
if((io_info->ops.write)(io_info, chunk_info->chunk_points,
H5T_get_size(dataset->shared->type), chunk_info->fspace,
chunk_info->mspace, (haddr_t)0, buf) < 0)
chunk_info->mspace, chunk_addr, chunk, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, " chunked write failed")
/* Release the cache lock on the chunk. */
if (H5D_istore_if_load(dataset, chunk_addr)) {
if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
}
relax=TRUE;
/* Advance to next chunk in list */
chunk_node = H5SL_next(chunk_node);
} /* end while */
@ -1830,6 +1921,25 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
store.chunk.offset = chunk_info->coords;
store.chunk.index = chunk_info->index;
/* Load the chunk into cache. But if the whole chunk is written,
* simply allocate space instead of loading the chunk. */
chunk_addr = H5D_istore_get_addr(io_info, &udata);
if (H5D_istore_if_load(dataset, chunk_addr)) {
accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
if(accessed_bytes != dataset->shared->layout.u.chunk.size)
relax=FALSE;
if(relax) {
accessed_bytes = H5S_GET_SELECT_NPOINTS(chunk_info->mspace)*H5T_get_size(mem_type);
if(accessed_bytes != dataset->shared->layout.u.chunk.size)
relax = FALSE;
}
if(NULL == (chunk = H5D_istore_lock(io_info, &udata, relax, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
} else
chunk = NULL;
for (smine_start=0; smine_start<chunk_info->chunk_points; smine_start+=smine_nelmts) {
/* Go figure out how many elements to read from the file */
assert(H5S_SELECT_ITER_NELMTS(&file_iter)==(chunk_info->chunk_points-smine_start));
@ -1868,8 +1978,13 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#ifdef H5S_DEBUG
H5_timer_begin(&timer);
#endif
n = H5D_select_fgath(io_info, chunk_info->fspace, &bkg_iter, smine_nelmts,
bkg_buf/*out*/);
if(chunk) {
n = H5D_select_mgath(chunk, chunk_info->fspace, &bkg_iter,
smine_nelmts, dxpl_cache, bkg_buf/*out*/);
} else {
n = H5D_select_fgath(io_info, chunk_info->fspace, &bkg_iter, smine_nelmts,
(haddr_t)0, NULL, bkg_buf/*out*/);
}
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].bkg_timer), &timer);
@ -1900,7 +2015,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
#endif
status = H5D_select_fscat(io_info,
chunk_info->fspace, &file_iter, smine_nelmts,
tconv_buf);
chunk_addr, chunk, tconv_buf);
#ifdef H5S_DEBUG
H5_timer_end(&(io_info->stats->stats[0].scat_timer), &timer);
@ -1911,6 +2026,14 @@ H5D_chunk_write(H5D_io_info_t *io_info, hsize_t nelmts,
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "scatter failed")
} /* end for */
/* Release the cache lock on the chunk. */
if (H5D_istore_if_load(dataset, chunk_addr)) {
accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);
if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, accessed_bytes) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
}
relax=TRUE;
/* Release selection iterators */
if(file_iter_init) {
if(H5S_SELECT_ITER_RELEASE(&file_iter) < 0)
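
A detail of the H5D_chunk_write hunks above worth noting: the chunk is locked with a relax flag. When the selection covers the whole chunk, the old contents will be overwritten anyway, so H5D_istore_lock simply allocates (and zero-fills) the cache entry instead of reading the chunk from disk first. Restated in condensed form from the diff above (no new behavior):

    /* Sketch: lock with relax=TRUE only when the whole chunk is overwritten */
    hbool_t relax = TRUE;
    size_t accessed_bytes = chunk_info->chunk_points * H5T_get_size(dataset->shared->type);

    if(accessed_bytes != dataset->shared->layout.u.chunk.size)
        relax = FALSE;          /* partial write: the old chunk contents are still needed */

    if(H5D_istore_if_load(dataset, chunk_addr)) {
        if(NULL == (chunk = H5D_istore_lock(io_info, &udata, relax, &idx_hint)))
            HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
    } /* end if */
    else
        chunk = NULL;           /* cache bypassed: write straight to the chunk on disk */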

src/H5Distore.c

@ -128,37 +128,6 @@ typedef struct H5D_rdcc_ent_t {
} H5D_rdcc_ent_t;
typedef H5D_rdcc_ent_t *H5D_rdcc_ent_ptr_t; /* For free lists */
/*
* B-tree key. A key contains the minimum logical N-dimensional address and
* the logical size of the chunk to which this key refers. The
* fastest-varying dimension is assumed to reference individual bytes of the
* array, so a 100-element 1-d array of 4-byte integers would really be a 2-d
* array with the slow varying dimension of size 100 and the fast varying
* dimension of size 4 (the storage dimensionality has very little to do with
* the real dimensionality).
*
* Only the first few values of the OFFSET and SIZE fields are actually
* stored on disk, depending on the dimensionality.
*
* The chunk's file address is part of the B-tree and not part of the key.
*/
typedef struct H5D_istore_key_t {
size_t nbytes; /*size of stored data */
hsize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
unsigned filter_mask; /*excluded filters */
} H5D_istore_key_t;
/*
* Common data exchange structure for indexed storage nodes. This structure is
* passed through the B-link tree layer to the methods for the objects
* to which the B-link tree points.
*/
typedef struct H5D_istore_bt_ud_common_t {
/* downward */
H5D_istore_key_t key; /*key values */
const H5O_layout_t *mesg; /*layout message */
} H5D_istore_bt_ud_common_t;
/*
* Data exchange structure for indexed storage nodes. This structure is
* passed through the B-link tree layer to the methods for the objects
@ -169,12 +138,6 @@ typedef struct H5D_istore_bt_ud_common_t {
*/
typedef H5D_istore_bt_ud_common_t H5D_istore_ud0_t;
/* B-tree callback info for various operations */
typedef struct H5D_istore_ud1_t {
H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */
haddr_t addr; /*file address of chunk */
} H5D_istore_ud1_t;
/* B-tree callback info for iteration to total allocated space */
typedef struct H5D_istore_it_ud1_t {
H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */
@ -1715,7 +1678,7 @@ done:
*
*-------------------------------------------------------------------------
*/
static void *
void *
H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
hbool_t relax, unsigned *idx_hint/*in,out*/)
{
@ -1781,6 +1744,10 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
#endif
if(NULL == (chunk = H5D_istore_chunk_alloc(chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
/* In the case that some dataset functions look through this data,
* clear it to all 0s. */
HDmemset(chunk, 0, chunk_size);
} /* end if */
else {
@ -1897,7 +1864,7 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
ent->offset[u] = io_info->store->chunk.offset[u];
ent->rd_count = chunk_size;
ent->wr_count = chunk_size;
ent->chunk = chunk;
ent->chunk = (uint8_t*)chunk;
/* Add it to the cache */
assert(NULL==rdcc->slot[idx]);
@ -1998,9 +1965,9 @@ done:
*
*-------------------------------------------------------------------------
*/
static herr_t
herr_t
H5D_istore_unlock(const H5D_io_info_t *io_info,
hbool_t dirty, unsigned idx_hint, uint8_t *chunk, size_t naccessed)
hbool_t dirty, unsigned idx_hint, void *chunk, size_t naccessed)
{
const H5O_layout_t *layout=&(io_info->dset->shared->layout); /* Dataset layout */
const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
@ -2029,7 +1996,7 @@ H5D_istore_unlock(const H5D_io_info_t *io_info,
assert(layout->u.chunk.size>0);
H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t);
x.alloc_size = x.chunk_size;
x.chunk = chunk;
x.chunk = (uint8_t*)chunk;
if (H5D_istore_flush_entry(io_info, &x, TRUE)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer")
@ -2061,6 +2028,48 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_istore_unlock() */
/*-------------------------------------------------------------------------
* Function: H5D_istore_if_load
*
* Purpose: A small internal function to determine whether it is necessary to load the
* chunk into cache. If the chunk is too large to load into
* the cache and it has no filters in the pipeline (i.e. not
* compressed) and if the address for the chunk has been
* defined, then don't load the chunk into the cache, just
* read the data from it directly. If MPI based VFD is used,
* must bypass the chunk-cache scheme because other MPI
* processes could be writing to other elements in the same
* chunk. Do a direct read-through of only the elements
* requested.
*
* Return: TRUE or FALSE
*
* Programmer: Raymond Lu
* 17 July 2007
*
*-------------------------------------------------------------------------
*/
hbool_t
H5D_istore_if_load(H5D_t *dataset, haddr_t caddr)
{
hbool_t ret_value;
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_if_load)
assert(dataset);
if (dataset->shared->dcpl_cache.pline.nused==0 && ((dataset->shared->layout.u.chunk.size >
dataset->shared->cache.chunk.nbytes && caddr!=HADDR_UNDEF)
|| (IS_H5FD_MPI(dataset->oloc.file) && (H5F_ACC_RDWR &
H5F_get_intent(dataset->oloc.file))))) {
ret_value = FALSE;
} else
ret_value = TRUE;
FUNC_LEAVE_NOAPI(ret_value)
}
/*-------------------------------------------------------------------------
* Function: H5D_istore_readvv
@ -2073,19 +2082,25 @@ done:
* Programmer: Quincey Koziol
* Wednesday, May 7, 2003
*
* Modification:
* Raymond Lu
* 20 July 2007
* Moved H5D_istore_lock and H5D_istore_unlock to H5D_chunk_read
* from this function to avoid frequent lock and unlock.
*
*-------------------------------------------------------------------------
*/
ssize_t
H5D_istore_readvv(const H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf)
haddr_t chunk_addr, void *chunk, void *buf)
{
H5D_t *dset=io_info->dset; /* Local pointer to the dataset info */
H5D_istore_ud1_t udata; /*B-tree pass-through */
haddr_t chunk_addr; /* Chunk address on disk */
size_t u; /* Local index variables */
ssize_t ret_value; /* Return value */
ssize_t naccessed; /* Number of bytes accessed in chunk */
FUNC_ENTER_NOAPI(H5D_istore_readvv, FAIL)
@ -2106,9 +2121,7 @@ H5D_istore_readvv(const H5D_io_info_t *io_info,
HDfprintf(stderr,"%s: io_info->store->chunk.offset={",FUNC);
for(u=0; u<dset->shared->layout.u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",io_info->store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
chunk_addr = H5D_istore_get_addr(io_info, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
@ -2128,8 +2141,7 @@ HDfprintf(stderr,"%s: buf=%p\n",FUNC,buf);
* writing to other elements in the same chunk. Do a direct
* read-through of only the elements requested.
*/
if (dset->shared->dcpl_cache.pline.nused==0 && ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && chunk_addr!=HADDR_UNDEF)
|| (IS_H5FD_MPI(dset->oloc.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->oloc.file))))) {
if (!H5D_istore_if_load(dset, chunk_addr)) {
H5D_io_info_t chk_io_info; /* Temporary I/O info object */
H5D_storage_t chk_store; /* Chunk storage information */
@ -2141,12 +2153,11 @@ HDfprintf(stderr,"%s: buf=%p\n",FUNC,buf);
H5D_BUILD_IO_INFO(&chk_io_info,dset,io_info->dxpl_cache,io_info->dxpl_id,&chk_store);
/* Do I/O directly on chunk without reading it into the cache */
if ((ret_value=H5D_contig_readvv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
if ((ret_value=H5D_contig_readvv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr,
chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, (haddr_t)0, NULL, buf))<0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file")
} /* end if */
else {
uint8_t *chunk; /* Pointer to cached chunk in memory */
unsigned idx_hint=0; /* Cache index hint */
ssize_t naccessed; /* Number of bytes accessed in chunk */
/* If the chunk address is not defined, check if the fill value is
@ -2229,20 +2240,11 @@ HDfprintf(stderr,"%s: buf=%p\n",FUNC,buf);
} /* end if */
} /* end if */
/*
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
if(NULL == (chunk = H5D_istore_lock(io_info, &udata, FALSE, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
/* Use the vectorized memory copy routine to do actual work */
if((naccessed = H5V_memcpyvv(buf, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, chunk, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr)) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "vectorized memcpy failed")
H5_CHECK_OVERFLOW(naccessed, ssize_t, size_t);
if(H5D_istore_unlock(io_info, FALSE, idx_hint, chunk, (size_t)naccessed) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to unlock raw data chunk")
/* Set return value */
ret_value = naccessed;
@ -2264,17 +2266,22 @@ done:
* Programmer: Quincey Koziol
* Friday, May 2, 2003
*
* Modification:
* Raymond Lu
* 20 July 2007
* Moved H5D_istore_lock and H5D_istore_unlock to H5D_chunk_write
* from this function to avoid frequent lock and unlock.
*
*-------------------------------------------------------------------------
*/
ssize_t
H5D_istore_writevv(const H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[], hsize_t chunk_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf)
haddr_t chunk_addr, void *chunk, const void *buf)
{
H5D_t *dset = io_info->dset; /* Local pointer to the dataset info */
H5D_istore_ud1_t udata; /*B-tree pass-through */
haddr_t chunk_addr; /* Chunk address on disk */
size_t u; /* Local index variables */
ssize_t ret_value; /* Return value */
@ -2292,14 +2299,11 @@ H5D_istore_writevv(const H5D_io_info_t *io_info,
HDassert(mem_offset_arr);
HDassert(buf);
/* Get the address of this chunk on disk */
#ifdef QAK
HDfprintf(stderr,"%s: io_info->store->chunk.offset={",FUNC);
for(u=0; u<dset->shared->layout.u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",io_info->store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
chunk_addr = H5D_istore_get_addr(io_info, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
@ -2318,18 +2322,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* writing to other elements in the same chunk. Do a direct
* write-through of only the elements requested.
*/
#ifdef H5_HAVE_PARALLEL
/* Additional sanity checks when operating in parallel */
if(IS_H5FD_MPI(dset->oloc.file)) {
if (chunk_addr==HADDR_UNDEF)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk")
if (dset->shared->dcpl_cache.pline.nused>0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot write to chunked storage with filters in parallel")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
if (dset->shared->dcpl_cache.pline.nused==0 && ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && chunk_addr!=HADDR_UNDEF)
|| (IS_H5FD_MPI(dset->oloc.file) && (H5F_ACC_RDWR & H5F_get_intent(dset->oloc.file))))) {
if (!H5D_istore_if_load(dset, chunk_addr)) {
H5D_io_info_t chk_io_info; /* Temporary I/O info object */
H5D_storage_t chk_store; /* Chunk storage information */
@ -2341,56 +2334,17 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
H5D_BUILD_IO_INFO(&chk_io_info,dset,io_info->dxpl_cache,io_info->dxpl_id,&chk_store);
/* Do I/O directly on chunk without reading it into the cache */
if ((ret_value=H5D_contig_writevv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
if ((ret_value=H5D_contig_writevv(&chk_io_info, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, (haddr_t)0, NULL, buf))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
} /* end if */
else {
uint8_t *chunk; /* Pointer to cached chunk in memory */
unsigned idx_hint=0; /* Cache index hint */
ssize_t naccessed; /* Number of bytes accessed in chunk */
size_t total_bytes; /* Total # of bytes accessed on disk & memory */
hbool_t relax; /* Whether whole chunk is selected */
/*
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
#ifdef OLD_WAY
/* Note that this is technically OK, since eventually all the data in the chunk
* will be overwritten. However, it seems risky and a better approach would
* be to lock the chunk in the dataset I/O routine (setting the relax flag
* appropriately) and then unlock it after all the I/O the chunk was finished. -QAK
*/
if(chunk_max_nseq==1 && chunk_len_arr[0] == dset->shared->layout.u.chunk.size)
relax = TRUE;
else
relax = FALSE;
#else /* OLD_WAY */
relax=TRUE;
total_bytes=0;
for(u=*chunk_curr_seq; u<chunk_max_nseq; u++)
total_bytes+=chunk_len_arr[u];
if(total_bytes!=dset->shared->layout.u.chunk.size)
relax=FALSE;
if(relax) {
total_bytes=0;
for(u=*mem_curr_seq; u<mem_max_nseq; u++)
total_bytes+=mem_len_arr[u];
if(total_bytes!=dset->shared->layout.u.chunk.size)
relax=FALSE;
} /* end if */
#endif /* OLD_WAY */
if (NULL==(chunk=H5D_istore_lock(io_info, &udata, relax, &idx_hint)))
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to read raw data chunk")
/* Use the vectorized memory copy routine to do actual work */
if((naccessed=H5V_memcpyvv(chunk,chunk_max_nseq,chunk_curr_seq,chunk_len_arr,chunk_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_len_arr,mem_offset_arr))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed")
H5_CHECK_OVERFLOW(naccessed,ssize_t,size_t);
if (H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (size_t)naccessed)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "uanble to unlock raw data chunk")
/* Set return value */
ret_value=naccessed;
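
The new H5D_istore_if_load helper above replaces the cache-bypass test that H5D_istore_readvv and H5D_istore_writevv previously carried inline. Its decision, restated for readability (equivalent to the condition in the function body above, not a separate implementation):

    /* Sketch: when H5D_istore_if_load returns FALSE, the chunk cache is bypassed
     * and I/O goes directly to the chunk's file address. */
    hbool_t use_cache = TRUE;

    if(dataset->shared->dcpl_cache.pline.nused == 0) {              /* no I/O filters */
        hbool_t too_big_for_cache =
            dataset->shared->layout.u.chunk.size > dataset->shared->cache.chunk.nbytes
            && caddr != HADDR_UNDEF;                                /* chunk already allocated on disk */
        hbool_t parallel_write_access =
            IS_H5FD_MPI(dataset->oloc.file)
            && (H5F_ACC_RDWR & H5F_get_intent(dataset->oloc.file)); /* MPI VFD opened read-write */

        if(too_big_for_cache || parallel_write_access)
            use_cache = FALSE;                                      /* do direct chunk I/O */
    } /* end if */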

src/H5Dmpio.c

@ -338,6 +338,7 @@ H5D_mpio_select_read(H5D_io_info_t *io_info,
const H5S_t UNUSED *file_space,
const H5S_t UNUSED *mem_space,
haddr_t addr,
void UNUSED *pointer,
void *buf/*out*/)
{
herr_t ret_value = SUCCEED;
@ -369,6 +370,7 @@ H5D_mpio_select_write(H5D_io_info_t *io_info,
const H5S_t UNUSED *file_space,
const H5S_t UNUSED *mem_space,
haddr_t addr,
void UNUSED *pointer,
const void *buf)
{
herr_t ret_value = SUCCEED;
@ -1168,15 +1170,13 @@ H5D_multi_chunk_collective_io(H5D_io_info_t *io_info,fm_map *fm,const void *buf,
if(do_write) {
if((io_info->ops.write)(io_info,
chunk_info->chunk_points,H5T_get_size(io_info->dset->shared->type),
chunk_info->fspace,chunk_info->mspace,0,
buf) < 0)
chunk_info->fspace,chunk_info->mspace,0, NULL, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
}
else {
if((io_info->ops.read)(io_info,
chunk_info->chunk_points,H5T_get_size(io_info->dset->shared->type),
chunk_info->fspace,chunk_info->mspace,0,
buf) < 0)
chunk_info->fspace,chunk_info->mspace,0, NULL, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
}
#else
@ -1322,15 +1322,13 @@ H5D_multi_chunk_collective_io_no_opt(H5D_io_info_t *io_info,fm_map *fm,const voi
if(do_write) {
if((io_info->ops.write)(io_info,
chunk_info->chunk_points,H5T_get_size(io_info->dset->shared->type),
chunk_info->fspace,chunk_info->mspace, (hsize_t)0,
buf) < 0)
chunk_info->fspace,chunk_info->mspace, (hsize_t)0, NULL, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
}
else {
if((io_info->ops.read)(io_info,
chunk_info->chunk_points,H5T_get_size(io_info->dset->shared->type),
chunk_info->fspace,chunk_info->mspace, (hsize_t)0,
buf) < 0)
chunk_info->fspace,chunk_info->mspace, (hsize_t)0, NULL, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
}
}
@ -1470,13 +1468,13 @@ H5D_final_collective_io(H5D_io_info_t *io_info,MPI_Datatype*mpi_file_type,MPI_Da
if(do_write) {
if((io_info->ops.write)(io_info,
coll_info->mpi_buf_count,0,NULL,NULL,coll_info->chunk_addr,
buf) < 0)
NULL, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "optimized write failed")
}
else {
if((io_info->ops.read)(io_info,
coll_info->mpi_buf_count,0,NULL,NULL,coll_info->chunk_addr,
buf) < 0)
NULL, buf) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "optimized read failed")
}

src/H5Dpkg.h

@ -71,24 +71,24 @@ struct H5D_io_info_t;
typedef herr_t (*H5D_io_read_func_t)(struct H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space, haddr_t addr,
void *buf/*out*/);
void *chunk, void *buf/*out*/);
/* Write directly from app buffer to file */
typedef herr_t (*H5D_io_write_func_t)(struct H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space, haddr_t addr,
const void *buf);
void *chunk, const void *buf);
/* Function pointers for I/O on particular types of dataset layouts */
typedef ssize_t (*H5D_io_readvv_func_t)(const struct H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf);
haddr_t chunk_addr, void *chunk, void *buf);
typedef ssize_t (*H5D_io_writevv_func_t)(const struct H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf);
haddr_t chunk_addr, void *chunk, const void *buf);
/* Typedef for raw data I/O framework info */
typedef struct H5D_io_ops_t {
@ -250,6 +250,46 @@ typedef struct {
hbool_t has_vlen_fill_type; /* Whether the datatype for the fill value has a variable-length component */
} H5D_fill_buf_info_t;
/*************************/
/* For chunk lock */
/*************************/
/*
* B-tree key. A key contains the minimum logical N-dimensional address and
* the logical size of the chunk to which this key refers. The
* fastest-varying dimension is assumed to reference individual bytes of the
* array, so a 100-element 1-d array of 4-byte integers would really be a 2-d
* array with the slow varying dimension of size 100 and the fast varying
* dimension of size 4 (the storage dimensionality has very little to do with
* the real dimensionality).
*
* Only the first few values of the OFFSET and SIZE fields are actually
* stored on disk, depending on the dimensionality.
*
* The chunk's file address is part of the B-tree and not part of the key.
*/
typedef struct H5D_istore_key_t {
size_t nbytes; /*size of stored data */
hsize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
unsigned filter_mask; /*excluded filters */
} H5D_istore_key_t;
/*
* Common data exchange structure for indexed storage nodes. This structure is
* passed through the B-link tree layer to the methods for the objects
* to which the B-link tree points.
*/
typedef struct H5D_istore_bt_ud_common_t {
/* downward */
H5D_istore_key_t key; /*key values */
const H5O_layout_t *mesg; /*layout message */
} H5D_istore_bt_ud_common_t;
/* B-tree callback info for various operations */
typedef struct H5D_istore_ud1_t {
H5D_istore_bt_ud_common_t common; /* Common info for B-tree user data (must be first) */
haddr_t addr; /*file address of chunk */
} H5D_istore_ud1_t;
/*****************************/
/* Package Private Variables */
@ -272,10 +312,10 @@ H5_DLL herr_t H5D_alloc_storage(H5F_t *f, hid_t dxpl_id, H5D_t *dset, H5D_time_a
/* Functions that perform serial I/O operations */
H5_DLL herr_t H5D_select_fscat (H5D_io_info_t *io_info,
const H5S_t *file_space, H5S_sel_iter_t *file_iter, size_t nelmts,
const void *_buf);
haddr_t chunk_addr, void *chunk, const void *_buf);
H5_DLL size_t H5D_select_fgath (H5D_io_info_t *io_info,
const H5S_t *file_space, H5S_sel_iter_t *file_iter, size_t nelmts,
void *buf);
haddr_t chunk_addr, void *chunk, void *buf);
H5_DLL herr_t H5D_select_mscat (const void *_tscat_buf,
const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
const H5D_dxpl_cache_t *dxpl_cache, void *_buf/*out*/);
@ -285,11 +325,11 @@ H5_DLL size_t H5D_select_mgath (const void *_buf,
H5_DLL herr_t H5D_select_read(H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
haddr_t addr, void *buf/*out*/);
haddr_t addr, void *chunk/*in*/, void *buf/*out*/);
H5_DLL herr_t H5D_select_write(H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
haddr_t addr, const void *buf/*out*/);
haddr_t addr, void *chunk/*in*/, const void *buf/*out*/);
/* Functions that operate on contiguous storage */
H5_DLL herr_t H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout);
@ -298,11 +338,11 @@ H5_DLL haddr_t H5D_contig_get_addr(const H5D_t *dset);
H5_DLL ssize_t H5D_contig_readvv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf);
haddr_t UNUSED address, void UNUSED *pointer, void *buf);
H5_DLL ssize_t H5D_contig_writevv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf);
haddr_t UNUSED address, void UNUSED *pointer, const void *buf);
H5_DLL herr_t H5D_contig_copy(H5F_t *f_src, const H5O_layout_t *layout_src, H5F_t *f_dst,
H5O_layout_t *layout_dst, H5T_t *src_dtype, H5O_copy_t *cpy_info, hid_t dxpl_id);
@ -311,11 +351,11 @@ H5_DLL herr_t H5D_compact_fill(H5D_t *dset, hid_t dxpl_id);
H5_DLL ssize_t H5D_compact_readvv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
void *buf);
haddr_t UNUSED addr, void UNUSED *pointer/*in*/, void *buf);
H5_DLL ssize_t H5D_compact_writevv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_size_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_size_arr[], hsize_t mem_offset_arr[],
const void *buf);
haddr_t UNUSED addr, void UNUSED *pointer/*in*/, const void *buf);
H5_DLL herr_t H5D_compact_copy(H5F_t *f_src, H5O_layout_t *layout_src,
H5F_t *f_dst, H5O_layout_t *layout_dst, H5T_t *src_dtype, H5O_copy_t *cpy_info, hid_t dxpl_id);
@ -344,26 +384,32 @@ H5_DLL herr_t H5D_istore_stats (H5D_t *dset, hbool_t headers);
H5_DLL ssize_t H5D_istore_readvv(const H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[],
hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq,
size_t mem_len_arr[], hsize_t mem_offset_arr[], void *buf);
size_t mem_len_arr[], hsize_t mem_offset_arr[], haddr_t chunk_addr, void *chunk, void *buf);
H5_DLL ssize_t H5D_istore_writevv(const H5D_io_info_t *io_info,
size_t chunk_max_nseq, size_t *chunk_curr_seq, size_t chunk_len_arr[],
hsize_t chunk_offset_arr[], size_t mem_max_nseq, size_t *mem_curr_seq,
size_t mem_len_arr[], hsize_t mem_offset_arr[], const void *buf);
size_t mem_len_arr[], hsize_t mem_offset_arr[], haddr_t chunk_addr, void *chunk,
const void *buf);
H5_DLL haddr_t H5D_istore_get_addr(const H5D_io_info_t *io_info,
struct H5D_istore_ud1_t *_udata);
H5_DLL herr_t H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src,
H5F_t *f_dst, H5O_layout_t *layout_dst, H5T_t *src_dtype,
H5O_copy_t *cpy_info, H5O_pline_t *pline, hid_t dxpl_id);
H5_DLL void * H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
hbool_t relax, unsigned *idx_hint/*in,out*/);
H5_DLL herr_t H5D_istore_unlock(const H5D_io_info_t *io_info,
hbool_t dirty, unsigned idx_hint, void *chunk, size_t naccessed);
H5_DLL hbool_t H5D_istore_if_load(H5D_t *dataset, haddr_t caddr);
/* Functions that operate on external file list (efl) storage */
H5_DLL ssize_t H5D_efl_readvv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
void *buf);
haddr_t UNUSED addr, void UNUSED *pointer/*in*/, void *buf);
H5_DLL ssize_t H5D_efl_writevv(const H5D_io_info_t *io_info,
size_t dset_max_nseq, size_t *dset_curr_seq, size_t dset_len_arr[], hsize_t dset_offset_arr[],
size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
const void *buf);
haddr_t UNUSED addr, void UNUSED *pointer/*in*/, const void *buf);
/* Functions that perform fill value operations on datasets */
H5_DLL herr_t H5D_fill(const void *fill, const H5T_t *fill_type, void *buf,
@ -390,13 +436,13 @@ H5_DLL herr_t H5D_fill_term(H5D_fill_buf_info_t *fb_info);
H5_DLL herr_t H5D_mpio_select_read(H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const struct H5S_t *file_space, const struct H5S_t *mem_space,
haddr_t addr,void *buf/*out*/);
haddr_t addr, void UNUSED *pointer/*in*/, void *buf/*out*/);
/* MPI-IO function to read , it will select either regular or irregular read */
H5_DLL herr_t H5D_mpio_select_write(H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const struct H5S_t *file_space, const struct H5S_t *mem_space,
haddr_t addr,const void *buf);
haddr_t addr, void UNUSED *pointer/*in*/, const void *buf);
/* MPI-IO function to handle contiguous collective IO */
H5_DLL herr_t

src/H5Dselect.c

@ -80,7 +80,7 @@ H5FL_SEQ_DEFINE_STATIC(hsize_t);
herr_t
H5D_select_fscat (H5D_io_info_t *io_info,
const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
const void *_buf)
haddr_t chunk_addr, void *chunk/*in*/, const void *_buf)
{
const uint8_t *buf=_buf; /* Alias for pointer arithmetic */
hsize_t _off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */
@ -129,7 +129,7 @@ H5D_select_fscat (H5D_io_info_t *io_info,
mem_off=0;
/* Write sequence list out */
if((*io_info->ops.writevv)(io_info, nseq, &dset_curr_seq, len, off, (size_t)1, &mem_curr_seq, &mem_len, &mem_off, buf) < 0)
if((*io_info->ops.writevv)(io_info, nseq, &dset_curr_seq, len, off, (size_t)1, &mem_curr_seq, &mem_len, &mem_off, chunk_addr, chunk, buf) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error");
/* Update buffer */
@ -174,9 +174,9 @@ done:
size_t
H5D_select_fgath (H5D_io_info_t *io_info,
const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
void *_buf/*out*/)
haddr_t chunk_addr, void *chunk/*in*/, void *_buf/*out*/)
{
uint8_t *buf=_buf; /* Alias for pointer arithmetic */
uint8_t *buf=(uint8_t*)_buf; /* Alias for pointer arithmetic */
hsize_t _off[H5D_IO_VECTOR_SIZE]; /* Array to store sequence offsets */
hsize_t *off=NULL; /* Pointer to sequence offsets */
hsize_t mem_off; /* Offset in memory */
@ -224,7 +224,8 @@ H5D_select_fgath (H5D_io_info_t *io_info,
mem_off=0;
/* Read sequence list in */
if((*io_info->ops.readvv)(io_info, nseq, &dset_curr_seq, len, off, (size_t)1, &mem_curr_seq, &mem_len, &mem_off, buf) < 0)
if((*io_info->ops.readvv)(io_info, nseq, &dset_curr_seq, len, off, (size_t)1,
&mem_curr_seq, &mem_len, &mem_off, chunk_addr, chunk, buf) < 0)
HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, 0, "read error");
/* Update buffer */
@ -433,7 +434,7 @@ herr_t
H5D_select_read(H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
haddr_t UNUSED addr,
haddr_t addr, void *chunk/*in*/,
void *buf/*out*/)
{
H5S_sel_iter_t mem_iter; /* Memory selection iteration info */
@ -525,7 +526,7 @@ H5D_select_read(H5D_io_info_t *io_info,
if ((tmp_file_len=(*io_info->ops.readvv)(io_info,
file_nseq, &curr_file_seq, file_len, file_off,
mem_nseq, &curr_mem_seq, mem_len, mem_off,
buf))<0)
addr, chunk, buf))<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_READERROR, FAIL, "read error");
/* Decrement number of elements left to process */
@ -577,7 +578,7 @@ herr_t
H5D_select_write(H5D_io_info_t *io_info,
size_t nelmts, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
haddr_t UNUSED addr,
haddr_t addr, void *chunk/*in*/,
const void *buf/*out*/)
{
H5S_sel_iter_t mem_iter; /* Memory selection iteration info */
@ -668,7 +669,7 @@ H5D_select_write(H5D_io_info_t *io_info,
if ((tmp_file_len=(*io_info->ops.writevv)(io_info,
file_nseq, &curr_file_seq, file_len, file_off,
mem_nseq, &curr_mem_seq, mem_len, mem_off,
buf))<0)
addr, chunk, buf))<0)
HGOTO_ERROR(H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error");
/* Decrement number of elements left to process */

test/dsets.c

@ -1950,6 +1950,8 @@ UNUSED
data_corrupt[1] = 33;
data_corrupt[2] = 27;
/* Temporarily disable this test because the changes in chunk caching conflict with
* the way this test is conducted. -slu 2007/7/20 */
if (H5Zregister (H5Z_CORRUPT)<0) goto error;
if(H5Pset_filter(dc, H5Z_FILTER_CORRUPT, 0, (size_t)3, data_corrupt) < 0) goto error;
if(test_filter_internal(file,DSET_FLETCHER32_NAME_3,dc,DISABLE_FLETCHER32,DATA_CORRUPTED,&fletcher32_size)<0) goto error;
@ -1961,6 +1963,7 @@ UNUSED
/* Clean up objects used for this test */
if (H5Pclose (dc)<0) goto error;
#else /* H5_HAVE_FILTER_FLETCHER32 */
TESTING("fletcher32 checksum");
SKIPPED();