Mirror of https://github.com/HDFGroup/hdf5.git (synced 2025-03-31 17:10:47 +08:00)
[svn-r15015] Description:
    Detect chunks that are >4GB before the dataset gets created and return an
    error to the application.  Tweak the many internal variables that hold the
    chunk size/dimensions to use a 'uint32_t' instead of a 'size_t', so that
    the integer size is constant.  Correct a number of our tests which were
    creating datasets with chunks >4GB and add some specific tests for >4GB
    chunk-size detection.  Minor whitespace & other code cleanups.

Tested on:
    Mac OS X/32 10.5.2 (amazon)
    Forthcoming testing on other platforms...
parent afbdbb8e93
commit 22f48585bd
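The heart of the change is easiest to see in isolation. The sketch below is illustrative only (the function and variable names are made up); the real checks are the H5D_create hunk in src/H5Dint.c and the element-count check in H5Pset_chunk further down. The idea: accumulate the chunk size in a 64-bit integer so the product of 32-bit dimension sizes cannot silently wrap, then refuse anything that does not fit the 32-bit size field used by the v1 B-tree chunk records.

    #include <stdint.h>

    /* Illustrative sketch, not library code.  Each dimension is already
     * limited to < 2^32 elements, so compute the chunk size in 64 bits and
     * reject anything that overflows 32 bits.  Worked example: a
     * 1024x1024x1024 chunk of 8-byte doubles is 2^33 bytes (8589934592),
     * which exceeds 0xffffffff and must be rejected. */
    static int
    chunk_size_is_valid(unsigned ndims, const uint32_t *dim)
    {
        uint64_t chunk_size = dim[0];
        unsigned u;

        for(u = 1; u < ndims; u++)
            chunk_size *= (uint64_t)dim[u];

        /* Chunk size is encoded in a 32-bit value in v1 B-tree records */
        return chunk_size <= (uint64_t)0xffffffff;
    }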
@@ -189,16 +189,16 @@ H5D_chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_info
 HGOTO_ERROR(H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset")

 /* Decide the number of chunks in each dimension*/
-for(u=0; u<f_ndims; u++) {
+for(u = 0; u < f_ndims; u++) {
 /* Keep the size of the chunk dimensions as hsize_t for various routines */
-fm->chunk_dim[u]=fm->layout->u.chunk.dim[u];
+fm->chunk_dim[u] = fm->layout->u.chunk.dim[u];

 /* Round up to the next integer # of chunks, to accomodate partial chunks */
-fm->chunks[u] = ((fm->f_dims[u]+dataset->shared->layout.u.chunk.dim[u])-1) / dataset->shared->layout.u.chunk.dim[u];
+fm->chunks[u] = ((fm->f_dims[u] + dataset->shared->layout.u.chunk.dim[u]) - 1) / dataset->shared->layout.u.chunk.dim[u];
 } /* end for */

 /* Compute the "down" size of 'chunks' information */
-if(H5V_array_down(f_ndims,fm->chunks,fm->down_chunks) < 0)
+if(H5V_array_down(f_ndims, fm->chunks, fm->down_chunks) < 0)
 HGOTO_ERROR(H5E_INTERNAL, H5E_BADVALUE, FAIL, "can't compute 'down' sizes")

 #ifdef H5_HAVE_PARALLEL

@@ -591,16 +591,15 @@ H5D_create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
 HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection bound info")

 /* Set initial chunk location & hyperslab size */
-
-for(u=0; u<fm->f_ndims; u++) {
-start_coords[u]=(sel_start[u]/fm->layout->u.chunk.dim[u])*fm->layout->u.chunk.dim[u];
-coords[u]=start_coords[u];
-end[u]=(coords[u]+fm->chunk_dim[u])-1;
+for(u = 0; u < fm->f_ndims; u++) {
+start_coords[u] = (sel_start[u] / fm->layout->u.chunk.dim[u]) * fm->layout->u.chunk.dim[u];
+coords[u] = start_coords[u];
+end[u] = (coords[u] + fm->chunk_dim[u]) - 1;
 } /* end for */

 /* Calculate the index of this chunk */
-if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index) < 0)
+if(H5V_chunk_index(fm->f_ndims, coords, fm->layout->u.chunk.dim, fm->down_chunks, &chunk_index) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")

 /* Iterate through each chunk in the dataset */

@@ -679,17 +678,16 @@ H5D_create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
 } /* end if */

 /* Get number of elements selected in chunk */
-if((schunk_points=H5S_GET_SELECT_NPOINTS(tmp_fchunk)) < 0)
+if((schunk_points = H5S_GET_SELECT_NPOINTS(tmp_fchunk)) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements")
-H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points,schunk_points,hssize_t,size_t);
+H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points, schunk_points, hssize_t, uint32_t);

 /* Decrement # of points left in file selection */
-sel_points-=(hsize_t)schunk_points;
+sel_points -= (hsize_t)schunk_points;

 /* Leave if we are done */
-if(sel_points==0)
+if(sel_points == 0)
 HGOTO_DONE(SUCCEED)
-assert(sel_points>0);
 } /* end if */

 /* Increment chunk index */

@@ -719,7 +717,7 @@ H5D_create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
 } while(coords[curr_dim]>sel_end[curr_dim]);

 /* Re-Calculate the index of this chunk */
-if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index) < 0)
+if(H5V_chunk_index(fm->f_ndims, coords, fm->layout->u.chunk.dim, fm->down_chunks, &chunk_index) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
 } /* end if */
 } /* end while */

@@ -867,7 +865,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const
 FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_file_cb)

 /* Calculate the index of this chunk */
-if(H5V_chunk_index(ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index) < 0)
+if(H5V_chunk_index(ndims, coords, fm->layout->u.chunk.dim, fm->down_chunks, &chunk_index) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")

 /* Find correct chunk in file & memory skip list */

@@ -912,18 +910,18 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const
 chunk_info->fspace_shared = FALSE;

 /* Set the memory chunk dataspace */
-chunk_info->mspace=NULL;
+chunk_info->mspace = NULL;
 chunk_info->mspace_shared = FALSE;

 /* Set the number of selected elements in chunk to zero */
-chunk_info->chunk_points=0;
+chunk_info->chunk_points = 0;

 /* Compute the chunk's coordinates */
-for(u=0; u<fm->f_ndims; u++) {
-H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u],hsize_t,hssize_t);
-chunk_info->coords[u]=(coords[u]/(hssize_t)fm->layout->u.chunk.dim[u])*(hssize_t)fm->layout->u.chunk.dim[u];
+for(u = 0; u < fm->f_ndims; u++) {
+H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u], hsize_t, hssize_t);
+chunk_info->coords[u] = (coords[u] / (hssize_t)fm->layout->u.chunk.dim[u]) * (hssize_t)fm->layout->u.chunk.dim[u];
 } /* end for */
-chunk_info->coords[fm->f_ndims]=0;
+chunk_info->coords[fm->f_ndims] = 0;

 /* Insert the new chunk into the skip list */
 if(H5SL_insert(fm->sel_chunks,chunk_info,&chunk_info->index) < 0) {

@@ -938,8 +936,8 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const
 } /* end else */

 /* Get the coordinates of the element in the chunk */
-for(u=0; u<fm->f_ndims; u++)
-coords_in_chunk[u]=coords[u]%fm->layout->u.chunk.dim[u];
+for(u = 0; u < fm->f_ndims; u++)
+coords_in_chunk[u] = coords[u] % fm->layout->u.chunk.dim[u];

 /* Add point to file selection for chunk */
 if(H5S_select_elements(chunk_info->fspace, H5S_SELECT_APPEND, (size_t)1, coords_in_chunk) < 0)

@@ -979,7 +977,7 @@ H5D_chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, unsigned ndims, const
 FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_mem_cb)

 /* Calculate the index of this chunk */
-if(H5V_chunk_index(ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index) < 0)
+if(H5V_chunk_index(ndims, coords, fm->layout->u.chunk.dim, fm->down_chunks, &chunk_index) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")

 /* Find correct chunk in file & memory skip list */

@@ -1059,7 +1057,7 @@ H5D_chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr)
 /* Must bring the whole chunk in if there are any filters */
 if(dataset->shared->dcpl_cache.pline.nused > 0)
 ret_value = TRUE;
-else
+else {
 #ifdef H5_HAVE_PARALLEL
 /* If MPI based VFD is used and the file is opened for write access, must
  * bypass the chunk-cache scheme because other MPI processes could

@@ -1068,17 +1066,22 @@ H5D_chunk_cacheable(const H5D_io_info_t *io_info, haddr_t caddr)
  */
 if(io_info->using_mpi_vfd && (H5F_ACC_RDWR & H5F_INTENT(dataset->oloc.file)))
 ret_value = FALSE;
-else
+else {
 #endif /* H5_HAVE_PARALLEL */
 /* If the chunk is too large to keep in the cache and if the address
  * for the chunk has been defined, then don't load the chunk into the
  * cache, just write the data to it directly.
  */
-if(dataset->shared->layout.u.chunk.size > dataset->shared->cache.chunk.nbytes
+H5_CHECK_OVERFLOW(dataset->shared->layout.u.chunk.size, uint32_t, size_t);
+if((size_t)dataset->shared->layout.u.chunk.size > dataset->shared->cache.chunk.nbytes
 && H5F_addr_defined(caddr))
 ret_value = FALSE;
 else
 ret_value = TRUE;
+#ifdef H5_HAVE_PARALLEL
+} /* end else */
+#endif /* H5_HAVE_PARALLEL */
+} /* end else */

 FUNC_LEAVE_NOAPI(ret_value)
 } /* end H5D_chunk_cacheable() */
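For readers skimming the H5D_chunk_cacheable() hunks above, the decision it implements after this patch boils down to the following simplified sketch (parallel-I/O handling omitted; the names are illustrative, not the library's):

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified model of the post-patch logic: filtered chunks always go
     * through the chunk cache; unfiltered chunks that are larger than the
     * whole cache and already have a file address are read/written directly. */
    static int
    chunk_is_cacheable(uint32_t chunk_size, size_t cache_nbytes,
        int has_filters, int addr_defined)
    {
        if(has_filters)
            return 1;
        if((size_t)chunk_size > cache_nbytes && addr_defined)
            return 0;
        return 1;
    }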
@@ -1152,7 +1155,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 H5D_io_info_t cpt_io_info;          /* Compact I/O info object */
 H5D_storage_t cpt_store;            /* Chunk storage information as compact dataset */
 hbool_t cpt_dirty;                  /* Temporary placeholder for compact storage "dirty" flag */
-size_t src_accessed_bytes = 0;      /* Total accessed size in a chunk */
+uint32_t src_accessed_bytes = 0;    /* Total accessed size in a chunk */
 hbool_t skip_missing_chunks = FALSE;    /* Whether to skip missing chunks */
 unsigned idx_hint = 0;              /* Cache index hint */
 herr_t ret_value = SUCCEED;         /*return value */

@@ -1175,7 +1178,7 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;

 /* Initialize temporary contiguous storage info */
-ctg_store.contig.dset_size = (hsize_t)io_info->dset->shared->layout.u.chunk.size;
+H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t);

 /* Set up compact I/O info object */
 HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));

@@ -1232,8 +1235,11 @@ H5D_chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 else {
 /* Load the chunk into cache and lock it. */
 if(H5D_chunk_cacheable(io_info, chunk_addr)) {
+size_t tmp_src_accessed_bytes;      /* Total accessed size in a chunk */
+
 /* Compute # of bytes accessed in chunk */
-src_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size;
+tmp_src_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size;
+H5_ASSIGN_OVERFLOW(src_accessed_bytes, tmp_src_accessed_bytes, size_t, uint32_t);

 /* Lock the chunk into the cache */
 if(NULL == (chunk = H5D_istore_lock(io_info, &udata, FALSE, &idx_hint)))

@@ -1301,7 +1307,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 H5D_io_info_t cpt_io_info;          /* Compact I/O info object */
 H5D_storage_t cpt_store;            /* Chunk storage information as compact dataset */
 hbool_t cpt_dirty;                  /* Temporary placeholder for compact storage "dirty" flag */
-size_t dst_accessed_bytes = 0;      /* Total accessed size in a chunk */
+uint32_t dst_accessed_bytes = 0;    /* Total accessed size in a chunk */
 unsigned idx_hint = 0;              /* Cache index hint */
 herr_t ret_value = SUCCEED;         /* Return value */

@@ -1319,7 +1325,7 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;

 /* Initialize temporary contiguous storage info */
-ctg_store.contig.dset_size = (hsize_t)io_info->dset->shared->layout.u.chunk.size;
+H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t);

 /* Set up compact I/O info object */
 HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));

@@ -1350,9 +1356,11 @@ H5D_chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 chunk_addr = H5D_istore_get_addr(io_info, &udata);
 if(H5D_chunk_cacheable(io_info, chunk_addr)) {
 hbool_t entire_chunk = TRUE;        /* Whether whole chunk is selected */
+size_t tmp_dst_accessed_bytes;      /* Total accessed size in a chunk */

 /* Compute # of bytes accessed in chunk */
-dst_accessed_bytes = chunk_info->chunk_points * type_info->dst_type_size;
+tmp_dst_accessed_bytes = chunk_info->chunk_points * type_info->dst_type_size;
+H5_ASSIGN_OVERFLOW(dst_accessed_bytes, tmp_dst_accessed_bytes, size_t, uint32_t);

 /* Determine if we will access all the data in the chunk */
 if(dst_accessed_bytes != ctg_store.contig.dset_size ||
src/H5Dint.c (14 changed lines)
@@ -1174,6 +1174,7 @@ H5D_create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
 case H5D_CHUNKED:
 {
 hsize_t max_dim[H5O_LAYOUT_NDIMS];      /* Maximum size of data in elements */
+uint64_t chunk_size;                    /* Size of chunk in bytes */

 /* Set up layout information */
 if((ndims = H5S_GET_EXTENT_NDIMS(new_dset->shared->space)) < 0)

@@ -1208,8 +1209,17 @@ H5D_create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
 HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be <= maximum dimension size for fixed-sized dimensions")

 /* Compute the total size of a chunk */
-for(u = 1, new_dset->shared->layout.u.chunk.size = new_dset->shared->layout.u.chunk.dim[0]; u < new_dset->shared->layout.u.chunk.ndims; u++)
-new_dset->shared->layout.u.chunk.size *= new_dset->shared->layout.u.chunk.dim[u];
+/* (Use 64-bit value to ensure that we can detect >4GB chunks) */
+for(u = 1, chunk_size = (uint64_t)new_dset->shared->layout.u.chunk.dim[0]; u < new_dset->shared->layout.u.chunk.ndims; u++)
+chunk_size *= (uint64_t)new_dset->shared->layout.u.chunk.dim[u];
+
+/* Check for chunk larger than can be represented in 32-bits */
+/* (Chunk size is encoded in 32-bit value in v1 B-tree records) */
+if(chunk_size > (uint64_t)0xffffffff)
+HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be < 4GB")
+
+/* Retain computed chunk size */
+H5_ASSIGN_OVERFLOW(new_dset->shared->layout.u.chunk.size, chunk_size, uint64_t, uint32_t);

 /* Initialize the chunk cache for the dataset */
 if(H5D_istore_init(file, new_dset) < 0)
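Much of the churn in the remaining files is mechanical use of the internal H5_ASSIGN_OVERFLOW / H5_CHECK_OVERFLOW macros when values move between size_t, hsize_t and the new uint32_t fields. The following is only a rough, hypothetical model of their intent (the real definitions live in the library's private headers and differ in detail): assert in debug builds that the value fits the destination type, then cast.

    #include <assert.h>

    /* Rough model only -- NOT the HDF5 macro definitions. */
    #define MY_ASSIGN_OVERFLOW(dst, src, srctype, dsttype)                      \
        do {                                                                    \
            srctype my_tmp_src = (src);                                         \
            dsttype my_tmp_dst = (dsttype)my_tmp_src;                           \
            assert((srctype)my_tmp_dst == my_tmp_src); /* value must fit dst */ \
            (dst) = my_tmp_dst;                                                 \
        } while(0)

Read a line such as H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t) in that spirit: assert that chunk_size fits in 32 bits, then assign it to the uint32_t field.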
src/H5Distore.c (244 changed lines)
@@ -160,7 +160,7 @@ typedef struct H5D_istore_it_ud4_t {
 H5T_path_t *tpath_mem_dst;          /* Datatype conversion path from memory to dest. file */
 void *reclaim_buf;                  /* Buffer for reclaiming data */
 size_t reclaim_buf_size;            /* Reclaim buffer size */
-size_t nelmts;                      /* Number of elements in buffer */
+uint32_t nelmts;                    /* Number of elements in buffer */
 H5S_t *buf_space;                   /* Dataspace describing buffer */

 /* needed for compressed variable-length data */

@@ -431,7 +431,7 @@ H5D_istore_debug_key(FILE *stream, H5F_t UNUSED *f, hid_t UNUSED dxpl_id, int in

 HDassert(key);

-HDfprintf(stream, "%*s%-*s %Zd bytes\n", indent, "", fwidth, "Chunk size:", key->nbytes);
+HDfprintf(stream, "%*s%-*s %u bytes\n", indent, "", fwidth, "Chunk size:", (unsigned)key->nbytes);
 HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth, "Filter mask:", key->filter_mask);
 HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Logical offset:");
 for(u = 0; u < udata->mesg->u.chunk.ndims; u++)

@@ -596,7 +596,7 @@ H5D_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,

 /* Allocate new storage */
 HDassert(udata->nbytes > 0);
-H5_CHECK_OVERFLOW(udata->nbytes, size_t, hsize_t);
+H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
 if(HADDR_UNDEF == (*addr_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
 HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "couldn't allocate new file storage")
 udata->addr = *addr_p;

@@ -772,10 +772,10 @@ H5D_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
 (hsize_t)lt_key->nbytes, (hsize_t)udata->nbytes)))
 HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk storage")
 #else /* OLD_WAY */
-H5_CHECK_OVERFLOW( lt_key->nbytes ,size_t, hsize_t);
-if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes)<0)
+H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
+if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
 HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")
-H5_CHECK_OVERFLOW(udata->nbytes ,size_t, hsize_t);
+H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
 if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
 HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "unable to reallocate chunk")
 #endif /* OLD_WAY */

@@ -809,7 +809,7 @@ H5D_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
 /*
  * Allocate storage for the new chunk
  */
-H5_CHECK_OVERFLOW(udata->nbytes, size_t, hsize_t);
+H5_CHECK_OVERFLOW(udata->nbytes, uint32_t, hsize_t);
 if(HADDR_UNDEF == (*new_node_p = H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, (hsize_t)udata->nbytes)))
 HGOTO_ERROR(H5E_STORAGE, H5E_NOSPACE, H5B_INS_ERROR, "file allocation failed")
 udata->addr = *new_node_p;

@@ -874,14 +874,12 @@ H5D_istore_iter_chunkmap (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_lt
 {
 H5D_istore_it_ud5_t *udata = (H5D_istore_it_ud5_t *)_udata;
 const H5D_istore_key_t *lt_key = (const H5D_istore_key_t *)_lt_key;
-unsigned rank;
+unsigned rank = udata->common.mesg->u.chunk.ndims - 1;
 hsize_t chunk_index;
 int ret_value = H5_ITER_CONT;       /* Return value */

 FUNC_ENTER_NOAPI_NOINIT(H5D_istore_iter_chunkmap)

-rank = udata->common.mesg->u.chunk.ndims - 1;
-
 if(H5V_chunk_index(rank, lt_key->offset, udata->common.mesg->u.chunk.dim, udata->down_chunks, &chunk_index) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")

@@ -968,13 +966,16 @@ H5D_istore_iter_copy(H5F_t *f_src, hid_t dxpl_id, const void *_lt_key,
 /* needed for commpressed variable length data */
 hbool_t is_compressed = FALSE;
 H5Z_EDC_t edc_read = H5Z_NO_EDC;
-size_t nbytes = lt_key->nbytes;
+size_t nbytes;      /* Size of chunk (in bytes) */
 H5Z_cb_t cb_struct;

 int ret_value = H5_ITER_CONT;       /* Return value */

 FUNC_ENTER_NOAPI_NOINIT(H5D_istore_iter_copy)

+/* Get 'size_t' local value for number of bytes in chunk */
+H5_ASSIGN_OVERFLOW(nbytes, lt_key->nbytes, uint32_t, size_t);
+
 /* Check parameter for type conversion */
 if(udata->do_convert) {
 if(H5T_detect_class(udata->dt_src, H5T_VLEN) > 0)

@@ -1033,12 +1034,12 @@ H5D_istore_iter_copy(H5F_t *f_src, hid_t dxpl_id, const void *_lt_key,
 hid_t tid_src = udata->tid_src;
 hid_t tid_dst = udata->tid_dst;
 hid_t tid_mem = udata->tid_mem;
-size_t nelmts = udata->nelmts;
 void *reclaim_buf = udata->reclaim_buf;
 size_t reclaim_buf_size = udata->reclaim_buf_size;

 /* Convert from source file to memory */
-if(H5T_convert(tpath_src_mem, tid_src, tid_mem, nelmts, (size_t)0, (size_t)0, buf, NULL, dxpl_id) < 0)
+H5_CHECK_OVERFLOW(udata->nelmts, uint32_t, size_t);
+if(H5T_convert(tpath_src_mem, tid_src, tid_mem, (size_t)udata->nelmts, (size_t)0, (size_t)0, buf, NULL, dxpl_id) < 0)
 HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "datatype conversion failed")

 /* Copy into another buffer, to reclaim memory later */

@@ -1048,7 +1049,7 @@ H5D_istore_iter_copy(H5F_t *f_src, hid_t dxpl_id, const void *_lt_key,
 HDmemset(bkg, 0, buf_size);

 /* Convert from memory to destination file */
-if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0)
+if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, udata->nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0)
 HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5_ITER_ERROR, "datatype conversion failed")

 /* Reclaim space from variable length data */

@@ -1085,7 +1086,7 @@ H5D_istore_iter_copy(H5F_t *f_src, hid_t dxpl_id, const void *_lt_key,
 if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), edc_read,
 cb_struct, &nbytes, &buf_size, &buf) < 0)
 HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
-udata_dst.nbytes = nbytes;
+H5_ASSIGN_OVERFLOW(udata_dst.nbytes, nbytes, size_t, uint32_t);
 udata->buf = buf;
 udata->buf_size = buf_size;
 } /* end if */

@@ -1277,7 +1278,6 @@ static herr_t
 H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_t reset)
 {
 void *buf = NULL;                   /*temporary buffer */
-size_t alloc;                       /*bytes allocated for BUF */
 hbool_t point_of_no_return = FALSE;
 herr_t ret_value = SUCCEED;         /*return value */

@@ -1292,9 +1292,6 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_
 if(ent->dirty) {
 H5D_istore_ud1_t udata;             /*pass through B-tree */

-/* Initial buffer size */
-alloc = ent->alloc_size;
-
 /* Set up user data for B-tree callbacks */
 udata.common.mesg = &io_info->dset->shared->layout;
 udata.common.offset = ent->offset;

@@ -1304,16 +1301,19 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_

 /* Should the chunk be filtered before writing it to disk? */
 if(io_info->dset->shared->dcpl_cache.pline.nused) {
+size_t alloc = ent->alloc_size;     /* Bytes allocated for BUF */
+size_t nbytes;                      /* Chunk size (in bytes) */
+
 if(!reset) {
 /*
  * Copy the chunk to a new buffer before running it through
  * the pipeline because we'll want to save the original buffer
  * for later.
  */
-alloc = ent->chunk_size;
+H5_ASSIGN_OVERFLOW(alloc, ent->chunk_size, uint32_t, size_t);
 if(NULL == (buf = H5MM_malloc(alloc)))
 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline")
-HDmemcpy(buf, ent->chunk, ent->chunk_size);
+HDmemcpy(buf, ent->chunk, alloc);
 } /* end if */
 else {
 /*

@@ -1326,9 +1326,11 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_
 point_of_no_return = TRUE;
 ent->chunk = NULL;
 } /* end else */
+H5_ASSIGN_OVERFLOW(nbytes, udata.nbytes, uint32_t, size_t);
 if(H5Z_pipeline(&(io_info->dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), io_info->dxpl_cache->err_detect,
-io_info->dxpl_cache->filter_cb, &(udata.nbytes), &alloc, &buf) < 0)
+io_info->dxpl_cache->filter_cb, &nbytes, &alloc, &buf) < 0)
 HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
+H5_ASSIGN_OVERFLOW(udata.nbytes, nbytes, size_t, uint32_t);
 } /* end if */

 /*

@@ -1337,7 +1339,8 @@ H5D_istore_flush_entry(const H5D_io_info_t *io_info, H5D_rdcc_ent_t *ent, hbool_
  */
 if(H5B_insert(io_info->dset->oloc.file, io_info->dxpl_id, H5B_ISTORE, io_info->dset->shared->layout.u.chunk.addr, &udata)<0)
 HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk")
-if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, io_info->dxpl_id, buf) < 0)
+H5_CHECK_OVERFLOW(udata.nbytes, uint32_t, size_t);
+if(H5F_block_write(io_info->dset->oloc.file, H5FD_MEM_DRAW, udata.addr, (size_t)udata.nbytes, io_info->dxpl_id, buf) < 0)
 HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")

 /* Cache the chunk's info, in case it's accessed again shortly */
@@ -1688,20 +1691,20 @@ H5D_istore_prune (const H5D_io_info_t *io_info, size_t size)
 while ((p[0] || p[1]) && rdcc->nbytes+size>total) {

 /* Introduce new pointers */
-for (i=0; i<nmeth-1; i++)
-if (0==w[i])
-p[i+1] = rdcc->head;
+for(i = 0; i < (nmeth - 1); i++)
+if(0 == w[i])
+p[i + 1] = rdcc->head;

 /* Compute next value for each pointer */
-for (i=0; i<nmeth; i++)
+for(i = 0; i < nmeth; i++)
 n[i] = p[i] ? p[i]->next : NULL;

 /* Give each method a chance */
-for (i=0; i<nmeth && rdcc->nbytes+size>total; i++) {
-if (0==i && p[0] && !p[0]->locked &&
-((0==p[0]->rd_count && 0==p[0]->wr_count) ||
-(0==p[0]->rd_count && p[0]->chunk_size==p[0]->wr_count) ||
-(p[0]->chunk_size==p[0]->rd_count && 0==p[0]->wr_count))) {
+for(i = 0; i < nmeth && (rdcc->nbytes + size) > total; i++) {
+if(0 == i && p[0] && !p[0]->locked &&
+((0 == p[0]->rd_count && 0 == p[0]->wr_count) ||
+(0 == p[0]->rd_count && p[0]->chunk_size == p[0]->wr_count) ||
+(p[0]->chunk_size == p[0]->rd_count && 0 == p[0]->wr_count))) {
 /*
  * Method 0: Preempt entries that have been completely written
  * and/or completely read but not entries that are partially

@@ -1813,7 +1816,7 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,

 /* Get the chunk's size */
 HDassert(layout->u.chunk.size > 0);
-H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, hsize_t, size_t);
+H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);

 /* Search for the chunk in the cache */
 if(rdcc->nslots > 0) {

@@ -1854,7 +1857,6 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
 /* In the case that some dataset functions look through this data,
  * clear it to all 0s. */
 HDmemset(chunk, 0, chunk_size);
-
 } /* end if */
 else {
 H5D_istore_ud1_t tmp_udata;         /*B-tree pass-through */

@@ -1873,24 +1875,24 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
 chunk_addr = H5D_istore_get_addr(io_info, udata);
 } /* end else */

-if (H5F_addr_defined(chunk_addr)) {
-size_t chunk_alloc = 0;             /*allocated chunk size */
-
-/*
- * The chunk exists on disk.
- */
+/* Check if the chunk exists on disk */
+if(H5F_addr_defined(chunk_addr)) {
+size_t chunk_alloc;                 /* Allocated chunk size */
+
 /* Chunk size on disk isn't [likely] the same size as the final chunk
  * size in memory, so allocate memory big enough. */
-chunk_alloc = udata->nbytes;
-if(NULL == (chunk = H5D_istore_chunk_alloc (chunk_alloc, pline)))
+H5_ASSIGN_OVERFLOW(chunk_alloc, udata->nbytes, uint32_t, size_t);
+if(NULL == (chunk = H5D_istore_chunk_alloc(chunk_alloc, pline)))
 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")
-if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, udata->nbytes, io_info->dxpl_id, chunk) < 0)
+if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, chunk_alloc, io_info->dxpl_id, chunk) < 0)
 HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")

-if(pline->nused)
+if(pline->nused) {
 if(H5Z_pipeline(pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect,
-io_info->dxpl_cache->filter_cb, &(udata->nbytes), &chunk_alloc, &chunk) < 0)
+io_info->dxpl_cache->filter_cb, &chunk_alloc, &chunk_alloc, &chunk) < 0)
 HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, NULL, "data pipeline read failed")
+H5_ASSIGN_OVERFLOW(udata->nbytes, chunk_alloc, size_t, uint32_t);
+} /* end if */
 #ifdef H5D_ISTORE_DEBUG
 rdcc->nmisses++;
 #endif /* H5D_ISTORE_DEBUG */

@@ -1904,7 +1906,7 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,

 /* Chunk size on disk isn't [likely] the same size as the final chunk
  * size in memory, so allocate memory big enough. */
-if(NULL == (chunk = H5D_istore_chunk_alloc (chunk_size, pline)))
+if(NULL == (chunk = H5D_istore_chunk_alloc(chunk_size, pline)))
 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk")

 if(H5P_is_fill_value_defined(fill, &fill_status) < 0)

@@ -1947,31 +1949,31 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
  * Add the chunk to the cache only if the slot is not already locked.
  * Preempt enough things from the cache to make room.
  */
-if (ent) {
+if(ent) {
 #ifdef H5D_ISTORE_DEBUG
 HDputc('#', stderr);
 HDfflush(stderr);
 #endif
-if (H5D_istore_preempt(io_info, ent, TRUE)<0)
+if(H5D_istore_preempt(io_info, ent, TRUE) < 0)
 HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk from cache")
-}
-if (H5D_istore_prune(io_info, chunk_size)<0)
+} /* end if */
+if(H5D_istore_prune(io_info, chunk_size) < 0)
 HGOTO_ERROR(H5E_IO, H5E_CANTINIT, NULL, "unable to preempt chunk(s) from cache")

 /* Create a new entry */
 ent = H5FL_MALLOC(H5D_rdcc_ent_t);
 ent->locked = 0;
 ent->dirty = FALSE;
-ent->chunk_size = chunk_size;
+H5_ASSIGN_OVERFLOW(ent->chunk_size, chunk_size, size_t, uint32_t);
 ent->alloc_size = chunk_size;
-for (u=0; u<layout->u.chunk.ndims; u++)
+for(u = 0; u < layout->u.chunk.ndims; u++)
 ent->offset[u] = io_info->store->chunk.offset[u];
-ent->rd_count = chunk_size;
-ent->wr_count = chunk_size;
-ent->chunk = (uint8_t*)chunk;
+H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t);
+H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t);
+ent->chunk = (uint8_t *)chunk;

 /* Add it to the cache */
-assert(NULL==rdcc->slot[idx]);
+HDassert(NULL == rdcc->slot[idx]);
 rdcc->slot[idx] = ent;
 ent->idx = idx;
 rdcc->nbytes += chunk_size;

@@ -1979,16 +1981,17 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,

 /* Add it to the linked list */
 ent->next = NULL;
-if (rdcc->tail) {
+if(rdcc->tail) {
 rdcc->tail->next = ent;
 ent->prev = rdcc->tail;
 rdcc->tail = ent;
-} else {
+} /* end if */
+else {
 rdcc->head = rdcc->tail = ent;
 ent->prev = NULL;
-}
+} /* end else */
 found = TRUE;
-} else if (!found) {
+} else if(!found) {

@@ -1996,38 +1999,37 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
  */
 ent = NULL;
 idx = UINT_MAX;
-
 } else {
 /*
  * The chunk is not at the beginning of the cache; move it backward
  * by one slot. This is how we implement the LRU preemption
  * algorithm.
  */
-assert(ent);
-if (ent->next) {
-if (ent->next->next)
+HDassert(ent);
+if(ent->next) {
+if(ent->next->next)
 ent->next->next->prev = ent;
 else
 rdcc->tail = ent;
 ent->next->prev = ent->prev;
-if (ent->prev)
+if(ent->prev)
 ent->prev->next = ent->next;
 else
 rdcc->head = ent->next;
 ent->prev = ent->next;
 ent->next = ent->next->next;
 ent->prev->next = ent;
-}
-}
+} /* end if */
+} /* end else */

 /* Lock the chunk into the cache */
-if (ent) {
-assert (!ent->locked);
+if(ent) {
+HDassert(!ent->locked);
 ent->locked = TRUE;
 chunk = ent->chunk;
-}
+} /* end if */

-if (idx_hint)
+if(idx_hint)
 *idx_hint = idx;

 /* Set return value */
@@ -2071,19 +2073,18 @@ done:
  */
 herr_t
 H5D_istore_unlock(const H5D_io_info_t *io_info,
-hbool_t dirty, unsigned idx_hint, void *chunk, size_t naccessed)
+hbool_t dirty, unsigned idx_hint, void *chunk, uint32_t naccessed)
 {
 const H5O_layout_t *layout=&(io_info->dset->shared->layout); /* Dataset layout */
 const H5D_rdcc_t *rdcc = &(io_info->dset->shared->cache.chunk);
 H5D_rdcc_ent_t *ent = NULL;
 unsigned u;
-herr_t ret_value=SUCCEED;       /* Return value */
+herr_t ret_value = SUCCEED;     /* Return value */

 FUNC_ENTER_NOAPI_NOINIT(H5D_istore_unlock)

 assert(io_info);
-
-if (UINT_MAX==idx_hint) {
+if(UINT_MAX == idx_hint) {
 /*
  * It's not in the cache, probably because it's too big. If it's
  * dirty then flush it to disk. In any case, free the chunk.

@@ -2093,40 +2094,42 @@ H5D_istore_unlock(const H5D_io_info_t *io_info,
 if (dirty) {
 H5D_rdcc_ent_t x;

-HDmemset (&x, 0, sizeof x);
+HDmemset(&x, 0, sizeof(x));
 x.dirty = TRUE;
-for (u=0; u<layout->u.chunk.ndims; u++)
-x.offset[u] = io_info->store->chunk.offset[u];
-assert(layout->u.chunk.size>0);
-H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t);
-x.alloc_size = x.chunk_size;
-x.chunk = (uint8_t*)chunk;
+HDassert(sizeof(x.offset[0]) == sizeof(io_info->store->chunk.offset[0]));
+HDmemcpy(x.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(x.offset[0]));
+HDassert(layout->u.chunk.size > 0);
+x.chunk_size = layout->u.chunk.size;
+H5_ASSIGN_OVERFLOW(x.alloc_size, x.chunk_size, uint32_t, size_t);
+x.chunk = (uint8_t *)chunk;

-if (H5D_istore_flush_entry(io_info, &x, TRUE)<0)
+if(H5D_istore_flush_entry(io_info, &x, TRUE) < 0)
 HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer")
-} else {
+} /* end if */
+else {
 if(chunk)
-chunk=H5D_istore_chunk_xfree (chunk,&(io_info->dset->shared->dcpl_cache.pline));
-}
-} else {
+chunk = H5D_istore_chunk_xfree(chunk, &(io_info->dset->shared->dcpl_cache.pline));
+} /* end else */
+} /* end if */
+else {
 /* Sanity check */
-assert(idx_hint<rdcc->nslots);
-assert(rdcc->slot[idx_hint]);
-assert(rdcc->slot[idx_hint]->chunk==chunk);
+HDassert(idx_hint < rdcc->nslots);
+HDassert(rdcc->slot[idx_hint]);
+HDassert(rdcc->slot[idx_hint]->chunk == chunk);

 /*
  * It's in the cache so unlock it.
  */
 ent = rdcc->slot[idx_hint];
-assert (ent->locked);
-if (dirty) {
+HDassert(ent->locked);
+if(dirty) {
 ent->dirty = TRUE;
-ent->wr_count -= MIN (ent->wr_count, naccessed);
-} else {
-ent->rd_count -= MIN (ent->rd_count, naccessed);
-}
+ent->wr_count -= MIN(ent->wr_count, naccessed);
+} /* end if */
+else
+ent->rd_count -= MIN(ent->rd_count, naccessed);
 ent->locked = FALSE;
-}
+} /* end else */

 done:
 FUNC_LEAVE_NOAPI(ret_value)
@@ -2510,8 +2513,7 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
 HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")

 /* Get original chunk size */
-H5_CHECK_OVERFLOW(layout->u.chunk.size, hsize_t, size_t);
-orig_chunk_size = (size_t)layout->u.chunk.size;
+H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t);

 /* Check the dataset's fill-value status */
 if(H5P_is_fill_value_defined(fill, &fill_status) < 0)

@@ -2613,7 +2615,7 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
 chunk_size = nbytes;
 } /* end if */
 else
-chunk_size = (size_t)layout->u.chunk.size;
+H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
 } /* end if */
 else
 chunk_size = orig_chunk_size;

@@ -2621,7 +2623,7 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
 /* Initialize the chunk information */
 udata.common.mesg = layout;
 udata.common.offset = chunk_offset;
-udata.nbytes = chunk_size;
+H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t);
 udata.filter_mask = filter_mask;
 udata.addr = HADDR_UNDEF;

@@ -2639,16 +2641,19 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
 if(using_mpi) {
 /* Write the chunks out from only one process */
 /* !! Use the internal "independent" DXPL!! -QAK */
-if(H5_PAR_META_WRITE == mpi_rank)
-if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, data_dxpl_id, fb_info.fill_buf) < 0)
+if(H5_PAR_META_WRITE == mpi_rank) {
+HDassert(udata.nbytes == chunk_size);
+if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0)
 HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
+} /* end if */

 /* Indicate that blocks are being written */
 blocks_written = TRUE;
 } /* end if */
 else {
 #endif /* H5_HAVE_PARALLEL */
-if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.nbytes, data_dxpl_id, fb_info.fill_buf) < 0)
+HDassert(udata.nbytes == chunk_size);
+if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, data_dxpl_id, fb_info.fill_buf) < 0)
 HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 #ifdef H5_HAVE_PARALLEL
 } /* end else */

@@ -3029,12 +3034,13 @@ H5D_istore_remove(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key /*in,out
 hbool_t *rt_key_changed /*out */ )
 {
 H5D_istore_key_t *lt_key = (H5D_istore_key_t *)_lt_key;
-H5B_ins_t ret_value=H5B_INS_REMOVE; /* Return value */
+H5B_ins_t ret_value = H5B_INS_REMOVE;       /* Return value */

 FUNC_ENTER_NOAPI_NOINIT(H5D_istore_remove)

 /* Remove raw data chunk from file */
-if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes)<0)
+H5_CHECK_OVERFLOW(lt_key->nbytes, uint32_t, hsize_t);
+if(H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, addr, (hsize_t)lt_key->nbytes) < 0)
 HGOTO_ERROR(H5E_STORAGE, H5E_CANTFREE, H5B_INS_ERROR, "unable to free chunk")

 /* Mark keys as unchanged */

@@ -3082,7 +3088,7 @@ H5D_istore_initialize_by_extent(H5D_io_info_t *io_info)
 hsize_t hyper_start[H5O_LAYOUT_NDIMS];      /* Starting location of hyperslab */
 hsize_t nchunks[H5O_LAYOUT_NDIMS];          /* Current number of chunks in each dimension */
 hsize_t down_chunks[H5O_LAYOUT_NDIMS];      /* "down" size of number of elements in each dimension */
-hsize_t bytes_per_chunk;        /* Bytes in chunk */
+uint32_t elmts_per_chunk;       /* Elements in a chunk */
 int srank;                      /* # of chunk dimensions (signed) */
 unsigned rank;                  /* # of chunk dimensions */
 hbool_t carry;                  /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */

@@ -3105,11 +3111,11 @@ H5D_istore_initialize_by_extent(H5D_io_info_t *io_info)
 /* Set size of lowest chunk dimension (the dataset element size) */
 dset_dims[rank] = layout->u.chunk.dim[rank];

-/* Compute the number of chunks in dataset & the # of bytes in a chunk */
+/* Compute the number of chunks in dataset & the # of elements in a chunk */
 /* (round up to the next integer # of chunks, to accomodate partial chunks) */
-for(u = 0, bytes_per_chunk = layout->u.chunk.dim[rank]; u < rank; u++) {
+for(u = 0, elmts_per_chunk = 1; u < rank; u++) {
 nchunks[u] = ((dset_dims[u] - 1) / layout->u.chunk.dim[u]) + 1;
-bytes_per_chunk *= layout->u.chunk.dim[u];
+elmts_per_chunk *= layout->u.chunk.dim[u];
 } /* end for */

 /* Get the "down" sizes for each dimension */

@@ -3159,9 +3165,10 @@ H5D_istore_initialize_by_extent(H5D_io_info_t *io_info)

 /* Initialize the fill value buffer, if necessary */
 if(!fb_info_init) {
+H5_CHECK_OVERFLOW(elmts_per_chunk, uint32_t, size_t);
 if(H5D_fill_init(&fb_info, NULL, FALSE, NULL, NULL, NULL, NULL,
 &dset->shared->dcpl_cache.fill,
-dset->shared->type, dset->shared->type_id, (size_t)(bytes_per_chunk / layout->u.chunk.dim[rank]),
+dset->shared->type, dset->shared->type_id, (size_t)elmts_per_chunk,
 io_info->dxpl_cache->max_temp_buf, io_info->dxpl_id) < 0)
 HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info")
 fb_info_init = TRUE;

@@ -3217,7 +3224,8 @@ H5D_istore_initialize_by_extent(H5D_io_info_t *io_info)
 HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "can't refill fill value buffer")

 /* Create a selection iterator for scattering the elements to memory buffer */
-if(H5S_select_iter_init(&chunk_iter, space_chunk, layout->u.chunk.dim[rank]) < 0)
+H5_CHECK_OVERFLOW(layout->u.chunk.dim[rank], uint32_t, size_t);
+if(H5S_select_iter_init(&chunk_iter, space_chunk, (size_t)layout->u.chunk.dim[rank]) < 0)
 HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize chunk selection information")

 /* Scatter the data into memory */

@@ -3236,8 +3244,8 @@ H5D_istore_initialize_by_extent(H5D_io_info_t *io_info)
 bytes_accessed = nelmts * layout->u.chunk.dim[rank];

 /* Release lock on chunk */
-H5_CHECK_OVERFLOW(bytes_accessed, hsize_t, size_t);
-if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (size_t)bytes_accessed) < 0)
+H5_CHECK_OVERFLOW(bytes_accessed, hsize_t, uint32_t);
+if(H5D_istore_unlock(io_info, TRUE, idx_hint, chunk, (uint32_t)bytes_accessed) < 0)
 HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to unlock raw data chunk")
 } /* end if */

@@ -3386,12 +3394,12 @@ H5D_istore_update_cache(H5D_t *dset, hid_t dxpl_id)
 next = ent->next;

 /* Calculate the index of this chunk */
-if(H5V_chunk_index(rank,ent->offset,dset->shared->layout.u.chunk.dim,down_chunks,&idx)<0)
+if(H5V_chunk_index(rank, ent->offset, dset->shared->layout.u.chunk.dim, down_chunks, &idx) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")

 /* Compute the index for the chunk entry */
-old_idx=ent->idx;   /* Save for later */
-ent->idx=H5D_CHUNK_HASH(dset->shared, idx);
+old_idx = ent->idx;   /* Save for later */
+ent->idx = H5D_CHUNK_HASH(dset->shared, idx);

 if(old_idx != ent->idx) {
 /* Check if there is already a chunk at this chunk's new location */

@@ -3450,7 +3458,7 @@ H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst,
 void *reclaim_buf = NULL;           /* Buffer for reclaiming data */
 H5S_t *buf_space = NULL;            /* Dataspace describing buffer */
 hid_t sid_buf = -1;                 /* ID for buffer dataspace */
-size_t nelmts = 0;                  /* Number of elements in buffer */
+uint32_t nelmts = 0;                /* Number of elements in buffer */
 hbool_t do_convert = FALSE;         /* Indicate that type conversions should be performed */
 herr_t ret_value = SUCCEED;         /* Return value */

@@ -3553,7 +3561,7 @@ H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src, H5F_t *f_dst,
 do_convert = TRUE;
 } /* end if */

-buf_size = layout_src->u.chunk.size;
+H5_ASSIGN_OVERFLOW(buf_size, layout_src->u.chunk.size, uint32_t, size_t);
 reclaim_buf_size = 0;
 } /* end else */
|
@ -1183,7 +1183,7 @@ if(H5DEBUG(D))
|
||||
H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
|
||||
H5D_istore_ud1_t udata; /* B-tree pass-through */
|
||||
void *chunk; /* Pointer to the data chunk in cache */
|
||||
size_t accessed_bytes; /* Total accessed size in a chunk */
|
||||
uint32_t accessed_bytes; /* Total accessed size in a chunk */
|
||||
unsigned idx_hint = 0; /* Cache index hint */
|
||||
haddr_t caddr; /* Address of the cached chunk */
|
||||
|
||||
@ -1203,9 +1203,11 @@ if(H5DEBUG(D))
|
||||
/* Load the chunk into cache and lock it. */
|
||||
if(H5D_chunk_cacheable(io_info, caddr)) {
|
||||
hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
|
||||
size_t tmp_accessed_bytes; /* Total accessed size in a chunk */
|
||||
|
||||
/* Compute # of bytes accessed in chunk */
|
||||
accessed_bytes = chunk_info->chunk_points * type_info->src_type_size;
|
||||
tmp_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size;
|
||||
H5_ASSIGN_OVERFLOW(accessed_bytes, tmp_accessed_bytes, size_t, uint32_t);
|
||||
|
||||
/* Determine if we will access all the data in the chunk */
|
||||
if(((io_info->op_type == H5D_IO_OP_WRITE) && (accessed_bytes != ctg_store.contig.dset_size))
|
||||
@ -1419,7 +1421,7 @@ if(H5DEBUG(D)) {
|
||||
if(make_ind) {
|
||||
void *chunk; /* Pointer to the data chunk in cache */
|
||||
H5D_io_info_t *chk_io_info; /* Pointer to I/O info object for this chunk */
|
||||
size_t accessed_bytes = 0; /* Total accessed size in a chunk */
|
||||
uint32_t accessed_bytes = 0; /* Total accessed size in a chunk */
|
||||
unsigned idx_hint = 0; /* Cache index hint */
|
||||
|
||||
/* Switch to independent I/O */
|
||||
@ -1429,9 +1431,11 @@ if(H5DEBUG(D)) {
|
||||
/* Load the chunk into cache and lock it. */
|
||||
if(H5D_chunk_cacheable(io_info, chunk_addr)) {
|
||||
hbool_t entire_chunk = TRUE; /* Whether whole chunk is selected */
|
||||
size_t tmp_accessed_bytes; /* Total accessed size in a chunk */
|
||||
|
||||
/* Compute # of bytes accessed in chunk */
|
||||
accessed_bytes = chunk_info->chunk_points * type_info->src_type_size;
|
||||
tmp_accessed_bytes = chunk_info->chunk_points * type_info->src_type_size;
|
||||
H5_ASSIGN_OVERFLOW(accessed_bytes, tmp_accessed_bytes, size_t, uint32_t);
|
||||
|
||||
/* Determine if we will access all the data in the chunk */
|
||||
if(((io_info->op_type == H5D_IO_OP_WRITE) && (accessed_bytes != ctg_store.contig.dset_size))
|
||||
|
src/H5Dpkg.h (18 changed lines)
@@ -204,7 +204,7 @@ typedef struct H5D_io_info_t {
 /* Structure holding information about a chunk's selection for mapping */
 typedef struct H5D_chunk_info_t {
 hsize_t index;              /* "Index" of chunk in dataset */
-size_t chunk_points;        /* Number of elements selected in chunk */
+uint32_t chunk_points;      /* Number of elements selected in chunk */
 hsize_t coords[H5O_LAYOUT_NDIMS];   /* Coordinates of chunk in file dataset's dataspace */
 H5S_t *fspace;              /* Dataspace describing chunk & selection in it */
 unsigned fspace_shared;     /* Indicate that the file space for a chunk is shared and shouldn't be freed */

@@ -213,10 +213,10 @@ typedef struct H5D_chunk_info_t {
 } H5D_chunk_info_t;

 /* Cached information about a particular chunk */
-typedef struct {
+typedef struct H5D_chunk_cached_t{
 hbool_t valid;                          /*whether cache info is valid*/
 hsize_t offset[H5O_LAYOUT_NDIMS];       /*logical offset to start*/
-size_t nbytes;                          /*size of stored data   */
+uint32_t nbytes;                        /*size of stored data   */
 unsigned filter_mask;                   /*excluded filters      */
 haddr_t addr;                           /*file address of chunk */
 } H5D_chunk_cached_t;

@@ -379,7 +379,7 @@ typedef struct {
  * The chunk's file address is part of the B-tree and not part of the key.
  */
 typedef struct H5D_istore_key_t {
-size_t nbytes;                          /*size of stored data   */
+uint32_t nbytes;                        /*size of stored data   */
 hsize_t offset[H5O_LAYOUT_NDIMS];       /*logical offset to start*/
 unsigned filter_mask;                   /*excluded filters      */
 } H5D_istore_key_t;

@@ -401,7 +401,7 @@ typedef struct H5D_istore_ud1_t {
 H5D_istore_bt_ud_common_t common;       /* Common info for B-tree user data (must be first) */

 /* Upward */
-size_t nbytes;                          /*size of stored data   */
+uint32_t nbytes;                        /*size of stored data   */
 unsigned filter_mask;                   /*excluded filters      */
 haddr_t addr;                           /*file address of chunk */
 } H5D_istore_ud1_t;

@@ -422,9 +422,9 @@ typedef struct H5D_rdcc_ent_t {
 hbool_t locked;             /*entry is locked in cache      */
 hbool_t dirty;              /*needs to be written to disk?  */
 hsize_t offset[H5O_LAYOUT_NDIMS];   /*chunk name            */
-size_t rd_count;            /*bytes remaining to be read    */
-size_t wr_count;            /*bytes remaining to be written */
-size_t chunk_size;          /*size of a chunk               */
+uint32_t rd_count;          /*bytes remaining to be read    */
+uint32_t wr_count;          /*bytes remaining to be written */
+uint32_t chunk_size;        /*size of a chunk               */
 size_t alloc_size;          /*amount allocated for the chunk    */
 uint8_t *chunk;             /*the unfiltered chunk data     */
 unsigned idx;               /*index in hash table           */

@@ -547,7 +547,7 @@ H5_DLL herr_t H5D_istore_copy(H5F_t *f_src, H5O_layout_t *layout_src,
 H5_DLL void * H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
 hbool_t relax, unsigned *idx_hint/*in,out*/);
 H5_DLL herr_t H5D_istore_unlock(const H5D_io_info_t *io_info,
-hbool_t dirty, unsigned idx_hint, void *chunk, size_t naccessed);
+hbool_t dirty, unsigned idx_hint, void *chunk, uint32_t naccessed);

 /* Functions that perform fill value operations on datasets */
 H5_DLL herr_t H5D_fill(const void *fill, const H5T_t *fill_type, void *buf,
@@ -334,8 +334,8 @@ typedef struct H5O_layout_contig_t {
 typedef struct H5O_layout_chunk_t {
 haddr_t addr;                       /* File address of B-tree */
 unsigned ndims;                     /* Num dimensions in chunk */
-size_t dim[H5O_LAYOUT_NDIMS];       /* Size of chunk in elements */
-size_t size;                        /* Size of chunk in bytes */
+uint32_t dim[H5O_LAYOUT_NDIMS];     /* Size of chunk in elements */
+uint32_t size;                      /* Size of chunk in bytes */
 H5RC_t *btree_shared;               /* Ref-counted info for B-tree nodes */
 } H5O_layout_chunk_t;
@@ -55,7 +55,7 @@
 #define H5D_CRT_CHUNK_DIM_SIZE   sizeof(unsigned)
 #define H5D_CRT_CHUNK_DIM_DEF    1
 /* Definitions for chunk size */
-#define H5D_CRT_CHUNK_SIZE_SIZE  sizeof(size_t[H5O_LAYOUT_NDIMS])
+#define H5D_CRT_CHUNK_SIZE_SIZE  sizeof(uint32_t[H5O_LAYOUT_NDIMS])
 #define H5D_CRT_CHUNK_SIZE_DEF  {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,\
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}
 /* Definitions for fill value.  size=0 means fill value will be 0 as

@@ -148,7 +148,7 @@ H5P_dcrt_reg_prop(H5P_genclass_t *pclass)
 {
 H5D_layout_t layout = H5D_CRT_LAYOUT_DEF;           /* Default storage layout */
 unsigned chunk_ndims = H5D_CRT_CHUNK_DIM_DEF;       /* Default rank for chunks */
-size_t chunk_size[H5O_LAYOUT_NDIMS] = H5D_CRT_CHUNK_SIZE_DEF;       /* Default chunk size */
+uint32_t chunk_size[H5O_LAYOUT_NDIMS] = H5D_CRT_CHUNK_SIZE_DEF;     /* Default chunk size */
 H5O_fill_t fill = H5D_CRT_FILL_VALUE_DEF;           /* Default fill value */
 unsigned alloc_time_state = H5D_CRT_ALLOC_TIME_STATE_DEF;   /* Default allocation time state */
 H5O_efl_t efl = H5D_CRT_EXT_FILE_LIST_DEF;          /* Default external file list */

@@ -737,37 +737,43 @@ done:
 herr_t
 H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
 {
-int i;
-size_t real_dims[H5O_LAYOUT_NDIMS]; /* Full-sized array to hold chunk dims */
 H5P_genplist_t *plist;      /* Property list pointer */
-herr_t ret_value=SUCCEED;   /* return value */
+uint32_t real_dims[H5O_LAYOUT_NDIMS]; /* Full-sized array to hold chunk dims */
+uint64_t chunk_nelmts;      /* Number of elements in chunk */
+unsigned u;                 /* Local index variable */
+herr_t ret_value = SUCCEED; /* Return value */

 FUNC_ENTER_API(H5Pset_chunk, FAIL)
 H5TRACE3("e", "iIs*[a1]h", plist_id, ndims, dim);

 /* Check arguments */
-if (ndims <= 0)
+if(ndims <= 0)
 HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "chunk dimensionality must be positive")
-if (ndims > H5S_MAX_RANK)
+if(ndims > H5S_MAX_RANK)
 HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "chunk dimensionality is too large")
-if (!dim)
+if(!dim)
 HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no chunk dimensions specified")

-/* Get the plist structure */
-if(NULL == (plist = H5P_object_verify(plist_id,H5P_DATASET_CREATE)))
-HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
-
-/* Initialize chunk dims to 0s */
-HDmemset(real_dims,0,sizeof(real_dims));
-for (i=0; i<ndims; i++) {
-if (dim[i] == 0)
+/* Verify & initialize internal chunk dims */
+HDmemset(real_dims, 0, sizeof(real_dims));
+chunk_nelmts = 1;
+for(u = 0; u < (unsigned)ndims; u++) {
+if(dim[u] == 0)
 HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all chunk dimensions must be positive")
-if (dim[i] != (dim[i]&0xffffffff))
+if(dim[u] != (dim[u] & 0xffffffff))
 HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all chunk dimensions must be less than 2^32")
-real_dims[i]=(size_t)dim[i]; /* Store user's chunk dimensions */
+chunk_nelmts *= dim[u];
+if(chunk_nelmts > (uint64_t)0xffffffff)
+HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "number of elements in chunk must be < 4GB")
+real_dims[u] = (uint32_t)dim[u]; /* Store user's chunk dimensions */
 } /* end for */

-if(H5P_set_layout (plist, H5D_CHUNKED) < 0)
+/* Get the plist structure */
+if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
+HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+/* Set chunk information in property list */
+if(H5P_set_layout(plist, H5D_CHUNKED) < 0)
 HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set layout")
 if(H5P_set(plist, H5D_CRT_CHUNK_DIM_NAME, &ndims) < 0)
 HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set chunk dimensionanlity")

@@ -776,7 +782,7 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])

 done:
 FUNC_LEAVE_API(ret_value)
-}
+} /* end H5Pset_chunk() */


 /*-------------------------------------------------------------------------

@@ -828,22 +834,22 @@ H5Pget_chunk(hid_t plist_id, int max_ndims, hsize_t dim[]/*out*/)

 if(dim) {
 int i;
-size_t chunk_size[H5O_LAYOUT_NDIMS];
+uint32_t chunk_size[H5O_LAYOUT_NDIMS];

 if(H5P_get(plist, H5D_CRT_CHUNK_SIZE_NAME, chunk_size) < 0)
 HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get chunk size")

 /* Get the dimension sizes */
-for (i=0; i<ndims && i<max_ndims; i++)
+for(i = 0; i < ndims && i < max_ndims; i++)
 dim[i] = chunk_size[i];
 } /* end if */

 /* Set the return value */
-ret_value=ndims;
+ret_value = ndims;

 done:
 FUNC_LEAVE_API(ret_value)
-}
+} /* end H5Pget_chunk() */


 /*-------------------------------------------------------------------------
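From the application side, the new validation in H5Pset_chunk() (together with the byte-size check at dataset-creation time in H5D_create) is observable as an early error. A hypothetical, test-style usage sketch, similar in spirit to the new huge_chunks tests mentioned in the commit message:

    #include <stdio.h>
    #include "hdf5.h"

    int
    main(void)
    {
        hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);
        hsize_t huge_dims[2] = {1024, 8 * 1024 * 1024};   /* 2^33 elements per chunk */
        herr_t  status;

        /* Expected to fail after this change: >4G elements in a single chunk */
        H5E_BEGIN_TRY {
            status = H5Pset_chunk(dcpl, 2, huge_dims);
        } H5E_END_TRY;

        printf("H5Pset_chunk %s the oversized chunk\n",
                status < 0 ? "rejected" : "accepted");

        H5Pclose(dcpl);
        return 0;
    }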
src/H5V.c (31 changed lines)
@ -399,33 +399,34 @@ done:
 */
htri_t
H5V_hyper_disjointp(unsigned n,
    const hsize_t *offset1, const size_t *size1,
    const hsize_t *offset2, const size_t *size2)
    const hsize_t *offset1, const uint32_t *size1,
    const hsize_t *offset2, const uint32_t *size2)
{
    unsigned u;
    htri_t ret_value=FALSE; /* Return value */
    htri_t ret_value = FALSE; /* Return value */

    /* Use FUNC_ENTER_NOAPI_NOINIT_NOFUNC here to avoid performance issues */
    FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5V_hyper_disjointp)

    if (!n || !size1 || !size2) HGOTO_DONE(TRUE)
    if(!n || !size1 || !size2)
        HGOTO_DONE(TRUE)

    for (u=0; u<n; u++) {
        assert (size1[u]<HSIZET_MAX);
        assert (size2[u]<HSIZET_MAX);
    for(u = 0; u < n; u++) {
        HDassert(size1[u] < HSIZET_MAX);
        HDassert(size2[u] < HSIZET_MAX);

        if (0==size1[u] || 0==size2[u])
        if(0 == size1[u] || 0 == size2[u])
            HGOTO_DONE(TRUE)
        if (((offset1?offset1[u]:0) < (offset2?offset2[u]:0) &&
            ((offset1?offset1[u]:0) + size1[u] <= (offset2?offset2[u]:0))) ||
            ((offset2?offset2[u]:0) < (offset1?offset1[u]:0) &&
            ((offset2?offset2[u]:0) + size2[u] <= (offset1?offset1[u]:0))))
        if(((offset1 ? offset1[u] : 0) < (offset2 ? offset2[u] : 0) &&
            ((offset1 ? offset1[u] : 0) + size1[u] <= (offset2 ? offset2[u] : 0))) ||
            ((offset2 ? offset2[u] : 0) < (offset1 ? offset1[u] : 0) &&
            ((offset2 ? offset2[u] : 0) + size2[u] <= (offset1 ? offset1[u] : 0))))
            HGOTO_DONE(TRUE)
    }
    } /* end for */

done:
    FUNC_LEAVE_NOAPI(ret_value)
}
} /* end H5V_hyper_disjointp() */

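A standalone restatement of the disjointness test this routine applies, for readers who do not want to chase the HGOTO_* macros. This is a sketch under the assumption that both offset arrays are non-NULL (the library routine also accepts NULL offsets and treats them as zero); the function name blocks_disjoint is mine:

#include <stdint.h>

/* Two n-dimensional blocks are disjoint if, in any single dimension, either
 * block is empty or one block ends at or before the other begins. */
int
blocks_disjoint(unsigned n, const uint64_t *off1, const uint32_t *size1,
                const uint64_t *off2, const uint32_t *size2)
{
    unsigned u;

    if(0 == n)
        return 1;
    for(u = 0; u < n; u++) {
        if(0 == size1[u] || 0 == size2[u])
            return 1;
        if((off1[u] < off2[u] && off1[u] + size1[u] <= off2[u]) ||
           (off2[u] < off1[u] && off2[u] + size2[u] <= off1[u]))
            return 1;
    } /* end for */
    return 0;   /* the blocks overlap in every dimension */
}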
/*-------------------------------------------------------------------------
@ -1225,7 +1226,7 @@ H5V_array_calc(hsize_t offset, unsigned n, const hsize_t *total_size, hsize_t *c
 *-------------------------------------------------------------------------
 */
herr_t
H5V_chunk_index(unsigned ndims, const hsize_t *coord, const size_t *chunk,
H5V_chunk_index(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
    const hsize_t *down_nchunks, hsize_t *chunk_idx)
{
    hsize_t scaled_coord[H5V_HYPER_NDIMS]; /* Scaled, coordinates, in terms of chunks */

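The body of the index computation is outside this hunk, so here is a sketch of what a computation of this shape looks like (assumed behaviour, not copied from H5V.c; down_counts and chunk_index_sketch are illustrative names): divide each element coordinate by the chunk size in that dimension, then fold the scaled coordinates into a single row-major index using the "down" chunk counts.

#include <stdint.h>

/* down_nchunks[u] = product of the number of chunks in all dimensions that
 * vary faster than u; e.g. for nchunks = {4, 5, 6} it is {30, 6, 1}. */
void
down_counts(unsigned ndims, const uint64_t *nchunks, uint64_t *down_nchunks)
{
    uint64_t acc = 1;
    unsigned u;

    for(u = ndims; u > 0; u--) {
        down_nchunks[u - 1] = acc;
        acc *= nchunks[u - 1];
    }
}

/* Scale element coordinates to chunk coordinates, then linearize them. */
uint64_t
chunk_index_sketch(unsigned ndims, const uint64_t *coord,
                   const uint32_t *chunk_dim, const uint64_t *down_nchunks)
{
    uint64_t idx = 0;
    unsigned u;

    for(u = 0; u < ndims; u++)
        idx += (coord[u] / chunk_dim[u]) * down_nchunks[u];
    return idx;
}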
@ -53,9 +53,7 @@ H5_DLL hsize_t H5V_hyper_stride(unsigned n, const hsize_t *size,
    const hsize_t *offset,
    hsize_t *stride);
H5_DLL htri_t H5V_hyper_disjointp(unsigned n, const hsize_t *offset1,
    const size_t *size1,
    const hsize_t *offset2,
    const size_t *size2);
    const uint32_t *size1, const hsize_t *offset2, const uint32_t *size2);
H5_DLL htri_t H5V_hyper_eq(unsigned n, const hsize_t *offset1,
    const hsize_t *size1, const hsize_t *offset2,
    const hsize_t *size2);
@ -88,7 +86,7 @@ H5_DLL hsize_t H5V_array_offset(unsigned n, const hsize_t *total_size,
H5_DLL herr_t H5V_array_calc(hsize_t offset, unsigned n,
    const hsize_t *total_size, hsize_t *coords);
H5_DLL herr_t H5V_chunk_index(unsigned ndims, const hsize_t *coord,
    const size_t *chunk, const hsize_t *down_nchunks, hsize_t *chunk_idx);
    const uint32_t *chunk, const hsize_t *down_nchunks, hsize_t *chunk_idx);
H5_DLL ssize_t H5V_memcpyvv(void *_dst,
    size_t dst_max_nseq, size_t *dst_curr_seq, size_t dst_len_arr[], hsize_t dst_off_arr[],
    const void *_src,

@ -104,7 +104,7 @@ flush2.chkexe_: flush1.chkexe_
# the temporary file name in ways that the makefile is not aware of.
CHECK_CLEANFILES+=cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offset.h5 \
    max_compact_dataset.h5 simple.h5 set_local.h5 random_chunks.h5 \
    extend.h5 istore.h5 \
    huge_chunks.h5 extend.h5 istore.h5 \
    tfile[1-4].h5 th5s[1-3].h5 lheap.h5 fheap.h5 ohdr.h5 stab.h5 \
    extern_[1-3].h5 extern_[1-4][ab].raw gheap[0-4].h5 dt_arith[1-2] \
    links.h5 links[0-6]*.h5 extlinks[0-15].h5 tmp \
150  test/dsets.c
@ -42,6 +42,7 @@ const char *FILENAME[] = {
    "simple",
    "set_local",
    "random_chunks",
    "huge_chunks",
    NULL
};
#define FILENAME_BUF_SIZE 1024
@ -145,6 +146,22 @@ const char *FILENAME[] = {
/* Names for random chunks test */
#define NPOINTS 50

/* Parameters for huge chunks test */
#define HUGE_DATASET "Dataset"
#define HUGE_DIM ((hsize_t)16 * 1024 * 1024 * 1024)
#define HUGE_CHUNK_DIM ((hsize_t)2 * 1024 * 1024 * 1024)
#define TOO_HUGE_CHUNK_DIM ((hsize_t)4 * 1024 * 1024 * 1024)
#define HUGE_DATASET2 "Dataset2"
#define HUGE_DIM2_0 ((hsize_t)16 * 1024)
#define HUGE_DIM2_1 ((hsize_t)16 * 1024)
#define HUGE_DIM2_2 ((hsize_t)16 * 1024)
#define HUGE_CHUNK_DIM2_0 ((hsize_t)2 * 1024)
#define HUGE_CHUNK_DIM2_1 ((hsize_t)1024)
#define HUGE_CHUNK_DIM2_2 ((hsize_t)1024)
#define TOO_HUGE_CHUNK_DIM2_0 ((hsize_t)4 * 1024)
#define TOO_HUGE_CHUNK_DIM2_1 ((hsize_t)1024)
#define TOO_HUGE_CHUNK_DIM2_2 ((hsize_t)1024)

/* Shared global arrays */
#define DSET_DIM1 100
#define DSET_DIM2 200
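For reference, the arithmetic these macros encode (my own working, not part of the patch; H5T_NATIVE_INT is assumed to be 4 bytes): TOO_HUGE_CHUNK_DIM and the product of the TOO_HUGE_CHUNK_DIM2_* values are exactly 2^32 elements, which the new H5Pset_chunk() checks reject outright, while HUGE_CHUNK_DIM and the product of the HUGE_CHUNK_DIM2_* values are 2^31 elements, which pass H5Pset_chunk() but give an 8 GiB chunk once the 4-byte element size is applied, so the rejection is expected to happen at dataset creation instead.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t too_huge = (uint64_t)4 * 1024 * 1024 * 1024;  /* 2^32 elements */
    uint64_t huge     = (uint64_t)2 * 1024 * 1024 * 1024;  /* 2^31 elements */

    printf("too huge: %" PRIu64 " elements, over 0xffffffff: %d\n",
           too_huge, too_huge > UINT64_C(0xffffffff));      /* prints 1 */
    printf("huge:     %" PRIu64 " elements, %" PRIu64 " bytes as 4-byte ints\n",
           huge, huge * 4);                                  /* 8 GiB > 4 GiB */
    return 0;
}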
@ -1941,8 +1958,6 @@ UNUSED
    data_corrupt[1] = 33;
    data_corrupt[2] = 27;

    /* Temporarily disable this test because the changes in chunk caching conflicts with
     * the way this test is conducted. -slu 2007/7/20 */
    if(H5Zregister (H5Z_CORRUPT) < 0) goto error;
    if(H5Pset_filter(dc, H5Z_FILTER_CORRUPT, 0, (size_t)3, data_corrupt) < 0) goto error;
    if(test_filter_internal(file,DSET_FLETCHER32_NAME_3,dc,DISABLE_FLETCHER32,DATA_CORRUPTED,&fletcher32_size) < 0) goto error;
@ -6357,6 +6372,118 @@ test_deprec(hid_t file)
} /* end test_deprec() */
#endif /* H5_NO_DEPRECATED_SYMBOLS */


/*-------------------------------------------------------------------------
 * Function: test_huge_chunks
 *
 * Purpose:  Tests that datasets with chunks >4GB can't be created.
 *
 * Return:   Success: 0
 *           Failure: -1
 *
 * Programmer: Quincey Koziol
 *             Thursday, May 1, 2008
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_huge_chunks(hid_t fapl)
{
    char filename[FILENAME_BUF_SIZE];
    hid_t fid = -1;         /* File ID */
    hid_t dcpl = -1;        /* Dataset creation property list ID */
    hid_t sid = -1;         /* Dataspace ID */
    hid_t dsid = -1;        /* Dataset ID */
    hsize_t dim, chunk_dim; /* Dataset and chunk dimensions */
    hsize_t dim2[3], chunk_dim2[3]; /* Dataset and chunk dimensions */
    herr_t ret;             /* Generic return value */

    TESTING("creating dataset with >4GB chunks");

    h5_fixname(FILENAME[7], fapl, filename, sizeof filename);

    /* Create file */
    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR

    /* Create dataset creation property list */
    if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR

    /* Try to set too large of a chunk for 1-D dataset (# of elements) */
    chunk_dim = TOO_HUGE_CHUNK_DIM;
    H5E_BEGIN_TRY {
        ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
    } H5E_END_TRY;
    if(ret >= 0)
        FAIL_PUTS_ERROR(" Set chunk size with too large of chunk dimensions.")

    /* Try to set too large of a chunk for n-D dataset (# of elements) */
    chunk_dim2[0] = TOO_HUGE_CHUNK_DIM2_0;
    chunk_dim2[1] = TOO_HUGE_CHUNK_DIM2_1;
    chunk_dim2[2] = TOO_HUGE_CHUNK_DIM2_2;
    H5E_BEGIN_TRY {
        ret = H5Pset_chunk(dcpl, 3, chunk_dim2);
    } H5E_END_TRY;
    if(ret >= 0)
        FAIL_PUTS_ERROR(" Set chunk size with too large of chunk dimensions.")

    /* Set 1-D chunk size */
    chunk_dim = HUGE_CHUNK_DIM;
    if(H5Pset_chunk(dcpl, 1, &chunk_dim) < 0) FAIL_STACK_ERROR

    /* Create 1-D dataspace */
    dim = HUGE_DIM;
    if((sid = H5Screate_simple(1, &dim, NULL)) < 0) FAIL_STACK_ERROR

    /* Try to create dataset */
    H5E_BEGIN_TRY {
        dsid = H5Dcreate2(fid, HUGE_DATASET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
    } H5E_END_TRY;
    if(dsid >= 0)
        FAIL_PUTS_ERROR(" 1-D Dataset with too large of chunk dimensions created.")

    /* Close 1-D dataspace */
    if(H5Sclose(sid) < 0) FAIL_STACK_ERROR


    /* Set n-D chunk size */
    chunk_dim2[0] = HUGE_CHUNK_DIM2_0;
    chunk_dim2[1] = HUGE_CHUNK_DIM2_1;
    chunk_dim2[2] = HUGE_CHUNK_DIM2_2;
    if(H5Pset_chunk(dcpl, 3, chunk_dim2) < 0) FAIL_STACK_ERROR

    /* Create n-D dataspace */
    dim2[0] = HUGE_DIM2_0;
    dim2[1] = HUGE_DIM2_1;
    dim2[2] = HUGE_DIM2_2;
    if((sid = H5Screate_simple(3, dim2, NULL)) < 0) FAIL_STACK_ERROR

    /* Try to create dataset */
    H5E_BEGIN_TRY {
        dsid = H5Dcreate2(fid, HUGE_DATASET2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
    } H5E_END_TRY;
    if(dsid >= 0)
        FAIL_PUTS_ERROR(" n-D Dataset with too large of chunk dimensions created.")

    /* Close n-D dataspace */
    if(H5Sclose(sid) < 0) FAIL_STACK_ERROR

    /* Close everything else */
    if(H5Pclose(dcpl) < 0) FAIL_STACK_ERROR
    if(H5Fclose(fid) < 0) FAIL_STACK_ERROR

    PASSED();
    return 0;

error:
    H5E_BEGIN_TRY {
        H5Pclose(dcpl);
        H5Dclose(dsid);
        H5Sclose(sid);
        H5Fclose(fid);
    } H5E_END_TRY;
    return -1;
} /* end test_huge_chunks() */

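Stripped of the test-harness macros, the scenario the 1-D half of this test exercises looks roughly like the sketch below (illustrative only; try_huge_chunk is my own name, the dimension values mirror the HUGE_* macros above, and the expectation that creation fails follows from the test's stated purpose that datasets with >4GB chunks can't be created):

#include "hdf5.h"

/* A 2^31-element chunk passes H5Pset_chunk(), but with a 4-byte datatype the
 * chunk would occupy 8 GiB, so creating the dataset is expected to fail. */
int
try_huge_chunk(const char *filename)
{
    hsize_t dim       = (hsize_t)16 * 1024 * 1024 * 1024;  /* dataset extent */
    hsize_t chunk_dim = (hsize_t)2  * 1024 * 1024 * 1024;  /* 2^31 elements  */
    hid_t   fid, dcpl, sid, dsid;

    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        return -1;
    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, &chunk_dim);          /* succeeds: < 2^32 elements */
    sid = H5Screate_simple(1, &dim, NULL);

    H5E_BEGIN_TRY {
        dsid = H5Dcreate2(fid, "Dataset", H5T_NATIVE_INT, sid,
                          H5P_DEFAULT, dcpl, H5P_DEFAULT);
    } H5E_END_TRY;

    H5Sclose(sid);
    H5Pclose(dcpl);
    H5Fclose(fid);
    return (dsid < 0) ? 0 : -1;                 /* creation should have failed */
}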
/*-------------------------------------------------------------------------
 * Function: main
@ -6393,12 +6520,19 @@ main(void)
    size_t rdcc_nbytes;
    double rdcc_w0;

    /* Set the random # seed */
    HDsrandom((unsigned long)HDtime(NULL));

    /* Testing setup */
    h5_reset();
    fapl = h5_fileaccess();

    /* Set the random # seed */
    HDsrandom((unsigned long)HDtime(NULL));
    /* Turn off the chunk cache, so all the chunks are immediately written to disk */
    if(H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0)
        goto error;
    rdcc_nbytes = 0;
    if(H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0) < 0)
        goto error;

    /* Copy the file access property list */
    if((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR
@ -6422,13 +6556,6 @@ main(void)
        my_fapl = fapl;
    } /* end else */

    /* Turn off the chunk cache, so all the chunks are immediately written to disk */
    if(H5Pget_cache(my_fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0)
        goto error;
    rdcc_nbytes = 0;
    if(H5Pset_cache(my_fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0) < 0)
        goto error;

    /* Create the file for this test */
    if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0)
        goto error;
@ -6478,6 +6605,7 @@ main(void)
#ifndef H5_NO_DEPRECATED_SYMBOLS
    nerrors += (test_deprec(file) < 0 ? 1 : 0);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
    nerrors += (test_huge_chunks(my_fapl) < 0 ? 1 : 0);

    if(H5Fclose(file) < 0)
        goto error;

@ -171,19 +171,20 @@ test_create(hid_t f, const char *prefix)
{
    hid_t dataset;  /* Dataset ID */
    hsize_t dims[H5O_LAYOUT_NDIMS+1];          /* Dimensions of dataset */
    hsize_t my_chunk_dims[H5O_LAYOUT_NDIMS+1]; /* Dimensions of chunks */
    char name[256]; /* Dataset name */
    unsigned u;     /* Local index variable */

    TESTING("istore create");

    dims[0]=TEST_CHUNK_SIZE;
    dims[0] = my_chunk_dims[0] = 1;
    for (u = 1; u <= H5S_MAX_RANK; u++) {
        /* Initialize the dimension size in this new dimension */
        dims[u]=TEST_CHUNK_SIZE;
        dims[u] = my_chunk_dims[u] = 2;

        /* Create chunked dataset of this dimensionality */
        HDsnprintf(name, sizeof name, "%s_%02u", prefix, u);
        if ((dataset=new_object(f, name, (int)u, dims, chunk_dims)) < 0)
        if ((dataset=new_object(f, name, (int)u, dims, my_chunk_dims)) < 0)
            return FAIL;

        /* Close dataset created */
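A quick sanity check of the new istore dimensions (my own arithmetic, not part of the patch): with my_chunk_dims[0] = 1 and 2 in every higher dimension, even the highest-rank case holds only 2^31 elements, comfortably under the 2^32 - 1 element limit that H5Pset_chunk() now enforces.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t nelmts = 1;    /* my_chunk_dims[0] == 1 */
    unsigned u;

    for(u = 1; u < 32; u++) /* assuming H5S_MAX_RANK == 32 in this tree */
        nelmts *= 2;        /* my_chunk_dims[u] == 2 */
    printf("largest istore chunk: %llu elements (limit 4294967295)\n",
           (unsigned long long)nelmts);     /* prints 2147483648 */
    return 0;
}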
12  test/tsohm.c
@ -121,9 +121,9 @@ const int ENUM_VAL[] = {
    20480, 10,
    -1001, -10
};
#define SIZE2_RANK1 10
#define SIZE2_RANK2 20
#define SIZE2_DIMS {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
#define SIZE2_RANK1 6
#define SIZE2_RANK2 10
#define SIZE2_DIMS {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}

#define LONG_STRING "00 index. A long string used for testing. To create new strings, set the first two characters to be some ASCII number other than 00, such as 01."

@ -1376,7 +1376,7 @@ size2_helper(hid_t fcpl_id, int test_file_closing, size2_helper_struct *ret_size
    /* Constants used in this function */
    const int rank1 = SIZE2_RANK1;
    const int rank2 = SIZE2_RANK2;
    const hsize_t dims[20] = SIZE2_DIMS;
    const hsize_t dims[SIZE2_RANK2] = SIZE2_DIMS;
    dtype1_struct fill1;
    char fill2[DTYPE2_SIZE];

@ -1745,8 +1745,8 @@ static void size2_verify(void)
    char attr_correct_string[NAME_BUF_SIZE];
    char attr_name[NAME_BUF_SIZE];
    int ndims;
    hsize_t dims[20];
    hsize_t correct_dims[20] = SIZE2_DIMS;
    hsize_t dims[SIZE2_RANK2];
    hsize_t correct_dims[SIZE2_RANK2] = SIZE2_DIMS;

    file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fopen");
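A closing observation on the tsohm changes (my assumption is that the shrunken dimension lists belong to the same cleanup of tests whose chunk or dataspace shapes grew too large under the new limits): the product of the old 20-entry SIZE2_DIMS list is 20!, vastly more than 2^32 elements, while the new 10-entry list multiplies out to 10! = 3,628,800, well under the limit.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t prod20 = 1, prod10 = 1;
    unsigned u;

    for(u = 1; u <= 20; u++) {
        prod20 *= u;        /* old SIZE2_DIMS: 1 * 2 * ... * 20 */
        if(u <= 10)
            prod10 *= u;    /* new SIZE2_DIMS: 1 * 2 * ... * 10 */
    }
    printf("old SIZE2_DIMS product: %llu elements\n", (unsigned long long)prod20);
    printf("new SIZE2_DIMS product: %llu elements\n", (unsigned long long)prod10);
    return 0;
}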