Mirror of https://github.com/HDFGroup/hdf5.git (synced 2025-04-12 17:31:09 +08:00)
[svn-r29614] Description:
    Bring updated code for setting chunk size information from revise_chunks branch.

Tested on:
    MacOSX/64 10.11.4 (amazon) w/serial, parallel & production
    (h5committest forthcoming)
parent 3cd705e56e
commit 86f401d740
@@ -555,6 +555,69 @@ done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_set_info() */


/*-------------------------------------------------------------------------
 * Function:    H5D__chunk_set_sizes
 *
 * Purpose:     Sets chunk and type sizes.
 *
 * Return:      SUCCEED/FAIL
 *
 * Programmer:  Dana Robinson
 *              December 2015
 *
 *-------------------------------------------------------------------------
 */
herr_t
H5D__chunk_set_sizes(H5D_t *dset)
{
    uint64_t chunk_size;            /* Size of chunk in bytes */
    unsigned max_enc_bytes_per_dim; /* Max. number of bytes required to encode this dimension */
    unsigned u;                     /* Iterator */
    herr_t ret_value = SUCCEED;     /* Return value */

    FUNC_ENTER_NOAPI(FAIL)

    /* Sanity checks */
    HDassert(dset);

    /* Increment # of chunk dimensions, to account for datatype size as last element */
    dset->shared->layout.u.chunk.ndims++;

    /* Set the last dimension of the chunk size to the size of the datatype */
    dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] = (uint32_t)H5T_GET_SIZE(dset->shared->type);

    /* Compute number of bytes to use for encoding chunk dimensions */
    max_enc_bytes_per_dim = 0;
    for(u = 0; u < (unsigned)dset->shared->layout.u.chunk.ndims; u++) {
        unsigned enc_bytes_per_dim;     /* Number of bytes required to encode this dimension */

        /* Get encoded size of dim, in bytes */
        enc_bytes_per_dim = (H5VM_log2_gen(dset->shared->layout.u.chunk.dim[u]) + 8) / 8;

        /* Check if this is the largest value so far */
        if(enc_bytes_per_dim > max_enc_bytes_per_dim)
            max_enc_bytes_per_dim = enc_bytes_per_dim;
    } /* end for */
    HDassert(max_enc_bytes_per_dim > 0 && max_enc_bytes_per_dim <= 8);
    dset->shared->layout.u.chunk.enc_bytes_per_dim = max_enc_bytes_per_dim;

    /* Compute and store the total size of a chunk */
    /* (Use 64-bit value to ensure that we can detect >4GB chunks) */
    for(u = 1, chunk_size = (uint64_t)dset->shared->layout.u.chunk.dim[0]; u < dset->shared->layout.u.chunk.ndims; u++)
        chunk_size *= (uint64_t)dset->shared->layout.u.chunk.dim[u];

    /* Check for chunk larger than can be represented in 32-bits */
    /* (Chunk size is encoded in 32-bit value in v1 B-tree records) */
    if(chunk_size > (uint64_t)0xffffffff)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB")

    H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t);

done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__chunk_set_sizes */
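For reference, a minimal standalone sketch (not part of this patch, and not HDF5 source) of the arithmetic above: a dimension value needs ceil((log2(dim) + 1) / 8) bytes to encode, which the code writes as (H5VM_log2_gen(dim) + 8) / 8, and the per-chunk byte count is accumulated in a 64-bit value so chunks of 4GB or more can be rejected. The my_log2() helper is a simplified stand-in for the library's internal H5VM_log2_gen(), and the example dimensions are made up.

    /* chunk_sizes_sketch.c -- illustration only; build with: cc chunk_sizes_sketch.c */
    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for the library's internal H5VM_log2_gen() */
    static unsigned my_log2(uint64_t n)
    {
        unsigned r = 0;

        while(n >>= 1)      /* position of highest set bit, i.e. floor(log2(n)) */
            r++;
        return r;
    }

    int main(void)
    {
        /* Hypothetical chunk dimensions; the last element plays the role of the
         * datatype size that H5D__chunk_set_sizes() appends (here, 8-byte values) */
        const uint32_t dim[] = {255, 256, 65536, 8};
        const size_t ndims = sizeof(dim) / sizeof(dim[0]);
        unsigned max_enc_bytes_per_dim = 0;
        uint64_t chunk_size = 1;
        size_t u;

        for(u = 0; u < ndims; u++) {
            /* Bytes needed to encode this dimension: ceil((log2(dim) + 1) / 8) */
            unsigned enc_bytes_per_dim = (my_log2(dim[u]) + 8) / 8;

            if(enc_bytes_per_dim > max_enc_bytes_per_dim)
                max_enc_bytes_per_dim = enc_bytes_per_dim;
            chunk_size *= (uint64_t)dim[u];
            printf("dim[%zu] = %u -> encoded in %u byte(s)\n", u, (unsigned)dim[u], enc_bytes_per_dim);
        }

        printf("max encoded width: %u byte(s)\n", max_enc_bytes_per_dim);
        printf("chunk size: %llu bytes (%s the 32-bit limit)\n",
               (unsigned long long)chunk_size,
               chunk_size > (uint64_t)0xffffffff ? "exceeds" : "within");
        return 0;
    }

With these sample values, 255 encodes in 1 byte, 256 in 2, 65536 in 3, and the product of all four dimensions overflows the 32-bit limit, which is exactly the case the HGOTO_ERROR above rejects.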

/*-------------------------------------------------------------------------
 * Function:    H5D__chunk_construct
@@ -571,8 +634,6 @@ done:
static herr_t
H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset)
{
    const H5T_t *type = dset->shared->type;     /* Convenience pointer to dataset's datatype */
    uint64_t chunk_size;            /* Size of chunk in bytes */
    unsigned u;                     /* Local index variable */
    herr_t ret_value = SUCCEED;     /* Return value */

@@ -585,22 +646,18 @@ H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset)
    /* Check for invalid chunk dimension rank */
    if(0 == dset->shared->layout.u.chunk.ndims)
        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "no chunk information set?")

    /* Set up layout information */
    if(dset->shared->layout.u.chunk.ndims != dset->shared->ndims)
        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "dimensionality of chunks doesn't match the dataspace")

    /* Increment # of chunk dimensions, to account for datatype size as last element */
    dset->shared->layout.u.chunk.ndims++;
    /* Set chunk sizes */
    if(H5D__chunk_set_sizes(dset) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes")
    HDassert((unsigned)(dset->shared->layout.u.chunk.ndims) <= NELMTS(dset->shared->layout.u.chunk.dim));

    /* Chunked storage is not compatible with external storage (currently) */
    if(dset->shared->dcpl_cache.efl.nused > 0)
        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "external storage not supported with chunked layout")

    /* Set the last dimension of the chunk size to the size of the datatype */
    dset->shared->layout.u.chunk.dim[dset->shared->layout.u.chunk.ndims - 1] = (uint32_t)H5T_GET_SIZE(type);

    /* Sanity check dimensions */
    for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) {
        /* Don't allow zero-sized chunk dimensions */
@@ -616,19 +673,6 @@ H5D__chunk_construct(H5F_t H5_ATTR_UNUSED *f, H5D_t *dset)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be <= maximum dimension size for fixed-sized dimensions")
    } /* end for */

    /* Compute the total size of a chunk */
    /* (Use 64-bit value to ensure that we can detect >4GB chunks) */
    for(u = 1, chunk_size = (uint64_t)dset->shared->layout.u.chunk.dim[0]; u < dset->shared->layout.u.chunk.ndims; u++)
        chunk_size *= (uint64_t)dset->shared->layout.u.chunk.dim[u];

    /* Check for chunk larger than can be represented in 32-bits */
    /* (Chunk size is encoded in 32-bit value in v1 B-tree records) */
    if(chunk_size > (uint64_t)0xffffffff)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB")

    /* Retain computed chunk size */
    H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t);

    /* Reset address and pointer of the array struct for the chunked storage index */
    if(H5D_chunk_idx_reset(&dset->shared->layout.storage.u.chunk, TRUE) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to reset chunked storage index")
@@ -453,9 +453,10 @@ H5D__layout_oh_read(H5D_t *dataset, hid_t dxpl_id, hid_t dapl_id, H5P_genplist_t
    /* Copy layout to the DCPL */
    if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &dataset->shared->layout) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout")
    /* Adjust chunk dimensions back again (*sigh*) */
    if(H5D_CHUNKED == dataset->shared->layout.type)
        dataset->shared->layout.u.chunk.ndims++;

    /* Set chunk sizes */
    if(H5D__chunk_set_sizes(dataset) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes")

done:
    FUNC_LEAVE_NOAPI(ret_value)
@@ -622,6 +622,7 @@ H5_DLL herr_t H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id,
    hsize_t old_dim[]);
H5_DLL herr_t H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,
    const hsize_t *old_dim);
H5_DLL herr_t H5D__chunk_set_sizes(H5D_t *dset);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5D__chunk_addrmap(const H5D_io_info_t *io_info, haddr_t chunk_addr[]);
#endif /* H5_HAVE_PARALLEL */
@@ -1984,7 +1984,6 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
    H5P_genplist_t *plist;          /* Property list pointer */
    H5O_layout_t chunk_layout;      /* Layout information for setting chunk info */
    uint64_t chunk_nelmts;          /* Number of elements in chunk */
    unsigned max_enc_bytes_per_dim; /* Max. number of bytes required to encode this dimension */
    unsigned u;                     /* Local index variable */
    herr_t ret_value = SUCCEED;     /* Return value */

@@ -2012,10 +2011,7 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
    HDmemcpy(&chunk_layout, &H5D_def_layout_chunk_g, sizeof(H5D_def_layout_chunk_g));
    HDmemset(&chunk_layout.u.chunk.dim, 0, sizeof(chunk_layout.u.chunk.dim));
    chunk_nelmts = 1;
    max_enc_bytes_per_dim = 0;
    for(u = 0; u < (unsigned)ndims; u++) {
        unsigned enc_bytes_per_dim;     /* Number of bytes required to encode this dimension */

        if(dim[u] == 0)
            HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all chunk dimensions must be positive")
        if(dim[u] != (dim[u] & 0xffffffff))
@@ -2024,16 +2020,7 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
        if(chunk_nelmts > (uint64_t)0xffffffff)
            HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "number of elements in chunk must be < 4GB")
        chunk_layout.u.chunk.dim[u] = (uint32_t)dim[u]; /* Store user's chunk dimensions */

        /* Get encoded size of dim, in bytes */
        enc_bytes_per_dim = (H5VM_log2_gen(dim[u]) + 8) / 8;

        /* Check if this is the largest value so far */
        if(enc_bytes_per_dim > max_enc_bytes_per_dim)
            max_enc_bytes_per_dim = enc_bytes_per_dim;
    } /* end for */
    HDassert(max_enc_bytes_per_dim > 0 && max_enc_bytes_per_dim <= 8);
    chunk_layout.u.chunk.enc_bytes_per_dim = max_enc_bytes_per_dim;

    /* Get the plist structure */
    if(NULL == (plist = H5P_object_verify(plist_id, H5P_DATASET_CREATE)))
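As a usage note, the checks in the loop above are what a caller of the public H5Pset_chunk() API runs into: each chunk dimension must be positive and fit in 32 bits, and the number of elements per chunk must stay below 4G. A minimal sketch of setting a legal chunk shape on a dataset creation property list (the dimension values are chosen arbitrarily for illustration, and error handling is kept deliberately terse):

    /* set_chunk_example.c -- illustration only, not part of this change */
    #include "hdf5.h"

    int main(void)
    {
        hsize_t chunk_dims[2] = {64, 128};      /* 8192 elements per chunk */
        hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);

        if(dcpl < 0)
            return 1;

        /* Validated by the per-dimension loop shown in the diff above */
        if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0) {
            H5Pclose(dcpl);
            return 1;
        }

        /* ... pass dcpl as the DCPL argument of H5Dcreate2() when creating the dataset ... */

        H5Pclose(dcpl);
        return 0;
    }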