mirror of https://github.com/HDFGroup/hdf5.git
synced 2025-02-17 16:10:24 +08:00

[svn-r27045] Renamed H5_ASSIGN_OVERFLOW() to H5_CHECKED_ASSIGN() and re-ordered
the arguments to be in a more logical order.

Tested on: h5committest

This commit is contained in:
parent 83d37604d3
commit 67ba6cb57d
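Every hunk below follows the same pattern: the old call H5_ASSIGN_OVERFLOW(dst, src, src_type, dst_type) becomes H5_CHECKED_ASSIGN(dst, dst_type, src, src_type), so that each value is immediately followed by its own type. The sketch below illustrates what a checked-assignment macro of this shape can look like; it is a simplified stand-in using a round-trip cast check, not the actual macro body from the HDF5 headers.

#include <assert.h>
#include <stddef.h>

/* Simplified sketch of a checked narrowing assignment; the real HDF5 macro
 * may differ (for example, it can also diagnose signed/unsigned mismatches). */
#define H5_CHECKED_ASSIGN(dst, dsttype, src, srctype)                       \
    do {                                                                    \
        srctype _chk_src = (src);              /* evaluate source once   */ \
        dsttype _chk_dst = (dsttype)_chk_src;  /* narrowing cast         */ \
        assert((srctype)_chk_dst == _chk_src); /* value must round-trip  */ \
        (dst) = _chk_dst;                                                   \
    } while (0)

int main(void)
{
    long long snelmts = 1024;   /* stands in for an hssize_t element count */
    size_t    nelmts  = 0;

    /* Old spelling removed by this commit:
     *     H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, size_t);
     * New spelling, destination and its type first, then source and its type: */
    H5_CHECKED_ASSIGN(nelmts, size_t, snelmts, long long);
    (void)nelmts;
    return 0;
}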
@@ -538,7 +538,7 @@ H5A_dense_insert(H5F_t *f, hid_t dxpl_id, const H5O_ainfo_t *ainfo, H5A_t *attr)
 udata.common.shared_fheap = shared_fheap;
 udata.common.name = attr->shared->name;
 udata.common.name_hash = H5_checksum_lookup3(attr->shared->name, HDstrlen(attr->shared->name), 0);
-H5_ASSIGN_OVERFLOW(udata.common.flags, mesg_flags, unsigned, uint8_t);
+H5_CHECKED_ASSIGN(udata.common.flags, uint8_t, mesg_flags, unsigned);
 udata.common.corder = attr->shared->crt_idx;
 udata.common.found_op = NULL;
 udata.common.found_op_data = NULL;
src/H5Aint.c (14 changed lines)
@@ -255,7 +255,7 @@ H5A_create(const H5G_loc_t *loc, const char *name, const H5T_t *type,
 /* Get # of elements for attribute's dataspace */
 if((snelmts = H5S_GET_EXTENT_NPOINTS(attr->shared->ds)) < 0)
 HGOTO_ERROR(H5E_ATTR, H5E_CANTCOUNT, NULL, "dataspace is invalid")
-H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, size_t);
+H5_CHECKED_ASSIGN(nelmts, size_t, snelmts, hssize_t);

 HDassert(attr->shared->dt_size > 0);
 HDassert(attr->shared->ds_size > 0);
@@ -508,7 +508,7 @@ H5A_write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id)
 /* Get # of elements for attribute's dataspace */
 if((snelmts = H5S_GET_EXTENT_NPOINTS(attr->shared->ds)) < 0)
 HGOTO_ERROR(H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
-H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, size_t);
+H5_CHECKED_ASSIGN(nelmts, size_t, snelmts, hssize_t);

 /* If there's actually data elements for the attribute, make a copy of the data passed in */
 if(nelmts > 0) {
@@ -621,7 +621,7 @@ H5A_read(const H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id)
 /* Create buffer for data to store on disk */
 if((snelmts = H5S_GET_EXTENT_NPOINTS(attr->shared->ds)) < 0)
 HGOTO_ERROR(H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
-H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, size_t);
+H5_CHECKED_ASSIGN(nelmts, size_t, snelmts, hssize_t);

 if(nelmts > 0) {
 /* Get the memory and file datatype sizes */
@@ -1680,7 +1680,7 @@ H5A_attr_iterate_table(const H5A_attr_table_t *atable, hsize_t skip,
 *last_attr = skip;

 /* Iterate over attribute messages */
-H5_ASSIGN_OVERFLOW(/* To: */ u, /* From: */ skip, /* From: */ hsize_t, /* To: */ size_t)
+H5_CHECKED_ASSIGN(u, size_t, skip, hsize_t)
 for(; u < atable->nattrs && !ret_value; u++) {
 /* Check which type of callback to make */
 switch(attr_op->op_type) {
@@ -2012,7 +2012,11 @@ H5A_attr_copy_file(const H5A_t *attr_src, H5F_t *file_dst, hbool_t *recompute_si
 *recompute_size = TRUE;

 /* Compute the size of the data */
-H5_ASSIGN_OVERFLOW(attr_dst->shared->data_size, H5S_GET_EXTENT_NPOINTS(attr_dst->shared->ds) * H5T_get_size(attr_dst->shared->dt), hssize_t, size_t);
+/* NOTE: This raises warnings. If we are going to be serious about
+ * expecting overflow here, we should implement testing similar to
+ * that described in CERT bulletins INT30-C and INT32-C.
+ */
+H5_CHECKED_ASSIGN(attr_dst->shared->data_size, size_t, H5S_GET_EXTENT_NPOINTS(attr_dst->shared->ds) * H5T_get_size(attr_dst->shared->dt), hssize_t);

 /* Copy (& convert) the data, if necessary */
 if(attr_src->shared->data) {
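The NOTE added in the hunk above points at the CERT C rules for unsigned wraparound (INT30-C) and signed overflow (INT32-C). As a hedged illustration of the kind of precondition test those rules describe for a size computation such as npoints * type_size, a helper along these lines could be used; the function name and types here are hypothetical and are not part of this commit.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch of an INT30-C-style check: refuse the multiplication if it would
 * wrap around in size_t, instead of silently truncating the product. */
static bool checked_size_mul(size_t nelmts, size_t type_size, size_t *out)
{
    if (type_size != 0 && nelmts > SIZE_MAX / type_size)
        return false;               /* product would not fit in size_t */
    *out = nelmts * type_size;
    return true;
}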
@@ -160,7 +160,7 @@ HDmemset(hdr->page, 0, hdr->node_size);

 /* Initialize leaf node info */
 sz_max_nrec = H5B2_NUM_LEAF_REC(hdr->node_size, hdr->rrec_size);
-H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[0].max_nrec, /* From: */ sz_max_nrec, /* From: */ size_t, /* To: */ unsigned)
+H5_CHECKED_ASSIGN(hdr->node_info[0].max_nrec, unsigned, sz_max_nrec, size_t)
 hdr->node_info[0].split_nrec = (hdr->node_info[0].max_nrec * hdr->split_percent) / 100;
 hdr->node_info[0].merge_nrec = (hdr->node_info[0].max_nrec * hdr->merge_percent) / 100;
 hdr->node_info[0].cum_max_nrec = hdr->node_info[0].max_nrec;
@@ -182,14 +182,14 @@ HDmemset(hdr->page, 0, hdr->node_size);
 /* Compute size to store # of records in each node */
 /* (uses leaf # of records because its the largest) */
 u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[0].max_nrec);
-H5_ASSIGN_OVERFLOW(/* To: */ hdr->max_nrec_size, /* From: */ u_max_nrec_size, /* From: */ unsigned, /* To: */ uint8_t)
+H5_CHECKED_ASSIGN(hdr->max_nrec_size, uint8_t, u_max_nrec_size, unsigned)
 HDassert(hdr->max_nrec_size <= H5B2_SIZEOF_RECORDS_PER_NODE);

 /* Initialize internal node info */
 if(depth > 0) {
 for(u = 1; u < (unsigned)(depth + 1); u++) {
 sz_max_nrec = H5B2_NUM_INT_REC(hdr, u);
-H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[u].max_nrec, /* From: */ sz_max_nrec, /* From: */ size_t, /* To: */ unsigned)
+H5_CHECKED_ASSIGN(hdr->node_info[u].max_nrec, unsigned, sz_max_nrec, size_t)
 HDassert(hdr->node_info[u].max_nrec <= hdr->node_info[u - 1].max_nrec);

 hdr->node_info[u].split_nrec = (hdr->node_info[u].max_nrec * hdr->split_percent) / 100;
@@ -198,7 +198,7 @@ HDmemset(hdr->page, 0, hdr->node_size);
 hdr->node_info[u].cum_max_nrec = ((hdr->node_info[u].max_nrec + 1) *
 hdr->node_info[u - 1].cum_max_nrec) + hdr->node_info[u].max_nrec;
 u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[u].cum_max_nrec);
-H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[u].cum_max_nrec_size, /* From: */ u_max_nrec_size, /* From: */ unsigned, /* To: */ uint8_t)
+H5_CHECKED_ASSIGN(hdr->node_info[u].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned)

 if(NULL == (hdr->node_info[u].nat_rec_fac = H5FL_fac_init(hdr->cls->nrec_size * hdr->node_info[u].max_nrec)))
 HGOTO_ERROR(H5E_BTREE, H5E_CANTINIT, FAIL, "can't create node native key block factory")
@@ -382,13 +382,13 @@ H5B2_split_root(H5B2_hdr_t *hdr, hid_t dxpl_id)

 /* Update node info for new depth of tree */
 sz_max_nrec = H5B2_NUM_INT_REC(hdr, hdr->depth);
-H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[hdr->depth].max_nrec, /* From: */ sz_max_nrec, /* From: */ size_t, /* To: */ unsigned)
+H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].max_nrec, unsigned, sz_max_nrec, size_t)
 hdr->node_info[hdr->depth].split_nrec = (hdr->node_info[hdr->depth].max_nrec * hdr->split_percent) / 100;
 hdr->node_info[hdr->depth].merge_nrec = (hdr->node_info[hdr->depth].max_nrec * hdr->merge_percent) / 100;
 hdr->node_info[hdr->depth].cum_max_nrec = ((hdr->node_info[hdr->depth].max_nrec + 1) *
 hdr->node_info[hdr->depth - 1].cum_max_nrec) + hdr->node_info[hdr->depth].max_nrec;
 u_max_nrec_size = H5VM_limit_enc_size((uint64_t)hdr->node_info[hdr->depth].cum_max_nrec);
-H5_ASSIGN_OVERFLOW(/* To: */ hdr->node_info[hdr->depth].cum_max_nrec_size, /* From: */ u_max_nrec_size, /* From: */ unsigned, /* To: */ uint8_t)
+H5_CHECKED_ASSIGN(hdr->node_info[hdr->depth].cum_max_nrec_size, uint8_t, u_max_nrec_size, unsigned)
 if(NULL == (hdr->node_info[hdr->depth].nat_rec_fac = H5FL_fac_init(hdr->cls->nrec_size * hdr->node_info[hdr->depth].max_nrec)))
 HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create node native key block factory")
 if(NULL == (hdr->node_info[hdr->depth].node_ptr_fac = H5FL_fac_init(sizeof(H5B2_node_ptr_t) * (hdr->node_info[hdr->depth].max_nrec + 1))))
@@ -546,7 +546,7 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
 /* Count the number of records being moved */
 for(u = 0; u < move_nrec; u++)
 moved_nrec += right_node_ptrs[u].all_nrec;
-H5_ASSIGN_OVERFLOW(/* To: */ left_moved_nrec, /* From: */ moved_nrec, /* From: */ hsize_t, /* To: */ hssize_t)
+H5_CHECKED_ASSIGN(left_moved_nrec, hssize_t, moved_nrec, hsize_t)
 right_moved_nrec -= (hssize_t)moved_nrec;

 /* Copy node pointers from right node to left */
@@ -600,7 +600,7 @@ H5B2_redistribute2(H5B2_hdr_t *hdr, hid_t dxpl_id, unsigned depth,
 for(u = 0; u < move_nrec; u++)
 moved_nrec += right_node_ptrs[u].all_nrec;
 left_moved_nrec -= (hssize_t)moved_nrec;
-H5_ASSIGN_OVERFLOW(/* To: */ right_moved_nrec, /* From: */ moved_nrec, /* From: */ hsize_t, /* To: */ hssize_t)
+H5_CHECKED_ASSIGN(right_moved_nrec, hssize_t, moved_nrec, hsize_t)
 } /* end if */

 /* Update number of records in child nodes */
@@ -1848,7 +1848,7 @@ H5B2_protect_leaf(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr, unsigned nrec,
 /* Set up user data for callback */
 udata.f = hdr->f;
 udata.hdr = hdr;
-H5_ASSIGN_OVERFLOW(/* To: */ udata.nrec, /* From: */ nrec, /* From: */ unsigned, /* To: */ uint16_t)
+H5_CHECKED_ASSIGN(udata.nrec, uint16_t, nrec, unsigned)

 /* Protect the leaf node */
 if(NULL == (ret_value = (H5B2_leaf_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_LEAF, addr, &udata, rw)))
@@ -1968,8 +1968,8 @@ H5B2_protect_internal(H5B2_hdr_t *hdr, hid_t dxpl_id, haddr_t addr,
 /* Set up user data for callback */
 udata.f = hdr->f;
 udata.hdr = hdr;
-H5_ASSIGN_OVERFLOW(/* To: */ udata.nrec, /* From: */ nrec, /* From: */ unsigned, /* To: */ uint16_t)
-H5_ASSIGN_OVERFLOW(/* To: */ udata.depth, /* From: */ depth, /* From: */ unsigned, /* To: */ uint16_t)
+H5_CHECKED_ASSIGN(udata.nrec, uint16_t, nrec, unsigned)
+H5_CHECKED_ASSIGN(udata.depth, uint16_t, depth, unsigned)

 /* Protect the internal node */
 if(NULL == (ret_value = (H5B2_internal_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_BT2_INT, addr, &udata, rw)))
@@ -284,7 +284,7 @@ H5D__btree_new_node(H5F_t *f, hid_t UNUSED dxpl_id, H5B_ins_t op,
 * The left key describes the storage of the UDATA chunk being
 * inserted into the tree.
 */
-H5_ASSIGN_OVERFLOW(lt_key->nbytes, udata->chunk_block.length, hsize_t, uint32_t);
+H5_CHECKED_ASSIGN(lt_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
 lt_key->filter_mask = udata->filter_mask;
 for(u = 0; u < udata->common.layout->ndims; u++)
 lt_key->offset[u] = udata->common.offset[u];
@@ -548,7 +548,7 @@ H5D__btree_insert(H5F_t *f, hid_t UNUSED dxpl_id, haddr_t addr, void *_lt_key,
 /* Set node's address (already re-allocated by main chunk routines) */
 HDassert(H5F_addr_defined(udata->chunk_block.offset));
 *new_node_p = udata->chunk_block.offset;
-H5_ASSIGN_OVERFLOW(lt_key->nbytes, udata->chunk_block.length, hsize_t, uint32_t);
+H5_CHECKED_ASSIGN(lt_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
 lt_key->filter_mask = udata->filter_mask;
 *lt_key_changed = TRUE;
 ret_value = H5B_INS_CHANGE;
@@ -568,7 +568,7 @@ H5D__btree_insert(H5F_t *f, hid_t UNUSED dxpl_id, haddr_t addr, void *_lt_key,
 * Split this node, inserting the new new node to the right of the
 * current node. The MD_KEY is where the split occurs.
 */
-H5_ASSIGN_OVERFLOW(md_key->nbytes, udata->chunk_block.length, hsize_t, uint32_t);
+H5_CHECKED_ASSIGN(md_key->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
 md_key->filter_mask = udata->filter_mask;
 for(u = 0; u < udata->common.layout->ndims; u++) {
 HDassert(0 == udata->common.offset[u] % udata->common.layout->dim[u]);
@@ -558,7 +558,7 @@ H5D__chunk_construct(H5F_t UNUSED *f, H5D_t *dset)
 HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "chunk size must be < 4GB")

 /* Retain computed chunk size */
-H5_ASSIGN_OVERFLOW(dset->shared->layout.u.chunk.size, chunk_size, uint64_t, uint32_t);
+H5_CHECKED_ASSIGN(dset->shared->layout.u.chunk.size, uint32_t, chunk_size, uint64_t);

 /* Reset address and pointer of the array struct for the chunked storage index */
 if(H5D_chunk_idx_reset(&dset->shared->layout.storage.u.chunk, TRUE) < 0)
@@ -718,7 +718,7 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf
 if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space)) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimension number")
 /* Set the number of dimensions for the memory dataspace */
-H5_ASSIGN_OVERFLOW(fm->m_ndims, sm_ndims, int, unsigned);
+H5_CHECKED_ASSIGN(fm->m_ndims, unsigned, sm_ndims, int);

 /* Get rank for file dataspace */
 fm->f_ndims = f_ndims = dataset->shared->layout.u.chunk.ndims - 1;
@@ -1317,7 +1317,7 @@ H5D__create_chunk_file_map_hyper(H5D_chunk_map_t *fm, const H5D_io_info_t
 /* Get number of elements selected in chunk */
 if((schunk_points = H5S_GET_SELECT_NPOINTS(tmp_fchunk)) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't get file selection # of elements")
-H5_ASSIGN_OVERFLOW(new_chunk_info->chunk_points, schunk_points, hssize_t, uint32_t);
+H5_CHECKED_ASSIGN(new_chunk_info->chunk_points, uint32_t, schunk_points, hssize_t);

 /* Decrement # of points left in file selection */
 sel_points -= (hsize_t)schunk_points;
@@ -1793,7 +1793,7 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;

 /* Initialize temporary contiguous storage info */
-H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t);
+H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t);

 /* Set up compact I/O info object */
 HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
@@ -1938,7 +1938,7 @@ H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
 ctg_io_info.layout_ops = *H5D_LOPS_CONTIG;

 /* Initialize temporary contiguous storage info */
-H5_ASSIGN_OVERFLOW(ctg_store.contig.dset_size, io_info->dset->shared->layout.u.chunk.size, uint32_t, hsize_t);
+H5_CHECKED_ASSIGN(ctg_store.contig.dset_size, hsize_t, io_info->dset->shared->layout.u.chunk.size, uint32_t);

 /* Set up compact I/O info object */
 HDmemcpy(&cpt_io_info, io_info, sizeof(cpt_io_info));
@@ -2248,7 +2248,7 @@ H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *ud
 /* Stored the information to cache */
 HDmemcpy(last->offset, udata->common.offset, sizeof(hsize_t) * udata->common.layout->ndims);
 last->addr = udata->chunk_block.offset;
-H5_ASSIGN_OVERFLOW(last->nbytes, udata->chunk_block.length, hsize_t, uint32_t);
+H5_CHECKED_ASSIGN(last->nbytes, uint32_t, udata->chunk_block.length, hsize_t);
 last->filter_mask = udata->filter_mask;

 /* Indicate that the cached info is valid */
@@ -2517,7 +2517,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
 point_of_no_return = TRUE;
 ent->chunk = NULL;
 } /* end else */
-H5_ASSIGN_OVERFLOW(nbytes, udata.chunk_block.length, hsize_t, size_t);
+H5_CHECKED_ASSIGN(nbytes, size_t, udata.chunk_block.length, hsize_t);
 if(H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.filter_mask), dxpl_cache->err_detect,
 dxpl_cache->filter_cb, &nbytes, &alloc, &buf) < 0)
 HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
@@ -2526,7 +2526,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
 if(nbytes > ((size_t)0xffffffff))
 HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, FAIL, "chunk too large for 32-bit length")
 #endif /* H5_SIZEOF_SIZE_T > 4 */
-H5_ASSIGN_OVERFLOW(udata.chunk_block.length, nbytes, size_t, hsize_t);
+H5_CHECKED_ASSIGN(udata.chunk_block.length, hsize_t, nbytes, size_t);

 /* Indicate that the chunk must be allocated */
 must_alloc = TRUE;
@@ -2835,7 +2835,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,

 /* Get the chunk's size */
 HDassert(layout->u.chunk.size > 0);
-H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
+H5_CHECKED_ASSIGN(chunk_size, size_t, layout->u.chunk.size, uint32_t);

 /* Check if the chunk is in the cache */
 if(UINT_MAX != udata->idx_hint) {
@@ -3000,8 +3000,8 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
 ent->chunk_block.offset = chunk_addr;
 ent->chunk_block.length = chunk_alloc;
 HDmemcpy(ent->offset, io_info->store->chunk.offset, sizeof(hsize_t) * layout->u.chunk.ndims);
-H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t);
-H5_ASSIGN_OVERFLOW(ent->wr_count, chunk_size, size_t, uint32_t);
+H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t);
+H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t);
 ent->chunk = (uint8_t *)chunk;

 /* Add it to the cache */
@@ -3321,7 +3321,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
 HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")

 /* Get original chunk size */
-H5_ASSIGN_OVERFLOW(orig_chunk_size, layout->u.chunk.size, uint32_t, size_t);
+H5_CHECKED_ASSIGN(orig_chunk_size, size_t, layout->u.chunk.size, uint32_t);

 /* Check the dataset's fill-value status */
 if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
@@ -3498,7 +3498,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
 udata.common.storage = &layout->storage.u.chunk;
 udata.common.offset = chunk_offset;
 udata.chunk_block.offset = HADDR_UNDEF;
-H5_ASSIGN_OVERFLOW(udata.chunk_block.length, chunk_size, size_t, uint32_t);
+H5_CHECKED_ASSIGN(udata.chunk_block.length, uint32_t, chunk_size, size_t);
 udata.filter_mask = filter_mask;

 /* Allocate the chunk (with all processes) */
@@ -3656,9 +3656,9 @@ H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
 leftover_blocks = chunk_info->num_io % mpi_size;

 /* Cast values to types needed by MPI */
-H5_ASSIGN_OVERFLOW(blocks, num_blocks, size_t, int);
-H5_ASSIGN_OVERFLOW(leftover, leftover_blocks, size_t, int);
-H5_ASSIGN_OVERFLOW(block_len, chunk_size, size_t, int);
+H5_CHECKED_ASSIGN(blocks, int, num_blocks, size_t);
+H5_CHECKED_ASSIGN(leftover, int, leftover_blocks, size_t);
+H5_CHECKED_ASSIGN(block_len, int, chunk_size, size_t);

 /* Allocate buffers */
 /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */
@@ -3787,7 +3787,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)

 /* Get the chunk's size */
 HDassert(layout->u.chunk.size > 0);
-H5_ASSIGN_OVERFLOW(chunk_size, layout->u.chunk.size, uint32_t, size_t);
+H5_CHECKED_ASSIGN(chunk_size, size_t, layout->u.chunk.size, uint32_t);

 /* Get the info for the chunk in the file */
 if(H5D__chunk_lookup(dset, io_info->dxpl_id, chunk_offset, io_info->store->chunk.index, &chk_udata) < 0)
@@ -4558,7 +4558,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
 FUNC_ENTER_STATIC

 /* Get 'size_t' local value for number of bytes in chunk */
-H5_ASSIGN_OVERFLOW(nbytes, chunk_rec->nbytes, uint32_t, size_t);
+H5_CHECKED_ASSIGN(nbytes, size_t, chunk_rec->nbytes, uint32_t);

 /* Check parameter for type conversion */
 if(udata->do_convert) {
@@ -4675,7 +4675,7 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
 if(nbytes > ((size_t)0xffffffff))
 HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, H5_ITER_ERROR, "chunk too large for 32-bit length")
 #endif /* H5_SIZEOF_SIZE_T > 4 */
-H5_ASSIGN_OVERFLOW(udata_dst.chunk_block.length, nbytes, size_t, uint32_t);
+H5_CHECKED_ASSIGN(udata_dst.chunk_block.length, uint32_t, nbytes, size_t);
 udata->buf = buf;
 udata->buf_size = buf_size;
 } /* end if */
@@ -4777,7 +4777,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
 /* Get the dim info for dataset */
 if((sndims = H5S_extent_get_dims(ds_extent_src, curr_dims, NULL)) < 0)
 HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
-H5_ASSIGN_OVERFLOW(ndims, sndims, int, unsigned);
+H5_CHECKED_ASSIGN(ndims, unsigned, sndims, int);

 /* Set the source layout chunk information */
 if(H5D__chunk_set_info_real(layout_src, ndims, curr_dims) < 0)
@@ -4885,7 +4885,7 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
 do_convert = TRUE;
 } /* end if */

-H5_ASSIGN_OVERFLOW(buf_size, layout_src->size, uint32_t, size_t);
+H5_CHECKED_ASSIGN(buf_size, size_t, layout_src->size, uint32_t);
 reclaim_buf_size = 0;
 } /* end else */
@@ -197,7 +197,7 @@ H5D__compact_construct(H5F_t *f, H5D_t *dset)
 tmp_size = H5T_get_size(dset->shared->type);
 HDassert(tmp_size > 0);
 tmp_size = tmp_size * (hsize_t)stmp_size;
-H5_ASSIGN_OVERFLOW(dset->shared->layout.storage.u.compact.size, tmp_size, hssize_t, size_t);
+H5_CHECKED_ASSIGN(dset->shared->layout.storage.u.compact.size, size_t, tmp_size, hssize_t);

 /* Verify data size is smaller than maximum header message size
 * (64KB) minus other layout message fields.
@@ -259,7 +259,7 @@ H5D__contig_fill(const H5D_t *dset, hid_t dxpl_id)
 /* Get the number of elements in the dataset's dataspace */
 if((snpoints = H5S_GET_EXTENT_NPOINTS(dset->shared->space)) < 0)
 HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "dataset has negative number of elements")
-H5_ASSIGN_OVERFLOW(npoints, snpoints, hssize_t, size_t);
+H5_CHECKED_ASSIGN(npoints, size_t, snpoints, hssize_t);

 /* Initialize the fill value buffer */
 if(H5D__fill_init(&fb_info, NULL, NULL, NULL, NULL, NULL,
@@ -644,6 +644,7 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
 size_t sieve_size = (size_t)-1; /* Size of sieve buffer */
 haddr_t rel_eoa; /* Relative end of file address */
 hsize_t max_data; /* Actual maximum size of data to cache */
+hsize_t min; /* temporary minimum value (avoids some ugly macro nesting) */
 herr_t ret_value = SUCCEED; /* Return value */

 FUNC_ENTER_STATIC
@@ -684,7 +685,8 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
 max_data = store_contig->dset_size - dst_off;

 /* Compute the size of the sieve buffer */
-H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);

 /* Read the new sieve buffer */
 if(H5F_block_read(file, H5FD_MEM_DRAW, dset_contig->sieve_loc, dset_contig->sieve_size, udata->dxpl_id, dset_contig->sieve_buf) < 0)
@@ -757,9 +759,13 @@ H5D__contig_readvv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
 /* Only need this when resizing sieve buffer */
 max_data = store_contig->dset_size - dst_off;

-/* Compute the size of the sieve buffer */
-/* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
-H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+/* Compute the size of the sieve buffer.
+ * Don't read off the end of the file, don't read past
+ * the end of the data element, and don't read more than
+ * the buffer size.
+ */
+min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);

 /* Update local copies of sieve information */
 sieve_start = dset_contig->sieve_loc;
@@ -915,6 +921,7 @@ H5D__contig_writevv_sieve_cb(hsize_t dst_off, hsize_t src_off, size_t len,
 size_t sieve_size = (size_t)-1; /* size of sieve buffer */
 haddr_t rel_eoa; /* Relative end of file address */
 hsize_t max_data; /* Actual maximum size of data to cache */
+hsize_t min; /* temporary minimum value (avoids some ugly macro nesting) */
 herr_t ret_value = SUCCEED; /* Return value */

 FUNC_ENTER_STATIC
@@ -960,7 +967,8 @@ if(dset_contig->sieve_size > len)
 max_data = store_contig->dset_size - dst_off;

 /* Compute the size of the sieve buffer */
-H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);

 /* Check if there is any point in reading the data from the file */
 if(dset_contig->sieve_size > len) {
@@ -1075,9 +1083,13 @@ if(dset_contig->sieve_size > len)
 /* Only need this when resizing sieve buffer */
 max_data = store_contig->dset_size - dst_off;

-/* Compute the size of the sieve buffer */
-/* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
-H5_ASSIGN_OVERFLOW(dset_contig->sieve_size, MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size), hsize_t, size_t);
+/* Compute the size of the sieve buffer.
+ * Don't read off the end of the file, don't read past
+ * the end of the data element, and don't read more than
+ * the buffer size.
+ */
+min = MIN3(rel_eoa - dset_contig->sieve_loc, max_data, dset_contig->sieve_buf_size);
+H5_CHECKED_ASSIGN(dset_contig->sieve_size, size_t, min, hsize_t);

 /* Update local copies of sieve information */
 sieve_start = dset_contig->sieve_loc;
@@ -170,7 +170,7 @@ H5D__efl_construct(H5F_t *f, H5D_t *dset)
 stmp_size = H5S_GET_EXTENT_NPOINTS(dset->shared->space);
 HDassert(stmp_size >= 0);
 tmp_size = (hsize_t)stmp_size * dt_size;
-H5_ASSIGN_OVERFLOW(dset->shared->layout.storage.u.contig.size, tmp_size, hssize_t, hsize_t);
+H5_CHECKED_ASSIGN(dset->shared->layout.storage.u.contig.size, hsize_t, tmp_size, hssize_t);

 /* Get the sieve buffer size for this dataset */
 dset->shared->cache.contig.sieve_buf_size = H5F_SIEVE_BUF_SIZE(f);
@@ -409,7 +409,7 @@ H5D__read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
 mem_space = file_space;
 if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0)
 HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "dst dataspace has invalid selection")
-H5_ASSIGN_OVERFLOW(nelmts,snelmts,hssize_t,hsize_t);
+H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);

 /* Fill the DXPL cache values for later use */
 if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
@@ -679,7 +679,7 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,

 if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space)) < 0)
 HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
-H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, hsize_t);
+H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);

 /* Make certain that the number of elements in each selection is the same */
 if(nelmts != (hsize_t)H5S_GET_SELECT_NPOINTS(file_space))
@@ -438,7 +438,7 @@ H5D__mpio_get_sum_chunk(const H5D_io_info_t *io_info, const H5D_chunk_map_t *fm,
 /* Get the number of chunks to perform I/O on */
 num_chunkf = 0;
 ori_num_chunkf = H5SL_count(fm->sel_chunks);
-H5_ASSIGN_OVERFLOW(num_chunkf, ori_num_chunkf, size_t, int);
+H5_CHECKED_ASSIGN(num_chunkf, int, ori_num_chunkf, size_t);

 /* Determine the summation of number of chunks for all processes */
 if(MPI_SUCCESS != (mpi_code = MPI_Allreduce(&num_chunkf, sum_chunkf, 1, MPI_INT, MPI_SUM, io_info->comm)))
@@ -826,7 +826,7 @@ H5D__link_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *typ
 } /* end if */

 /* Retrieve total # of chunks in dataset */
-H5_ASSIGN_OVERFLOW(total_chunks, fm->layout->u.chunk.nchunks, hsize_t, size_t);
+H5_CHECKED_ASSIGN(total_chunks, size_t, fm->layout->u.chunk.nchunks, hsize_t);

 /* Handle special case when dataspace dimensions only allow one chunk in
 * the dataset. [This sometimes is used by developers who want the
@@ -1148,7 +1148,7 @@ H5D__multi_chunk_collective_io(H5D_io_info_t *io_info, const H5D_type_info_t *ty
 #endif

 /* Retrieve total # of chunks in dataset */
-H5_ASSIGN_OVERFLOW(total_chunk, fm->layout->u.chunk.nchunks, hsize_t, size_t);
+H5_CHECKED_ASSIGN(total_chunk, size_t, fm->layout->u.chunk.nchunks, hsize_t);
 HDassert(total_chunk != 0);

 /* Allocate memories */
@@ -1699,7 +1699,7 @@ H5D__obtain_mpio_mode(H5D_io_info_t* io_info, H5D_chunk_map_t *fm,
 HGOTO_ERROR(H5E_IO, H5E_MPI, FAIL, "unable to obtain mpi size")

 /* Setup parameters */
-H5_ASSIGN_OVERFLOW(total_chunks, fm->layout->u.chunk.nchunks, hsize_t, int);
+H5_CHECKED_ASSIGN(total_chunks, int, fm->layout->u.chunk.nchunks, hsize_t);
 percent_nproc_per_chunk = H5P_peek_unsigned(dx_plist, H5D_XFER_MPIO_CHUNK_OPT_RATIO_NAME);
 /* if ratio is 0, perform collective io */
 if(0 == percent_nproc_per_chunk) {
@@ -129,7 +129,7 @@ H5FA__dblock_alloc(H5FA_hdr_t *hdr, hsize_t nelmts))
 hsize_t npages = ((nelmts + dblock->dblk_page_nelmts) - 1) / dblock->dblk_page_nelmts;

 /* Safely assign the number of pages */
-H5_ASSIGN_OVERFLOW(/* To: */ dblock->npages, /* From: */ npages, /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(dblock->npages, size_t, npages, hsize_t);

 /* Sanity check that we have at least 1 page */
 HDassert(dblock->npages > 0);
@@ -1300,7 +1300,7 @@ H5FD_core_write(H5FD_t *_file, H5FD_mem_t UNUSED type, hid_t UNUSED dxpl_id, had
 size_t new_eof;

 /* Determine new size of memory buffer */
-H5_ASSIGN_OVERFLOW(new_eof, file->increment * ((addr + size) / file->increment), hsize_t, size_t);
+H5_CHECKED_ASSIGN(new_eof, size_t, file->increment * ((addr + size) / file->increment), hsize_t);
 if((addr + size) % file->increment)
 new_eof += file->increment;

@@ -1469,7 +1469,7 @@ H5FD_core_truncate(H5FD_t *_file, hid_t UNUSED dxpl_id, hbool_t closing)
 new_eof = file->eoa;
 else { /* set eof to smallest multiple of increment that exceeds eoa */
 /* Determine new size of memory buffer */
-H5_ASSIGN_OVERFLOW(new_eof, file->increment * (file->eoa / file->increment), hsize_t, size_t);
+H5_CHECKED_ASSIGN(new_eof, size_t, file->increment * (file->eoa / file->increment), hsize_t);
 if(file->eoa % file->increment)
 new_eof += file->increment;
 } /* end else */
@@ -515,7 +515,7 @@ H5FD_direct_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxadd
 HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, NULL, "bad VFL driver info")

 file->fd = fd;
-H5_ASSIGN_OVERFLOW(file->eof,sb.st_size,h5_stat_size_t,haddr_t);
+H5_CHECKED_ASSIGN(file->eof, haddr_t, sb.st_size, h5_stat_size_t);
 file->pos = HADDR_UNDEF;
 file->op = OP_UNKNOWN;
 #ifdef H5_HAVE_WIN32_API
@@ -1170,7 +1170,7 @@ H5FD_family_read(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, si

 /* Read from each member */
 while(size > 0) {
-H5_ASSIGN_OVERFLOW(u,addr /file->memb_size,hsize_t,unsigned);
+H5_CHECKED_ASSIGN(u, unsigned, addr / file->memb_size, hsize_t);

 sub = addr % file->memb_size;

@@ -1239,7 +1239,7 @@ H5FD_family_write(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, s

 /* Write to each member */
 while (size>0) {
-H5_ASSIGN_OVERFLOW(u,addr /file->memb_size,hsize_t,unsigned);
+H5_CHECKED_ASSIGN(u, unsigned, addr / file->memb_size, hsize_t);

 sub = addr % file->memb_size;

@@ -571,7 +571,7 @@ H5FD_log_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr)
 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to allocate file struct")

 file->fd = fd;
-H5_ASSIGN_OVERFLOW(file->eof, sb.st_size, h5_stat_size_t, haddr_t);
+H5_CHECKED_ASSIGN(file->eof, haddr_t, sb.st_size, h5_stat_size_t);
 file->pos = HADDR_UNDEF;
 file->op = OP_UNKNOWN;
 #ifdef H5_HAVE_WIN32_API
@@ -358,7 +358,7 @@ H5FD_sec2_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr)
 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to allocate file struct")

 file->fd = fd;
-H5_ASSIGN_OVERFLOW(file->eof, sb.st_size, h5_stat_size_t, haddr_t);
+H5_CHECKED_ASSIGN(file->eof, haddr_t, sb.st_size, h5_stat_size_t);
 file->pos = HADDR_UNDEF;
 file->op = OP_UNKNOWN;
 #ifdef H5_HAVE_WIN32_API
@@ -578,7 +578,7 @@ H5FS_cache_sinfo_load(H5F_t *f, hid_t dxpl_id, haddr_t UNUSED addr, void *_udata
 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

 /* Allocate space for the buffer to serialize the sections into */
-H5_ASSIGN_OVERFLOW(/* To: */ old_sect_size, /* From: */ udata->fspace->sect_size, /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(old_sect_size, size_t, udata->fspace->sect_size, hsize_t);
 if(NULL == (buf = H5FL_BLK_MALLOC(sect_block, (size_t)udata->fspace->sect_size)))
 HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")

@@ -1038,7 +1038,7 @@ H5FS_cache_sinfo_size(const H5F_t UNUSED *f, const H5FS_sinfo_t *sinfo, size_t *
 HDassert(size_ptr);

 /* Set size value */
-H5_ASSIGN_OVERFLOW(/* To: */ *size_ptr, /* From: */ sinfo->fspace->alloc_sect_size, /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(*size_ptr, size_t, sinfo->fspace->alloc_sect_size, hsize_t);

 FUNC_LEAVE_NOAPI(SUCCEED)
 } /* H5FS_cache_sinfo_size() */
@@ -172,7 +172,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
 /* Read the part before the metadata accumulator */
 if(addr < accum->loc) {
 /* Set the amount to read */
-H5_ASSIGN_OVERFLOW(amount_before, (accum->loc - addr), hsize_t, size_t);
+H5_CHECKED_ASSIGN(amount_before, size_t, (accum->loc - addr), hsize_t);

 /* Make room for the metadata to read in */
 HDmemmove(accum->buf + amount_before, accum->buf, accum->size);
@@ -193,7 +193,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
 size_t amount_after; /* Amount to read at a time */

 /* Set the amount to read */
-H5_ASSIGN_OVERFLOW(amount_after, ((addr + size) - (accum->loc + accum->size)), hsize_t, size_t);
+H5_CHECKED_ASSIGN(amount_after, size_t, ((addr + size) - (accum->loc + accum->size)), hsize_t);

 /* Dispatch to driver */
 if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, (accum->loc + accum->size), amount_after, (accum->buf + accum->size + amount_before)) < 0)
@@ -536,14 +536,14 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
 size_t old_offset; /* Offset of old data within the accumulator buffer */

 /* Calculate the amount we will need to add to the accumulator size, based on the amount of overlap */
-H5_ASSIGN_OVERFLOW(add_size, (accum->loc - addr), hsize_t, size_t);
+H5_CHECKED_ASSIGN(add_size, size_t, (accum->loc - addr), hsize_t);

 /* Check if we need to adjust accumulator size */
 if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_PREPEND, add_size) < 0)
 HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")

 /* Calculate the proper offset of the existing metadata */
-H5_ASSIGN_OVERFLOW(old_offset, (addr + size) - accum->loc, hsize_t, size_t);
+H5_CHECKED_ASSIGN(old_offset, size_t, (addr + size) - accum->loc, hsize_t);

 /* Move the existing metadata to the proper location */
 HDmemmove(accum->buf + size, accum->buf + old_offset, (accum->size - old_offset));
@@ -576,7 +576,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t type, haddr_t addr,
 size_t dirty_off; /* Offset of dirty region */

 /* Calculate the amount we will need to add to the accumulator size, based on the amount of overlap */
-H5_ASSIGN_OVERFLOW(add_size, (addr + size) - (accum->loc + accum->size), hsize_t, size_t);
+H5_CHECKED_ASSIGN(add_size, size_t, (addr + size) - (accum->loc + accum->size), hsize_t);

 /* Check if we need to adjust accumulator size */
 if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_APPEND, add_size) < 0)
@@ -885,7 +885,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t UNUSED type, haddr_t a
 size_t new_accum_size; /* Size of new accumulator buffer */

 /* Calculate the size of the overlap with the accumulator, etc. */
-H5_ASSIGN_OVERFLOW(overlap_size, (addr + size) - accum->loc, haddr_t, size_t);
+H5_CHECKED_ASSIGN(overlap_size, size_t, (addr + size) - accum->loc, haddr_t);
 new_accum_size = accum->size - overlap_size;

 /* Move the accumulator buffer information to eliminate the freed block */
@@ -919,7 +919,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t UNUSED type, haddr_t a
 haddr_t dirty_start = accum->loc + accum->dirty_off;

 /* Calculate the size of the overlap with the accumulator */
-H5_ASSIGN_OVERFLOW(overlap_size, (accum->loc + accum->size) - addr, haddr_t, size_t);
+H5_CHECKED_ASSIGN(overlap_size, size_t, (accum->loc + accum->size) - addr, haddr_t);

 /* Check if block to free begins before end of dirty region */
 if(accum->dirty && H5F_addr_lt(addr, dirty_end)) {
@@ -434,7 +434,7 @@ H5F_super_init(H5F_t *f, hid_t dxpl_id)
 superblock_size = (hsize_t)H5F_SUPERBLOCK_SIZE(super_vers, f);

 /* Compute the size of the driver information block */
-H5_ASSIGN_OVERFLOW(driver_size, H5FD_sb_size(f->shared->lf), hsize_t, size_t);
+H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
 if(driver_size > 0) {
 driver_size += H5F_DRVINFOBLOCK_HDR_SIZE;

@@ -723,7 +723,7 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t UNUSED addr,
 HGOTO_ERROR(H5E_FILE, H5E_CANTENCODE, FAIL, "can't encode root group symbol table entry")

 /* Encode the driver information block. */
-H5_ASSIGN_OVERFLOW(driver_size, H5FD_sb_size(f->shared->lf), hsize_t, size_t);
+H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);

 /* Checking whether driver block address is defined here is to handle backward
 * compatibility. If the file was created with v1.6 library or earlier and no
@@ -799,7 +799,7 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t UNUSED addr,
 } /* end else */

 /* Retrieve the total size of the superblock info */
-H5_ASSIGN_OVERFLOW(superblock_size, (p - buf), ptrdiff_t, size_t);
+H5_CHECKED_ASSIGN(superblock_size, size_t, (p - buf), ptrdiff_t);

 /* Double check we didn't overrun the block (unlikely) */
 HDassert(superblock_size <= sizeof(buf));
@@ -824,7 +824,7 @@ H5F_sblock_flush(H5F_t *f, hid_t dxpl_id, hbool_t destroy, haddr_t UNUSED addr,
 /* Check for ignoring the driver info for this file */
 if(!H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
 /* Check for driver info message */
-H5_ASSIGN_OVERFLOW(driver_size, H5FD_sb_size(f->shared->lf), hsize_t, size_t);
+H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
 if(driver_size > 0) {
 H5O_drvinfo_t drvinfo; /* Driver info */
 uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
@@ -261,7 +261,7 @@ H5Gcreate1(hid_t loc_id, const char *name, size_t size_hint)
 HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get group info")

 /* Set the non-default local heap size hint */
-H5_ASSIGN_OVERFLOW(ginfo.lheap_size_hint, size_hint, size_t, uint32_t);
+H5_CHECKED_ASSIGN(ginfo.lheap_size_hint, uint32_t, size_hint, size_t);
 if(H5P_set(gc_plist, H5G_CRT_GROUP_INFO_NAME, &ginfo) < 0)
 HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set group info")
 } /* end if */
@@ -467,7 +467,7 @@ H5G__link_iterate_table(const H5G_link_table_t *ltable, hsize_t skip,
 *last_lnk += skip;

 /* Iterate over link messages */
-H5_ASSIGN_OVERFLOW(/* To: */ u, /* From: */ skip, /* From: */ hsize_t, /* To: */ size_t)
+H5_CHECKED_ASSIGN(u, size_t, skip, hsize_t)
 for(; u < ltable->nlinks && !ret_value; u++) {
 /* Make the callback */
 ret_value = (op)(&(ltable->lnks[u]), op_data);
@@ -387,11 +387,11 @@ H5HF_dblock_debug_cb(H5FS_section_info_t *_sect, void *_udata)
 if(sect_start < dblock_start)
 start = 0;
 else
-H5_ASSIGN_OVERFLOW(/* To: */ start, /* From: */ (sect_start - dblock_start), /* From: */ hsize_t, /* To: */ size_t)
+H5_CHECKED_ASSIGN(start, size_t, (sect_start - dblock_start), hsize_t)
 if(sect_end > dblock_end)
-H5_ASSIGN_OVERFLOW(/* To: */ end, /* From: */ udata->dblock_size, /* From: */ hsize_t, /* To: */ size_t)
+H5_CHECKED_ASSIGN(end, size_t, udata->dblock_size, hsize_t)
 else
-H5_ASSIGN_OVERFLOW(/* To: */ end, /* From: */ ((sect_end - dblock_start) + 1), /* From: */ hsize_t, /* To: */ size_t)
+H5_CHECKED_ASSIGN(end, size_t, ((sect_end - dblock_start) + 1), hsize_t)

 /* Calculate the length */
 len = end - start;
@@ -131,7 +131,7 @@ H5HF_man_dblock_create(hid_t dxpl_id, H5HF_hdr_t *hdr, H5HF_indirect_t *par_iblo
 dblock->block_off = par_iblock->block_off;
 dblock->block_off += hdr->man_dtable.row_block_off[par_row];
 dblock->block_off += hdr->man_dtable.row_block_size[par_row] * (par_entry % hdr->man_dtable.cparam.width);
-H5_ASSIGN_OVERFLOW(/* To: */ dblock->size, /* From: */ hdr->man_dtable.row_block_size[par_row], /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(dblock->size, size_t, hdr->man_dtable.row_block_size[par_row], hsize_t);
 } /* end if */
 else {
 /* Must be the root direct block */
@@ -397,7 +397,7 @@ H5HF_man_dblock_new(H5HF_hdr_t *hdr, hid_t dxpl_id, size_t request,
 if(H5HF_man_iter_curr(&hdr->next_block, &next_row, NULL, &next_entry, &iblock) < 0)
 HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "unable to retrieve current block iterator location")
 HDassert(next_row < iblock->nrows);
-H5_ASSIGN_OVERFLOW(/* To: */ next_size, /* From: */ hdr->man_dtable.row_block_size[next_row], /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(next_size, size_t, hdr->man_dtable.row_block_size[next_row], hsize_t);

 /* Check for skipping over blocks */
 if(min_dblock_size > next_size) {
@@ -168,7 +168,7 @@ HDfprintf(stderr, "%s: off = %Hu\n", "H5HF_dtable_lookup", off);
 /* Check for offset in first row */
 if(off < dtable->num_id_first_row) {
 *row = 0;
-H5_ASSIGN_OVERFLOW(/* To: */ *col, /* From: */ (off / dtable->cparam.start_block_size), /* From: */ hsize_t, /* To: */ unsigned);
+H5_CHECKED_ASSIGN(*col, unsigned, (off / dtable->cparam.start_block_size), hsize_t);
 } /* end if */
 else {
 unsigned high_bit = H5VM_log2_gen(off); /* Determine the high bit in the offset */
@@ -178,7 +178,7 @@ HDfprintf(stderr, "%s: off = %Hu\n", "H5HF_dtable_lookup", off);
 HDfprintf(stderr, "%s: high_bit = %u, off_mask = %Hu\n", "H5HF_dtable_lookup", high_bit, off_mask);
 #endif /* QAK */
 *row = (high_bit - dtable->first_row_bits) + 1;
-H5_ASSIGN_OVERFLOW(/* To: */ *col, /* From: */ ((off - off_mask) / dtable->row_block_size[*row]), /* From: */ hsize_t, /* To: */ unsigned);
+H5_CHECKED_ASSIGN(*col, unsigned, ((off - off_mask) / dtable->row_block_size[*row]), hsize_t);
 } /* end else */

 FUNC_LEAVE_NOAPI(SUCCEED)
@@ -262,7 +262,7 @@ H5HF_hdr_finish_init_phase2(H5HF_hdr_t *hdr)
 if(u < hdr->man_dtable.max_direct_rows) {
 hdr->man_dtable.row_tot_dblock_free[u] = hdr->man_dtable.row_block_size[u] -
 H5HF_MAN_ABS_DIRECT_OVERHEAD(hdr);
-H5_ASSIGN_OVERFLOW(/* To: */ hdr->man_dtable.row_max_dblock_free[u], /* From: */ hdr->man_dtable.row_tot_dblock_free[u], /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(hdr->man_dtable.row_max_dblock_free[u], size_t, hdr->man_dtable.row_tot_dblock_free[u], hsize_t);
 } /* end if */
 else
 if(H5HF_hdr_compute_free_space(hdr, u) < 0)
@@ -653,7 +653,7 @@ H5HF_huge_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,

 /* Retrieve the object's address & length */
 obj_addr = found_rec.addr;
-H5_ASSIGN_OVERFLOW(/* To: */ obj_size, /* From: */ found_rec.len, /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(obj_size, size_t, found_rec.len, hsize_t);
 filter_mask = found_rec.filter_mask;
 } /* end if */
 else {
@@ -669,7 +669,7 @@ H5HF_huge_op_real(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,

 /* Retrieve the object's address & length */
 obj_addr = found_rec.addr;
-H5_ASSIGN_OVERFLOW(/* To: */ obj_size, /* From: */ found_rec.len, /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(obj_size, size_t, found_rec.len, hsize_t);
 } /* end else */
 } /* end else */

@@ -797,7 +797,7 @@ H5HF_huge_write(H5HF_hdr_t *hdr, hid_t dxpl_id, const uint8_t *id,

 /* Retrieve the object's address & length */
 obj_addr = found_rec.addr;
-H5_ASSIGN_OVERFLOW(/* To: */ obj_size, /* From: */ found_rec.len, /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(obj_size, size_t, found_rec.len, hsize_t);
 } /* end else */

 /* Write the object's data to the file */
@@ -258,6 +258,7 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
 size_t spec_read_size; /* Size of buffer to speculatively read in */
 const uint8_t *p; /* Pointer into decoding buffer */
 haddr_t eoa; /* Relative end of file address */
+hsize_t min; /* temp min value to avoid macro nesting */
 H5HL_prfx_t *ret_value; /* Return value */

 FUNC_ENTER_NOAPI_NOINIT
@@ -276,7 +277,8 @@ H5HL_prefix_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
 HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, NULL, "unable to determine file size")

 /* Compute the size of the speculative local heap prefix buffer */
-H5_ASSIGN_OVERFLOW(spec_read_size, MIN(eoa - addr, H5HL_SPEC_READ_SIZE), /* From: */ hsize_t, /* To: */ size_t);
+min = MIN(eoa - addr, H5HL_SPEC_READ_SIZE);
+H5_CHECKED_ASSIGN(spec_read_size, size_t, min, hsize_t);
 HDassert(spec_read_size >= udata->sizeof_prfx);

 /* Attempt to speculatively read both local heap prefix and heap data */
@@ -433,7 +433,7 @@ H5Inmembers(H5I_type_t type, hsize_t *num_members)
 if((members = H5I_nmembers(type)) < 0)
 HGOTO_ERROR(H5E_ATOM, H5E_CANTCOUNT, FAIL, "can't compute number of members")

-H5_ASSIGN_OVERFLOW(*num_members, members, int64_t, hsize_t);
+H5_CHECKED_ASSIGN(*num_members, hsize_t, members, int64_t);
 } /* end if */

 done:
@@ -460,7 +460,7 @@ int64_t
 H5I_nmembers(H5I_type_t type)
 {
 H5I_id_type_t *type_ptr = NULL;
-int ret_value;
+int64_t ret_value;

 FUNC_ENTER_NOAPI(FAIL)

@@ -470,7 +470,7 @@ H5I_nmembers(H5I_type_t type)
 HGOTO_DONE(0);

 /* Set return value */
-H5_ASSIGN_OVERFLOW(ret_value, type_ptr->id_count, uint64_t, int64_t);
+H5_CHECKED_ASSIGN(ret_value, int64_t, type_ptr->id_count, uint64_t);

 done:
 FUNC_LEAVE_NOAPI(ret_value)
@@ -1383,7 +1383,7 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
 /* Query how many sections of this type */
 if(H5FS_sect_stats(f->shared->fs_man[ty], NULL, &hnums) < 0)
 HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space stats")
-H5_ASSIGN_OVERFLOW(nums, hnums, hsize_t, size_t);
+H5_CHECKED_ASSIGN(nums, size_t, hnums, hsize_t);

 /* Increment total # of sections */
 total_sects += nums;
@@ -2716,8 +2716,8 @@ H5O_get_hdr_info_real(const H5O_t *oh, H5O_hdr_info_t *hdr)
 hdr->version = oh->version;

 /* Set the number of messages & chunks */
-H5_ASSIGN_OVERFLOW(hdr->nmesgs, oh->nmesgs, size_t, unsigned);
-H5_ASSIGN_OVERFLOW(hdr->nchunks, oh->nchunks, size_t, unsigned);
+H5_CHECKED_ASSIGN(hdr->nmesgs, unsigned, oh->nmesgs, size_t);
+H5_CHECKED_ASSIGN(hdr->nchunks, unsigned, oh->nchunks, size_t);

 /* Set the status flags */
 hdr->flags = oh->flags;
@@ -218,7 +218,7 @@ H5O_attr_decode(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, unsigned UNUSED mesg_fl
 p += attr->shared->ds_size;

 /* Compute the size of the data */
-H5_ASSIGN_OVERFLOW(attr->shared->data_size, H5S_GET_EXTENT_NPOINTS(attr->shared->ds) * H5T_get_size(attr->shared->dt), hsize_t, size_t);
+H5_CHECKED_ASSIGN(attr->shared->data_size, size_t, H5S_GET_EXTENT_NPOINTS(attr->shared->ds) * H5T_get_size(attr->shared->dt), hsize_t);

 /* Go get the data */
 if(attr->shared->data_size) {
@@ -184,7 +184,7 @@ H5O_load(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_udata)
 HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, NULL, "unable to determine file size")

 /* Compute the size of the speculative object header buffer */
-H5_ASSIGN_OVERFLOW(spec_read_size, MIN(eoa - addr, H5O_SPEC_READ_SIZE), /* From: */ hsize_t, /* To: */ size_t);
+H5_CHECKED_ASSIGN(spec_read_size, size_t, MIN(eoa - addr, H5O_SPEC_READ_SIZE), hsize_t);

 /* Attempt to speculatively read both object header prefix and first chunk */
 if(H5F_block_read(f, H5FD_MEM_OHDR, addr, spec_read_size, dxpl_id, read_buf) < 0)
@@ -977,7 +977,7 @@ H5O_fill_convert(H5O_fill_t *fill, H5T_t *dset_type, hbool_t *fill_changed, hid_
 } /* end if */
 H5T_close(fill->type);
 fill->type = NULL;
-H5_ASSIGN_OVERFLOW(fill->size, H5T_get_size(dset_type), size_t, ssize_t);
+H5_CHECKED_ASSIGN(fill->size, ssize_t, H5T_get_size(dset_type), size_t);

 /* Note that the fill value info has changed */
 *fill_changed = TRUE;
@@ -807,7 +807,7 @@ H5O_msg_count(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)

 /* Count the messages of the correct type */
 msg_count = H5O_msg_count_real(oh, type);
-H5_ASSIGN_OVERFLOW(ret_value, msg_count, unsigned, int);
+H5_CHECKED_ASSIGN(ret_value, int, msg_count, unsigned);

 done:
 if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
@@ -2300,7 +2300,7 @@ H5O_msg_get_chunkno(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id)
 HGOTO_ERROR(H5E_OHDR, H5E_NOTFOUND, FAIL, "message type not found")

 /* Set return value */
-H5_ASSIGN_OVERFLOW(ret_value, idx_msg->chunkno, unsigned, int);
+H5_CHECKED_ASSIGN(ret_value, int, idx_msg->chunkno, unsigned);

 done:
 if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
@@ -509,7 +509,7 @@ H5P__decode_size_t(const void **_pp, void *_value)

 /* Decode the value */
 UINT64DECODE_VAR(*pp, enc_value, enc_size);
-H5_ASSIGN_OVERFLOW(*value, enc_value, uint64_t, size_t);
+H5_CHECKED_ASSIGN(*value, size_t, enc_value, uint64_t);

 FUNC_LEAVE_NOAPI(SUCCEED)
 } /* end H5P__decode_size_t() */
@@ -550,7 +550,7 @@ H5P__decode_hsize_t(const void **_pp, void *_value)

 /* Decode the value */
 UINT64DECODE_VAR(*pp, enc_value, enc_size);
-H5_ASSIGN_OVERFLOW(*value, enc_value, uint64_t, hsize_t);
+H5_CHECKED_ASSIGN(*value, hsize_t, enc_value, uint64_t);

 FUNC_LEAVE_NOAPI(SUCCEED)
 } /* end H5P__decode_hsize_t() */
@@ -179,7 +179,7 @@ H5Pset_local_heap_size_hint(hid_t plist_id, size_t size_hint)
 HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get group info")

 /* Update field */
-H5_ASSIGN_OVERFLOW(ginfo.lheap_size_hint, size_hint, size_t, uint32_t);
+H5_CHECKED_ASSIGN(ginfo.lheap_size_hint, uint32_t, size_hint, size_t);

 /* Set value */
 if(H5P_set(plist, H5G_CRT_GROUP_INFO_NAME, &ginfo) < 0)
@@ -1521,7 +1521,7 @@ H5S_encode(H5S_t *obj, unsigned char *buf, size_t *nalloc)
 /* Find out the size of buffer needed for selection */
 if((sselect_size = H5S_SELECT_SERIAL_SIZE(obj)) < 0)
 HGOTO_ERROR(H5E_DATASPACE, H5E_BADSIZE, FAIL, "can't find dataspace selection size")
-H5_ASSIGN_OVERFLOW(select_size, sselect_size, hssize_t, size_t);
+H5_CHECKED_ASSIGN(select_size, size_t, sselect_size, hssize_t);

 /* Verify the size of buffer. If it's not big enough, simply return the
 * right size without filling the buffer. */
@ -7635,7 +7635,7 @@ H5S_hyper_get_seq_list_gen(const H5S_t *space,H5S_sel_iter_t *iter,
/* Finish the span in the fastest changing dimension */

/* Compute the number of bytes to attempt in this span */
H5_ASSIGN_OVERFLOW(span_size,((curr_span->high-abs_arr[fast_dim])+1)*elem_size,hsize_t,size_t);
H5_CHECKED_ASSIGN(span_size, size_t, ((curr_span->high-abs_arr[fast_dim])+1)*elem_size, hsize_t);

/* Check number of bytes against upper bounds allowed */
if(span_size>io_bytes_left)
@ -7778,7 +7778,7 @@ H5S_hyper_get_seq_list_gen(const H5S_t *space,H5S_sel_iter_t *iter,
loc_off += curr_span->pstride;

/* Compute the number of elements to attempt in this span */
H5_ASSIGN_OVERFLOW(span_size, curr_span->nelem, hsize_t, size_t);
H5_CHECKED_ASSIGN(span_size, size_t, curr_span->nelem, hsize_t);

/* Check number of elements against upper bounds allowed */
if(span_size >= io_bytes_left) {
@ -8108,7 +8108,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
loc += offset[u] * slab[u];

/* Set the number of elements to write each time */
H5_ASSIGN_OVERFLOW(actual_elem, tdiminfo[fast_dim].block, hsize_t, size_t);
H5_CHECKED_ASSIGN(actual_elem, size_t, tdiminfo[fast_dim].block, hsize_t);

/* Set the number of actual bytes */
actual_bytes = actual_elem * elem_size;
@ -8117,7 +8117,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
fast_dim_start = tdiminfo[fast_dim].start;
fast_dim_stride = tdiminfo[fast_dim].stride;
fast_dim_block = tdiminfo[fast_dim].block;
H5_ASSIGN_OVERFLOW(fast_dim_buf_off, slab[fast_dim] * fast_dim_stride, hsize_t, size_t);
H5_CHECKED_ASSIGN(fast_dim_buf_off, size_t, slab[fast_dim] * fast_dim_stride, hsize_t);
fast_dim_offset = (hsize_t)((hssize_t)fast_dim_start + sel_off[fast_dim]);

/* Compute the number of blocks which would fit into the buffer */
@ -8138,7 +8138,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
/* Check if there is a partial row left (with full blocks) */
if(tmp_count[fast_dim] > 0) {
/* Get number of blocks in fastest dimension */
H5_ASSIGN_OVERFLOW(fast_dim_count, tdiminfo[fast_dim].count - tmp_count[fast_dim], hsize_t, size_t);
H5_CHECKED_ASSIGN(fast_dim_count, size_t, tdiminfo[fast_dim].count - tmp_count[fast_dim], hsize_t);

/* Make certain this entire row will fit into buffer */
fast_dim_count = MIN(fast_dim_count, tot_blk_count);
@ -8223,7 +8223,7 @@ H5S_hyper_get_seq_list_opt(const H5S_t *space, H5S_sel_iter_t *iter,
curr_rows = total_rows = (size_t)(tot_blk_count / tdiminfo[fast_dim].count);

/* Reset copy of number of blocks in fastest dimension */
H5_ASSIGN_OVERFLOW(fast_dim_count, tdiminfo[fast_dim].count, hsize_t, size_t);
H5_CHECKED_ASSIGN(fast_dim_count, size_t, tdiminfo[fast_dim].count, hsize_t);

/* Read in data until an entire sequence can't be written out any longer */
while(curr_rows > 0) {
@ -8528,7 +8528,7 @@ H5S_hyper_get_seq_list_single(const H5S_t *space, H5S_sel_iter_t *iter,
tot_blk_count = MIN(tot_blk_count, maxseq);

/* Set the number of elements to write each time */
H5_ASSIGN_OVERFLOW(actual_elem, fast_dim_block, hsize_t, size_t);
H5_CHECKED_ASSIGN(actual_elem, size_t, fast_dim_block, hsize_t);

/* Check for blocks to operate on */
if(tot_blk_count > 0) {
@ -8785,10 +8785,10 @@ H5S_hyper_get_seq_list(const H5S_t *space, unsigned UNUSED flags, H5S_sel_iter_t

/* Calculate the number of elements left in the sequence */
if(tdiminfo[fast_dim].count == 1) {
H5_ASSIGN_OVERFLOW(leftover, tdiminfo[fast_dim].block - (iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start), hsize_t, size_t);
H5_CHECKED_ASSIGN(leftover, size_t, tdiminfo[fast_dim].block - (iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start), hsize_t);
} /* end if */
else {
H5_ASSIGN_OVERFLOW(leftover, tdiminfo[fast_dim].block - ((iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start) % tdiminfo[fast_dim].stride), hsize_t, size_t);
H5_CHECKED_ASSIGN(leftover, size_t, tdiminfo[fast_dim].block - ((iter->u.hyp.off[fast_dim] - tdiminfo[fast_dim].start) % tdiminfo[fast_dim].stride), hsize_t);
} /* end else */

/* Make certain that we don't write too many */
@ -8807,7 +8807,7 @@ H5S_hyper_get_seq_list(const H5S_t *space, unsigned UNUSED flags, H5S_sel_iter_t

/* Add a new sequence */
off[0] = loc;
H5_ASSIGN_OVERFLOW(len[0], actual_elem * elem_size, hsize_t, size_t);
H5_CHECKED_ASSIGN(len[0], size_t, actual_elem * elem_size, hsize_t);

/* Increment sequence array locations */
off++;
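In the hyperslab hunks above, a span's byte length is computed in hsize_t arithmetic, checked-assigned into a size_t, and then compared against the remaining I/O budget. The sketch below is not HDF5's hyperslab walker, only an illustration of that pattern with stand-in variable names:

/* Sketch, not HDF5 code: compute a span's byte length in a 64-bit
 * unsigned type, narrow it with a check, then clamp it to the bytes
 * the caller still wants to process. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t low = 10, high = 73;        /* inclusive span bounds, in elements */
    uint64_t elem_size = 8;              /* bytes per element */
    size_t   io_bytes_left = 256;        /* remaining I/O budget */

    uint64_t span_bytes = ((high - low) + 1) * elem_size;
    size_t   span_size;

    assert(span_bytes <= (uint64_t)SIZE_MAX);   /* the checked narrowing */
    span_size = (size_t)span_bytes;

    if (span_size > io_bytes_left)              /* partial span: stop at the budget */
        span_size = io_bytes_left;

    printf("process %zu bytes of this span\n", span_size);
    return 0;
}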
@ -94,13 +94,13 @@ H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
/* Just treat the entire extent as a block of bytes */
if((snelmts = (hssize_t)H5S_GET_EXTENT_NPOINTS(space)) < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, hsize_t);
H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);

total_bytes = (hsize_t)elmt_size * nelmts;

/* fill in the return values */
*new_type = MPI_BYTE;
H5_ASSIGN_OVERFLOW(*count, total_bytes, hsize_t, int);
H5_CHECKED_ASSIGN(*count, int, total_bytes, hsize_t);
*is_derived_type = FALSE;

done:
@ -388,7 +388,7 @@ H5S_mpio_permute_type(const H5S_t *space, size_t elmt_size, hsize_t **permute,
sel_iter_init = TRUE; /* Selection iteration info has been initialized */

/* Set the number of elements to iterate over */
H5_ASSIGN_OVERFLOW(max_elem, num_points, hsize_t, size_t);
H5_CHECKED_ASSIGN(max_elem, size_t, num_points, hsize_t);

/* Loop, while elements left in selection */
u = 0;
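In H5S_mpio_all_type above, total_bytes is an hsize_t but the element count returned through *count is a plain int, because MPI count arguments are ints; the checked assignment is what flags selections larger than INT_MAX bytes in debug builds. A minimal sketch of that bound check, with uint64_t standing in for hsize_t and no real MPI calls:

/* Sketch, not HDF5 code: a 64-bit byte count must fit in an int before
 * it can be used as an MPI element count of MPI_BYTE. */
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t nelmts      = 1000000;            /* selected elements */
    uint64_t elmt_size   = 8;                  /* bytes per element */
    uint64_t total_bytes = elmt_size * nelmts;
    int      count;

    assert(total_bytes <= (uint64_t)INT_MAX);  /* what the debug check enforces */
    count = (int)total_bytes;

    printf("describing the selection as %d bytes\n", count);
    return 0;
}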
@ -1304,7 +1304,7 @@ H5S_select_iterate(void *buf, hid_t type_id, const H5S_t *space, H5D_operator_t
space_size[ndims] = elmt_size;

/* Compute the maximum number of bytes required */
H5_ASSIGN_OVERFLOW(max_elem, nelmts, hssize_t, size_t);
H5_CHECKED_ASSIGN(max_elem, size_t, nelmts, hssize_t);

/* Loop, while elements left in selection */
while(max_elem > 0 && user_ret == 0) {
@ -2063,7 +2063,7 @@ H5S_select_fill(const void *fill, size_t fill_size, const H5S_t *space, void *_b
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCOUNT, FAIL, "can't get number of elements selected")

/* Compute the number of bytes to process */
H5_ASSIGN_OVERFLOW(max_elem, nelmts, hssize_t, size_t);
H5_CHECKED_ASSIGN(max_elem, size_t, nelmts, hssize_t);

/* Loop, while elements left in selection */
while(max_elem > 0) {
@ -202,7 +202,7 @@ H5T__array_create(H5T_t *base, unsigned ndims, const hsize_t dim[/* ndims */])

/* Copy the array dimensions & compute the # of elements in the array */
for(u = 0, ret_value->shared->u.array.nelem = 1; u < ndims; u++) {
H5_ASSIGN_OVERFLOW(ret_value->shared->u.array.dim[u], dim[u], hsize_t, size_t);
H5_CHECKED_ASSIGN(ret_value->shared->u.array.dim[u], size_t, dim[u], hsize_t);
ret_value->shared->u.array.nelem *= (size_t)dim[u];
} /* end for */
@ -1952,7 +1952,7 @@ H5T_conv_struct_init(H5T_t *src, H5T_t *dst, H5T_cdata_t *cdata, hid_t dxpl_id)
src2dst[i] = -1;
for(j = 0; j < dst_nmembs; j++) {
if(!HDstrcmp(src->shared->u.compnd.memb[i].name, dst->shared->u.compnd.memb[j].name)) {
H5_ASSIGN_OVERFLOW(src2dst[i],j,unsigned,int);
H5_CHECKED_ASSIGN(src2dst[i], int, j, unsigned);
break;
} /* end if */
} /* end for */
@ -2187,16 +2187,16 @@ H5T__conv_struct(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
* Direction of conversion and striding through background.
*/
if(buf_stride) {
H5_ASSIGN_OVERFLOW(src_delta, buf_stride, size_t, ssize_t);
H5_CHECKED_ASSIGN(src_delta, ssize_t, buf_stride, size_t);
if(!bkg_stride) {
H5_ASSIGN_OVERFLOW(bkg_delta, dst->shared->size, size_t, ssize_t);
H5_CHECKED_ASSIGN(bkg_delta, ssize_t, dst->shared->size, size_t);
} /* end if */
else
H5_ASSIGN_OVERFLOW(bkg_delta, bkg_stride, size_t, ssize_t);
H5_CHECKED_ASSIGN(bkg_delta, ssize_t, bkg_stride, size_t);
} /* end if */
else if(dst->shared->size <= src->shared->size) {
H5_ASSIGN_OVERFLOW(src_delta, src->shared->size, size_t, ssize_t);
H5_ASSIGN_OVERFLOW(bkg_delta, dst->shared->size, size_t, ssize_t);
H5_CHECKED_ASSIGN(src_delta, ssize_t, src->shared->size, size_t);
H5_CHECKED_ASSIGN(bkg_delta, ssize_t, dst->shared->size, size_t);
} /* end else-if */
else {
H5_CHECK_OVERFLOW(src->shared->size, size_t, ssize_t);
@ -2278,7 +2278,7 @@ H5T__conv_struct(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,

/* If the bkg_delta was set to -(dst->shared->size), make it positive now */
if(buf_stride == 0 && dst->shared->size > src->shared->size)
H5_ASSIGN_OVERFLOW(bkg_delta, dst->shared->size, size_t, ssize_t);
H5_CHECKED_ASSIGN(bkg_delta, ssize_t, dst->shared->size, size_t);

/*
* Copy the background buffer back into the in-place conversion
@ -2833,8 +2833,8 @@ H5T__conv_enum(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
src_delta = dst_delta = (ssize_t)buf_stride;
s = d = buf;
} else if(dst->shared->size <= src->shared->size) {
H5_ASSIGN_OVERFLOW(src_delta, src->shared->size, size_t, ssize_t);
H5_ASSIGN_OVERFLOW(dst_delta, dst->shared->size, size_t, ssize_t);
H5_CHECKED_ASSIGN(src_delta, ssize_t, src->shared->size, size_t);
H5_CHECKED_ASSIGN(dst_delta, ssize_t, dst->shared->size, size_t);
s = d = buf;
} else {
H5_CHECK_OVERFLOW(src->shared->size, size_t, ssize_t);
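The conversion hunks above keep the per-element strides (src_delta, bkg_delta, dst_delta) as ssize_t rather than size_t: when the destination element is wider than the source, the converter walks the buffer from the last element backwards with negated strides (the second H5T__conv_struct hunk's comment about bkg_delta being set to -(dst->shared->size) is that case), so a size_t element size has to be range-checked before it becomes a signed stride. The sketch below is illustrative only and not HDF5's converter; it widens 2-byte values to 4 bytes in place using signed deltas:

/* Sketch, not HDF5 code: in-place widening must process elements from the
 * end of the buffer backwards so unconverted input is never overwritten,
 * which is why the per-element deltas are signed. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* 4 two-byte inputs converted in place to 4 four-byte outputs */
    unsigned char buf[16] = {1,0, 2,0, 3,0, 4,0};
    size_t        nelmts  = 4, src_size = 2, dst_size = 4;
    long          src_delta, dst_delta;   /* signed, like the ssize_t deltas */
    unsigned char *s, *d;

    if (dst_size <= src_size) {            /* forward pass is safe */
        src_delta = (long)src_size;
        dst_delta = (long)dst_size;
        s = buf;
        d = buf;
    } else {                               /* walk backwards: negative deltas */
        src_delta = -(long)src_size;
        dst_delta = -(long)dst_size;
        s = buf + (nelmts - 1) * src_size;
        d = buf + (nelmts - 1) * dst_size;
    }

    for (size_t u = 0; u < nelmts; u++) {
        unsigned char tmp[2];
        memcpy(tmp, s, src_size);          /* read the narrow value first */
        memset(d, 0, dst_size);            /* zero-extend (little-endian) */
        memcpy(d, tmp, src_size);
        if (u + 1 < nelmts) {              /* advance by the signed deltas */
            s += src_delta;
            d += dst_delta;
        }
    }

    for (size_t u = 0; u < sizeof(buf); u++)
        printf("%u ", buf[u]);
    printf("\n");
    return 0;
}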
@ -203,7 +203,7 @@ H5Tget_ebias(hid_t type_id)
HGOTO_ERROR(H5E_DATATYPE, H5E_BADTYPE, 0, "operation not defined for datatype class")

/* bias */
H5_ASSIGN_OVERFLOW(ret_value, dt->shared->u.atomic.u.f.ebias, uint64_t, size_t);
H5_CHECKED_ASSIGN(ret_value, size_t, dt->shared->u.atomic.u.f.ebias, uint64_t);

done:
FUNC_LEAVE_API(ret_value)
@ -276,7 +276,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig

if((snmemb = H5T_get_nmembers(dtype)) <= 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "compound data type doesn't have any member")
H5_ASSIGN_OVERFLOW(nmemb, snmemb, int, unsigned);
H5_CHECKED_ASSIGN(nmemb, unsigned, snmemb, int);

if(NULL == (memb_list = (H5T_t **)H5MM_calloc(nmemb * sizeof(H5T_t *))))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot allocate memory")
@ -388,7 +388,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig
/* Retrieve member info and insert members into new enum type */
if((snmemb = H5T_get_nmembers(dtype)) <= 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "enumerate data type doesn't have any member")
H5_ASSIGN_OVERFLOW(nmemb, snmemb, int, unsigned);
H5_CHECKED_ASSIGN(nmemb, unsigned, snmemb, int);
for(u = 0; u < nmemb; u++) {
if(NULL == (memb_name = H5T__get_member_name(dtype, u)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot get member name")
@ -429,7 +429,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig
/* Retrieve dimension information for array data type */
if((sarray_rank = H5T__get_array_ndims(dtype)) <= 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot get dimension rank")
H5_ASSIGN_OVERFLOW(array_rank, sarray_rank, int, unsigned);
H5_CHECKED_ASSIGN(array_rank, unsigned, sarray_rank, int);
if(NULL == (dims = (hsize_t*)H5MM_malloc(array_rank * sizeof(hsize_t))))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot allocate memory")
if(H5T__get_array_dims(dtype, dims) < 0)
@ -102,9 +102,9 @@ H5Z_filter_deflate (unsigned flags, size_t cd_nelmts,
/* Set the uncompression parameters */
HDmemset(&z_strm, 0, sizeof(z_strm));
z_strm.next_in = (Bytef *)*buf;
H5_ASSIGN_OVERFLOW(z_strm.avail_in,nbytes,size_t,unsigned);
H5_CHECKED_ASSIGN(z_strm.avail_in, unsigned, nbytes, size_t);
z_strm.next_out = (Bytef *)outbuf;
H5_ASSIGN_OVERFLOW(z_strm.avail_out,nalloc,size_t,unsigned);
H5_CHECKED_ASSIGN(z_strm.avail_out, unsigned, nalloc, size_t);

/* Initialize the uncompression routines */
if (Z_OK!=inflateInit(&z_strm))
@ -169,7 +169,7 @@ H5Z_filter_deflate (unsigned flags, size_t cd_nelmts,
int aggression; /* Compression aggression setting */

/* Set the compression aggression level */
H5_ASSIGN_OVERFLOW(aggression,cd_values[0],unsigned,int);
H5_CHECKED_ASSIGN(aggression, int, cd_values[0], unsigned);

/* Allocate output (compressed) buffer */
if(NULL == (outbuf = H5MM_malloc(z_dst_nbytes)))
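In H5Z_filter_deflate above, the buffer lengths are size_t but zlib's z_stream.avail_in/avail_out counters are unsigned int, so those assignments now go through the checked macro. The sketch below is illustrative only and makes no real zlib calls; it shows how a hypothetical streaming variant could instead cap each feed at what fits in an unsigned:

/* Sketch only: feeding a size_t-sized buffer to an interface whose
 * per-call chunk size is limited to an unsigned int. */
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    size_t nbytes    = (size_t)123456;   /* total bytes to hand to the codec */
    size_t remaining = nbytes;

    while (remaining > 0) {
        unsigned avail_in = (remaining > UINT_MAX) ? UINT_MAX
                                                   : (unsigned)remaining;

        /* ...point next_in at the current position and call the codec here... */
        remaining -= avail_in;
        printf("fed %u bytes, %zu bytes remaining\n", avail_in, remaining);
    }
    return 0;
}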
@ -803,7 +803,7 @@ H5Z_set_local_nbit(hid_t dcpl_id, hid_t type_id, hid_t space_id)
cd_values_index = 2;

/* Set "local" parameter for number of elements in the chunk */
H5_ASSIGN_OVERFLOW(cd_values[cd_values_index++], npoints, hssize_t, unsigned);
H5_CHECKED_ASSIGN(cd_values[cd_values_index++], unsigned, npoints, hssize_t);

/* Assume no need to compress now, will be changed to FALSE later if not */
need_not_compress = TRUE;
@ -901,7 +901,7 @@ H5Z_set_local_scaleoffset(hid_t dcpl_id, hid_t type_id, hid_t space_id)
HGOTO_ERROR(H5E_PLINE, H5E_CANTGET, FAIL, "unable to get number of points in the dataspace")

/* Set "local" parameter for this dataset's number of elements */
H5_ASSIGN_OVERFLOW(cd_values[H5Z_SCALEOFFSET_PARM_NELMTS],npoints,hssize_t,unsigned);
H5_CHECKED_ASSIGN(cd_values[H5Z_SCALEOFFSET_PARM_NELMTS], unsigned, npoints, hssize_t);

/* Get datatype's class */
if((dtype_class = H5T_get_class(type, TRUE)) == H5T_NO_CLASS)
@ -225,7 +225,7 @@ H5Z_set_local_szip(hid_t dcpl_id, hid_t type_id, hid_t space_id)
} /* end else */

/* Assign the final value to the scanline */
H5_ASSIGN_OVERFLOW(cd_values[H5Z_SZIP_PARM_PPS],scanline,hsize_t,unsigned);
H5_CHECKED_ASSIGN(cd_values[H5Z_SZIP_PARM_PPS], unsigned, scanline, hsize_t);

/* Get datatype's endianness order */
if((dtype_order = H5T_get_order(type)) == H5T_ORDER_ERROR)
@ -301,10 +301,10 @@ H5Z_filter_szip (unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, 0, "invalid deflate aggression level")

/* Copy the filter parameters into the szip parameter block */
H5_ASSIGN_OVERFLOW(sz_param.options_mask,cd_values[H5Z_SZIP_PARM_MASK],unsigned,int);
H5_ASSIGN_OVERFLOW(sz_param.bits_per_pixel,cd_values[H5Z_SZIP_PARM_BPP],unsigned,int);
H5_ASSIGN_OVERFLOW(sz_param.pixels_per_block,cd_values[H5Z_SZIP_PARM_PPB],unsigned,int);
H5_ASSIGN_OVERFLOW(sz_param.pixels_per_scanline,cd_values[H5Z_SZIP_PARM_PPS],unsigned,int);
H5_CHECKED_ASSIGN(sz_param.options_mask, int, cd_values[H5Z_SZIP_PARM_MASK], unsigned);
H5_CHECKED_ASSIGN(sz_param.bits_per_pixel, int, cd_values[H5Z_SZIP_PARM_BPP], unsigned);
H5_CHECKED_ASSIGN(sz_param.pixels_per_block, int, cd_values[H5Z_SZIP_PARM_PPB], unsigned);
H5_CHECKED_ASSIGN(sz_param.pixels_per_scanline, int, cd_values[H5Z_SZIP_PARM_PPS], unsigned);

/* Input; uncompress */
if (flags & H5Z_FLAG_REVERSE) {
@ -314,7 +314,7 @@ H5Z_filter_szip (unsigned flags, size_t cd_nelmts, const unsigned cd_values[],
/* Get the size of the uncompressed buffer */
newbuf = *buf;
UINT32DECODE(newbuf,stored_nalloc);
H5_ASSIGN_OVERFLOW(nalloc,stored_nalloc,uint32_t,size_t);
H5_CHECKED_ASSIGN(nalloc, size_t, stored_nalloc, uint32_t);

/* Allocate space for the uncompressed buffer */
if(NULL==(outbuf = H5MM_malloc(nalloc)))
@ -1523,11 +1523,14 @@ extern char *strdup(const char *s);
/* Include the generated overflow header file */
#include "H5overflow.h"

#define H5_ASSIGN_OVERFLOW(dst, src, srctype, dsttype) \
/* Assign a variable to one of a different size (think safer dst = (dsttype)src").
* The code generated by the macro checks for overflows.
*/
#define H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) \
H5_GLUE4(ASSIGN_,srctype,_TO_,dsttype)(dst,dsttype,src,srctype)\

#else /* NDEBUG */
#define H5_ASSIGN_OVERFLOW(dst, src, srctype, dsttype) \
#define H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) \
(dst) = (dsttype)(src);
#endif /* NDEBUG */
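The H5private.h hunk above is the heart of the rename: in debug builds, H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) pastes the two type names together with H5_GLUE4 and dispatches to a type-pair-specific ASSIGN_<srctype>_TO_<dsttype> macro from the generated H5overflow.h, while NDEBUG builds reduce it to a bare cast. The sketch below is a simplified stand-in, not the generated header (MY_CHECKED_ASSIGN is made up): it hand-writes one such check for a signed value going into a size_t and shows the new argument order at a call site.

/* Simplified stand-in, not HDF5's generated H5overflow.h: one hand-written
 * check for the "signed value -> size_t" pair, with a call that mirrors the
 * new H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) order. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MY_CHECKED_ASSIGN(dst, dsttype, src, srctype)                        \
    do {                                                                     \
        srctype my_src_ = (src);                                             \
        assert(my_src_ >= 0);                        /* reject negatives */  \
        assert((uintmax_t)my_src_ <= (uintmax_t)SIZE_MAX); /* must fit */    \
        (dst) = (dsttype)my_src_;                                            \
    } while (0)

int
main(void)
{
    long long snelmts = 42;   /* e.g. a point count returned as a signed value */
    size_t    nelmts  = 0;

    MY_CHECKED_ASSIGN(nelmts, size_t, snelmts, long long);
    printf("%zu\n", nelmts);
    return 0;
}

In a release (NDEBUG) build the checks disappear and only the cast remains, matching the second half of the hunk.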