[svn-r19309] Description:
Bring revisions from the Coverity branch back to the trunk:

r19191: Fix Coverity items 104 and 105. Added calls to H5HF_sect_indirect_free in
H5HF_sect_indirect_reduce and H5HF_sect_indirect_reduce_row if there is an error
before "peer_sect" is linked into the main free space structure via its direct
sections. Also delayed the call to H5HF_sect_indirect_first to prevent peer_sect
from being left in an inconsistent state.

r19268: Added a fix to disallow extendible compact datasets. This is the same check
as in H5D_contig_construct() in H5Dcontig.c. Added a test to verify the creation of
extendible datasets with various layouts.

Tested on: Mac OS X/32 10.6.4 (amazon) w/debug & production
(h5committested on branch)
This commit is contained in:
parent da98c48d44, commit 60908b2e12
@@ -168,13 +168,16 @@ done:
 *
 *-------------------------------------------------------------------------
 */
/* ARGSUSED */
static herr_t
H5D_compact_construct(H5F_t *f, H5D_t *dset)
{
    hssize_t stmp_size;                 /* Temporary holder for raw data size */
    hsize_t tmp_size;                   /* Temporary holder for raw data size */
    hsize_t max_comp_data_size;         /* Max. allowed size of compact data */
    hsize_t dim[H5O_LAYOUT_NDIMS];      /* Current size of data in elements */
    hsize_t max_dim[H5O_LAYOUT_NDIMS];  /* Maximum size of data in elements */
    int ndims;                          /* Rank of dataspace */
    int i;                              /* Local index variable */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_compact_construct)
@@ -183,6 +186,13 @@ H5D_compact_construct(H5F_t *f, H5D_t *dset)
    HDassert(f);
    HDassert(dset);

    /* Check for invalid dataset dimensions */
    if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dim, max_dim)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataspace dimensions")
    for(i = 0; i < ndims; i++)
        if(max_dim[i] > dim[i])
            HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "extendible compact dataset")

    /*
     * Compact dataset is stored in dataset object header message of
     * layout.
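For context, here is a minimal application-level sketch (not part of the commit) of the case that the new H5D_compact_construct() check rejects. It uses only public API calls that the new test below also exercises; the file name and dataset name are made up for illustration. With the check in place, the H5Dcreate2() call is expected to fail because the dataspace is extendible (max_dims > cur_dims) while the requested layout is H5D_COMPACT, matching the existing check in H5D_contig_construct().

#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    hid_t   fid, sid, dcpl, did;
    hsize_t cur_dims[1] = {10};     /* Current dataspace size */
    hsize_t max_dims[1] = {100};    /* Maximum size > current size, i.e. extendible */

    fid  = H5Fcreate("compact_extend.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    sid  = H5Screate_simple(1, cur_dims, max_dims);
    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_layout(dcpl, H5D_COMPACT);

    /* With the new check in H5D_compact_construct(), this creation call
     * should fail and return a negative identifier */
    H5E_BEGIN_TRY {
        did = H5Dcreate2(fid, "compact", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
    } H5E_END_TRY;

    printf("compact dataset creation %s\n", did < 0 ? "failed as expected" : "unexpectedly succeeded");

    if(did >= 0)
        H5Dclose(did);
    H5Pclose(dcpl);
    H5Sclose(sid);
    H5Fclose(fid);

    return 0;
}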
@@ -2335,7 +2335,7 @@ done:


/*-------------------------------------------------------------------------
 * Function:    H5HF_sect_indirect_row
 * Function:    H5HF_sect_indirect_for_row
 *
 * Purpose:     Create the underlying indirect section for a new row section
 *
@@ -2924,6 +2924,7 @@ H5HF_sect_indirect_reduce_row(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_
    unsigned start_col;                 /* Start column in indirect block */
    unsigned end_entry;                 /* Entry for last block covered */
    unsigned end_row;                   /* End row in indirect block */
    H5HF_free_section_t *peer_sect = NULL;      /* Peer indirect section */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5HF_sect_indirect_reduce_row)
@@ -3053,7 +3054,6 @@ H5HF_sect_indirect_reduce_row(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_
            } /* end if */
        } /* end if */
        else {
            H5HF_free_section_t *peer_sect;     /* Peer indirect section */
            H5HF_indirect_t *iblock;            /* Pointer to indirect block for this section */
            hsize_t iblock_off;                 /* Section's indirect block's offset in "heap space" */
            unsigned peer_nentries;             /* Number of entries in new peer indirect section */
@@ -3090,11 +3090,11 @@ H5HF_sect_indirect_reduce_row(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_
                HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't create indirect section")

            /* Set up direct row & indirect entry information for peer section */
            peer_sect->u.indirect.dir_nrows = peer_dir_nrows;
            if(NULL == (peer_sect->u.indirect.dir_rows = (H5HF_free_section_t **)H5MM_malloc(sizeof(H5HF_free_section_t *) * peer_dir_nrows)))
                HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "allocation failed for row section pointer array")
            peer_sect->u.indirect.indir_nents = 0;
            peer_sect->u.indirect.indir_ents = NULL;
            peer_sect->u.indirect.dir_nrows = peer_dir_nrows;
            if(NULL == (peer_sect->u.indirect.dir_rows = (H5HF_free_section_t **)H5MM_malloc(sizeof(H5HF_free_section_t *) * peer_dir_nrows)))
                HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "allocation failed for row section pointer array")

            /* Transfer row sections between current & peer sections */
            HDmemcpy(&peer_sect->u.indirect.dir_rows[0],
@@ -3137,6 +3137,9 @@ H5HF_sect_indirect_reduce_row(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_
                (sect->u.indirect.indir_nents + sect->u.indirect.dir_nrows));
            HDassert(peer_sect->u.indirect.rc ==
                (peer_sect->u.indirect.indir_nents + peer_sect->u.indirect.dir_nrows));

            /* Reset the peer_sect variable, to indicate that it has been hooked into the data structures correctly and shouldn't be freed */
            peer_sect = NULL;
        } /* end else */
    } /* end if */
    else {
@@ -3150,6 +3153,16 @@ H5HF_sect_indirect_reduce_row(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_
    } /* end else */

done:
    /* Free allocated peer_sect.  Note that this is necessary for all failures until peer_sect is linked
     * into the main free space structures (via the direct blocks), and the reference count is updated. */
    if(peer_sect) {
        /* Sanity check - we should only be here if an error occurred */
        HDassert(ret_value < 0);

        if(H5HF_sect_indirect_free(peer_sect) < 0)
            HDONE_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't free indirect section node")
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HF_sect_indirect_reduce_row() */

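The done: block just above (and the matching one added to H5HF_sect_indirect_reduce() below) is an instance of a common cleanup idiom: track the partially constructed section in a local pointer, perform every step that can fail before the section is handed over to the free-space structures (which is why the H5HF_sect_indirect_first() call is delayed), and clear the pointer only at that hand-over point so the error path frees exactly what is still unowned. A stripped-down sketch of the idiom follows; the node_t and list_t types and the init_payload() helper are hypothetical stand-ins for the real heap-section types, not HDF5 code.

#include <stdlib.h>

typedef struct node_t { struct node_t *next; int payload; } node_t;
typedef struct list_t { node_t *head; } list_t;

/* Fallible helper, assumed for illustration: fills in the payload */
static int
init_payload(node_t *node, int value)
{
    if(value < 0)
        return -1;              /* Simulated failure */
    node->payload = value;
    return 0;
}

/* Create a node and link it into 'list'; on any failure before the node
 * is linked, the cleanup block frees it so it cannot leak */
static int
add_node(list_t *list, int value)
{
    node_t *new_node = NULL;    /* Tracked until it is safely linked */
    int ret_value = 0;

    if(NULL == (new_node = (node_t *)malloc(sizeof(node_t)))) {
        ret_value = -1;
        goto done;
    } /* end if */

    /* Any fallible setup happens *before* the node is linked in */
    if(init_payload(new_node, value) < 0) {
        ret_value = -1;
        goto done;
    } /* end if */

    /* Hook the node into the list, then reset the local pointer to
     * signal that ownership has transferred and it must not be freed */
    new_node->next = list->head;
    list->head = new_node;
    new_node = NULL;

done:
    /* Only non-NULL here if an error occurred before the node was linked */
    if(new_node)
        free(new_node);

    return ret_value;
} /* end add_node() */

With these definitions, add_node(&list, 5) links a node into the list, while add_node(&list, -1) fails after the allocation and frees the node in the done: block instead of leaking it.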
@@ -3178,6 +3191,7 @@ H5HF_sect_indirect_reduce(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_t *s
    unsigned start_col;                 /* Start column in indirect block */
    unsigned end_entry;                 /* Entry for last block covered */
    unsigned end_row;                   /* End row in indirect block */
    H5HF_free_section_t *peer_sect = NULL;      /* Peer indirect section */
    herr_t ret_value = SUCCEED;         /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5HF_sect_indirect_reduce)
@@ -3264,7 +3278,6 @@ H5HF_sect_indirect_reduce(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_t *s
            sect->u.indirect.indir_ents = (H5HF_free_section_t **)H5MM_xfree(sect->u.indirect.indir_ents);
        } /* end if */
        else {
            H5HF_free_section_t *peer_sect;     /* Peer indirect section */
            H5HF_indirect_t *iblock;            /* Pointer to indirect block for this section */
            hsize_t iblock_off;                 /* Section's indirect block's offset in "heap space" */
            haddr_t peer_sect_addr;             /* Address of new peer section in "heap space" */
@@ -3320,7 +3333,7 @@ H5HF_sect_indirect_reduce(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_t *s
            peer_sect->u.indirect.dir_rows = NULL;
            peer_sect->u.indirect.indir_nents = peer_nentries;
            if(NULL == (peer_sect->u.indirect.indir_ents = (H5HF_free_section_t **)H5MM_malloc(sizeof(H5HF_free_section_t *) * peer_nentries)))
                HGOTO_ERROR(H5E_HEAP, H5E_NOSPACE, FAIL, "allocation failed for indirect section pointer array")
                HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, FAIL, "allocation failed for indirect section pointer array")

            /* Transfer child indirect sections between current & peer sections */
            HDmemcpy(&peer_sect->u.indirect.indir_ents[0],
@@ -3336,10 +3349,6 @@ H5HF_sect_indirect_reduce(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_t *s
            for(u = 0; u < peer_nentries; u++)
                peer_sect->u.indirect.indir_ents[u]->u.indirect.parent = peer_sect;

            /* Make new "first row" in peer section */
            if(H5HF_sect_indirect_first(hdr, dxpl_id, peer_sect->u.indirect.indir_ents[0]) < 0)
                HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't make new 'first row' for peer indirect section")

            /* Adjust reference counts for current & peer sections */
            peer_sect->u.indirect.rc = peer_nentries;
            sect->u.indirect.rc -= peer_nentries;
@@ -3355,6 +3364,13 @@ H5HF_sect_indirect_reduce(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_t *s
                (sect->u.indirect.indir_nents + sect->u.indirect.dir_nrows));
            HDassert(peer_sect->u.indirect.rc ==
                (peer_sect->u.indirect.indir_nents + peer_sect->u.indirect.dir_nrows));

            /* Make new "first row" in peer section */
            if(H5HF_sect_indirect_first(hdr, dxpl_id, peer_sect->u.indirect.indir_ents[0]) < 0)
                HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't make new 'first row' for peer indirect section")

            /* Reset the peer_sect variable, to indicate that it has been hooked into the data structures correctly and shouldn't be freed */
            peer_sect = NULL;
        } /* end else */
    } /* end if */
    else {
@@ -3373,6 +3389,16 @@ H5HF_sect_indirect_reduce(H5HF_hdr_t *hdr, hid_t dxpl_id, H5HF_free_section_t *s
        HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't decrement section's ref. count ")

done:
    /* Free allocated peer_sect.  Note that this is necessary for all failures until peer_sect is linked
     * into the main free space structures (via the direct blocks), and the reference count is updated. */
    if(peer_sect) {
        /* Sanity check - we should only be here if an error occurred */
        HDassert(ret_value < 0);

        if(H5HF_sect_indirect_free(peer_sect) < 0)
            HDONE_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't free indirect section node")
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5HF_sect_indirect_reduce() */

test/dsets.c | 126
@@ -49,6 +49,7 @@ const char *FILENAME[] = {
    "big_chunk",
    "chunk_expand",
    "copy_dcpl_newfile",
    "layout_extend",
    NULL
};
#define FILENAME_BUF_SIZE       1024
@@ -846,6 +847,130 @@ error:
    return -1;
} /* end test_max_compact() */


/*-------------------------------------------------------------------------
 * Function: test_layout_extend
 *
 * Purpose:  Verify that the creation of extendible dataset with dataspace:
 *           cur_dims < max_dims (max_dims can be fixed size or H5S_UNLIMITED)
 *           will behave as follows:
 *               H5D_COMPACT layout: fail
 *               H5D_CONTIGUOUS layout: fail
 *               H5D_CHUNKED layout: succeed
 *
 * Return:   Success: 0
 *           Failure: -1
 *
 * Programmer: Vailin Choi; August 2010
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_layout_extend(hid_t fapl)
{
    char filename[FILENAME_BUF_SIZE];   /* File name */
    hid_t fid;                          /* File id */
    hid_t sid_fix, sid_unlim;           /* Dataspace id */
    hid_t dcpl_compact, dcpl_contig, dcpl_chunked;      /* Dataset creation property list id */
    hid_t did_fixed, did_unlim;         /* Dataset id */
    hsize_t cur_size[1] = {10};         /* Current size of dataspace */
    hsize_t max_unlim[1] = {H5S_UNLIMITED};     /* Maximum size of dataspace (unlimited) */
    hsize_t max_fix[1] = {100};         /* Maximum size of dataspace (fixed) */

    TESTING("extendible dataset with various layout");

    /* Create a file */
    h5_fixname(FILENAME[12], fapl, filename, sizeof filename);
    if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
        FAIL_STACK_ERROR

    /* Create dataspace */
    if((sid_fix = H5Screate_simple(1, cur_size, max_fix)) < 0)
        FAIL_STACK_ERROR
    if((sid_unlim = H5Screate_simple(1, cur_size, max_unlim)) < 0)
        FAIL_STACK_ERROR

    /* Create property list for compact dataset creation */
    if((dcpl_compact = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        FAIL_STACK_ERROR
    if(H5Pset_layout(dcpl_compact, H5D_COMPACT) < 0)
        FAIL_STACK_ERROR

    /* Create dataset with extendible dataspace (fixed max_dims) should fail */
    H5E_BEGIN_TRY {
        if(H5Dcreate2(fid, "compact", H5T_NATIVE_INT, sid_fix, H5P_DEFAULT, dcpl_compact, H5P_DEFAULT) != FAIL)
            TEST_ERROR
    } H5E_END_TRY;

    /* Create dataset with extendible dataspace (unlimited max_dims) should fail */
    H5E_BEGIN_TRY {
        if(H5Dcreate2(fid, "compact", H5T_NATIVE_INT, sid_unlim, H5P_DEFAULT, dcpl_compact, H5P_DEFAULT) != FAIL)
            TEST_ERROR
    } H5E_END_TRY;

    /* Create property list for contiguous dataset creation */
    if((dcpl_contig = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        FAIL_STACK_ERROR
    if((H5Pset_layout(dcpl_contig, H5D_CONTIGUOUS)) < 0)
        FAIL_STACK_ERROR

    /* Create dataset with extendible dataspace (fixed max_dims) should fail */
    H5E_BEGIN_TRY {
        if(H5Dcreate2(fid, "contig", H5T_NATIVE_INT, sid_fix, H5P_DEFAULT, dcpl_contig, H5P_DEFAULT) != FAIL)
            TEST_ERROR
    } H5E_END_TRY;

    /* Create dataset with extendible dataspace (unlimited max_dims) should fail */
    H5E_BEGIN_TRY {
        if(H5Dcreate2(fid, "contig", H5T_NATIVE_INT, sid_unlim, H5P_DEFAULT, dcpl_contig, H5P_DEFAULT) != FAIL)
            TEST_ERROR
    } H5E_END_TRY;

    /* Create property list for chunked dataset creation */
    if((dcpl_chunked = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        FAIL_STACK_ERROR
    if(H5Pset_layout(dcpl_chunked, H5D_CHUNKED) < 0)
        FAIL_STACK_ERROR

    /* Create dataset with extendible dataspace (fixed max_dims) should succeed */
    if((did_fixed = H5Dcreate2(fid, "chunked_fixed", H5T_NATIVE_INT, sid_fix, H5P_DEFAULT, dcpl_chunked, H5P_DEFAULT)) < 0)
        FAIL_STACK_ERROR

    /* Create dataset with extendible dataspace (unlimited max_dims) should succeed */
    if((did_unlim = H5Dcreate2(fid, "chunked_unlim", H5T_NATIVE_INT, sid_unlim, H5P_DEFAULT, dcpl_chunked, H5P_DEFAULT)) < 0)
        FAIL_STACK_ERROR

    /* Closing */
    if(H5Sclose(sid_fix) < 0) FAIL_STACK_ERROR
    if(H5Sclose(sid_unlim) < 0) FAIL_STACK_ERROR

    if(H5Pclose(dcpl_compact) < 0) FAIL_STACK_ERROR
    if(H5Pclose(dcpl_contig) < 0) FAIL_STACK_ERROR
    if(H5Pclose(dcpl_chunked) < 0) FAIL_STACK_ERROR

    if(H5Dclose(did_fixed) < 0) FAIL_STACK_ERROR
    if(H5Dclose(did_unlim) < 0) FAIL_STACK_ERROR

    if(H5Fclose(fid) < 0) FAIL_STACK_ERROR

    PASSED();
    return 0;

error:
    H5E_BEGIN_TRY {
        H5Sclose(sid_fix);
        H5Sclose(sid_unlim);
        H5Pclose(dcpl_compact);
        H5Pclose(dcpl_contig);
        H5Pclose(dcpl_chunked);
        H5Dclose(did_fixed);
        H5Dclose(did_unlim);
        H5Fclose(fid);
    } H5E_END_TRY;

    return -1;
} /* end test_layout_extend() */

/*-------------------------------------------------------------------------
 * Function: test_conv_buffer
@@ -7733,6 +7858,7 @@ main(void)
    nerrors += (test_chunk_cache(my_fapl) < 0 ? 1 : 0);
    nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
    nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
    nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);

    if(H5Fclose(file) < 0)
        goto error;