[svn-r13883] Description:

Fixed bugs with VL-datatype fill values for chunked dataset storage.

Tested on:
    Mac OS X/32 10.4.9 (amazon)
    FreeBSD/32 6.2 (duty)
    FreeBSD/64 6.2 (liberty)
    Linux/32 2.6 (chicago)
    Linux/64 2.6 (chicago2)
This commit is contained in:
Quincey Koziol 2007-06-19 12:35:24 -05:00
parent b2318f778a
commit 32d09759bd
2 changed files with 618 additions and 333 deletions

View File

@ -1843,13 +1843,101 @@ H5D_istore_lock(const H5D_io_info_t *io_info, H5D_istore_ud1_t *udata,
if(fill->fill_time == H5D_FILL_TIME_ALLOC ||
(fill->fill_time == H5D_FILL_TIME_IFSET && fill_status == H5D_FILL_VALUE_USER_DEFINED)) {
/*
* The chunk doesn't exist in the file. Replicate the fill
* value throughout the chunk, if the fill value is defined.
*/
if(fill->buf) {
/*
* The chunk doesn't exist in the file. Replicate the fill
* value throughout the chunk.
*/
HDassert(0 == (chunk_size % (size_t)fill->size));
H5V_array_fill(chunk, fill->buf, (size_t)fill->size, chunk_size / fill->size);
size_t elmts_per_chunk; /* # of elements per chunk */
/* Sanity check */
HDassert(0 == (chunk_size % fill->size));
elmts_per_chunk = chunk_size / fill->size;
/* If necessary, convert fill value datatypes (which copies VL components, etc.) */
if(H5T_detect_class(dset->shared->type, H5T_VLEN) > 0) {
H5T_path_t *tpath; /* Datatype conversion path */
uint8_t *bkg_buf = NULL; /* Background conversion buffer */
H5T_t *mem_type; /* Pointer to memory datatype */
size_t mem_type_size, file_type_size; /* Size of datatype in memory and on disk */
hid_t mem_tid; /* Memory version of disk datatype */
/* Create temporary datatype for conversion operation */
if(NULL == (mem_type = H5T_copy(dset->shared->type, H5T_COPY_REOPEN)))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "unable to copy file datatype")
if((mem_tid = H5I_register(H5I_DATATYPE, mem_type)) < 0) {
H5T_close(mem_type);
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register memory datatype")
} /* end if */
/* Retrieve sizes of memory & file datatypes */
mem_type_size = H5T_get_size(mem_type);
HDassert(mem_type_size > 0);
file_type_size = H5T_get_size(dset->shared->type);
HDassert(file_type_size == (size_t)fill->size);
/* Get the datatype conversion path for this operation */
if(NULL == (tpath = H5T_path_find(dset->shared->type, mem_type, NULL, NULL, io_info->dxpl_id, FALSE))) {
H5I_dec_ref(mem_tid);
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to convert between src and dst datatypes")
} /* end if */
/* Allocate a background buffer, if necessary */
if(H5T_path_bkg(tpath) && NULL == (bkg_buf = H5FL_BLK_CALLOC(type_conv, (elmts_per_chunk * MAX(mem_type_size, file_type_size))))) {
H5I_dec_ref(mem_tid);
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
} /* end if */
/* Make a copy of the (disk-based) fill value into the chunk buffer */
HDmemcpy(chunk, fill->buf, file_type_size);
/* Type convert the chunk buffer, to copy any VL components */
if(H5T_convert(tpath, dset->shared->type_id, mem_tid, (size_t)1, (size_t)0, (size_t)0, chunk, bkg_buf, io_info->dxpl_id) < 0) {
if(bkg_buf)
H5FL_BLK_FREE(type_conv, bkg_buf);
H5I_dec_ref(mem_tid);
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "data type conversion failed")
} /* end if */
/* Replicate the fill value into the cached buffer */
H5V_array_fill(chunk, chunk, mem_type_size, elmts_per_chunk);
/* Get the inverse datatype conversion path for this operation */
if(NULL == (tpath = H5T_path_find(mem_type, dset->shared->type, NULL, NULL, io_info->dxpl_id, FALSE))) {
if(bkg_buf)
H5FL_BLK_FREE(type_conv, bkg_buf);
H5I_dec_ref(mem_tid);
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to convert between src and dst datatypes")
} /* end if */
/* Allocate or reset the background buffer, if necessary */
if(H5T_path_bkg(tpath)) {
if(bkg_buf)
HDmemset(bkg_buf, 0, MAX(mem_type_size, file_type_size));
else {
if(NULL == (bkg_buf = H5FL_BLK_CALLOC(type_conv, (elmts_per_chunk * MAX(mem_type_size, file_type_size))))) {
H5I_dec_ref(mem_tid);
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
} /* end if */
} /* end else */
} /* end if */
/* Type convert the chunk buffer, to copy any VL components */
if(H5T_convert(tpath, mem_tid, dset->shared->type_id, elmts_per_chunk, (size_t)0, (size_t)0, chunk, bkg_buf, io_info->dxpl_id) < 0) {
if(bkg_buf)
H5FL_BLK_FREE(type_conv, bkg_buf);
H5I_dec_ref(mem_tid);
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, NULL, "data type conversion failed")
} /* end if */
/* Release resources used */
if(bkg_buf)
H5FL_BLK_FREE(type_conv, bkg_buf);
H5I_dec_ref(mem_tid);
} /* end if */
else
/* Replicate the [non-VL] fill value into chunk */
H5V_array_fill(chunk, fill->buf, (size_t)fill->size, elmts_per_chunk);
} /* end if */
else {
/*
@ -2715,13 +2803,14 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
H5D_io_info_t io_info; /* Dataset I/O info */
H5D_storage_t store; /* Dataset storage information */
hsize_t chunk_offset[H5O_LAYOUT_NDIMS]; /* Offset of current chunk */
hsize_t chunk_size; /* Size of chunk in bytes */
unsigned filter_mask = 0; /* Filter mask for chunks that have them */
size_t elmts_per_chunk; /* # of elements which fit in a chunk */
size_t orig_chunk_size; /* Original size of chunk in bytes */
unsigned filter_mask = 0; /* Filter mask for chunks that have them */
const H5O_pline_t *pline = &(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
const H5O_fill_t *fill = &(dset->shared->dcpl_cache.fill); /* Fill value info */
H5D_fill_value_t fill_status; /* The fill value status */
hbool_t should_fill = FALSE; /* Whether fill values should be written */
void *chunk = NULL; /* Chunk buffer for writing fill values */
hbool_t should_fill = FALSE; /* Whether fill values should be written */
void *chunk = NULL; /* Chunk buffer for writing fill values */
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
#ifdef H5_HAVE_PARALLEL
@ -2734,8 +2823,15 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
hbool_t carry; /* Flag to indicate that chunk increment carrys to higher dimension (sorta) */
int space_ndims; /* Dataset's space rank */
hsize_t space_dim[H5O_LAYOUT_NDIMS]; /* Dataset's dataspace dimensions */
int i; /* Local index variable */
unsigned u; /* Local index variable */
H5T_path_t *fill_to_mem_tpath; /* Datatype conversion path for converting the fill value to the memory buffer */
H5T_path_t *mem_to_dset_tpath; /* Datatype conversion path for converting the memory buffer to the dataset elements */
uint8_t *bkg_buf = NULL; /* Background conversion buffer */
H5T_t *mem_type = NULL; /* Pointer to memory datatype */
size_t mem_type_size, file_type_size; /* Size of datatype in memory and on disk */
size_t elmt_size; /* Size of each element */
hid_t mem_tid = (-1); /* Memory version of disk datatype */
size_t bkg_buf_size; /* Size of background buffer */
hbool_t has_vlen_fill_type = FALSE; /* Whether the datatype for the fill value has a variable-length component */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5D_istore_allocate, FAIL)
@ -2746,9 +2842,9 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
HDassert(H5F_addr_defined(dset->shared->layout.u.chunk.addr));
HDassert(TRUE == H5P_isa_class(dxpl_id, H5P_DATASET_XFER));
/* We only handle simple data spaces so far */
/* Retrieve the dataset dimensions */
if((space_ndims = H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL)) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple data space info")
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple dataspace info")
space_dim[space_ndims] = dset->shared->layout.u.chunk.dim[space_ndims];
/* Fill the DXPL cache values for later use */
@ -2771,13 +2867,9 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
} /* end if */
#endif /* H5_HAVE_PARALLEL */
/*
* Setup indice to go through all chunks. (Future improvement:
* should allocate only chunks that have no file space assigned yet).
*/
for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++)
chunk_offset[u] = 0;
chunk_size = dset->shared->layout.u.chunk.size;
/* Get original chunk size */
H5_CHECK_OVERFLOW(dset->shared->layout.u.chunk.size, hsize_t, size_t);
orig_chunk_size = (size_t)dset->shared->layout.u.chunk.size;
/* Check the dataset's fill-value status */
if(H5P_is_fill_value_defined(fill, &fill_status) < 0)
@ -2793,34 +2885,92 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
|| pline->nused > 0)
should_fill = TRUE;
/* Check if fill values should be written to blocks */
/* Check if fill values should be written to chunks */
if(should_fill) {
/* Allocate chunk buffer for processes to use when writing fill values */
H5_CHECK_OVERFLOW(chunk_size, hsize_t, size_t);
if(NULL == (chunk = H5D_istore_chunk_alloc((size_t)chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for chunk")
/* Fill the chunk with the proper values */
if(fill->buf) {
/* Replicate the fill value throughout the chunk */
HDassert(0 == (chunk_size % (size_t)fill->size));
H5V_array_fill(chunk, fill->buf, (size_t)fill->size, (size_t)(chunk_size / fill->size));
/* Detect whether the datatype has a VL component */
has_vlen_fill_type = H5T_detect_class(dset->shared->type, H5T_VLEN);
/* If necessary, convert fill value datatypes (which copies VL components, etc.) */
if(has_vlen_fill_type) {
/* Create temporary datatype for conversion operation */
if(NULL == (mem_type = H5T_copy(dset->shared->type, H5T_COPY_REOPEN)))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL, "unable to copy file datatype")
if((mem_tid = H5I_register(H5I_DATATYPE, mem_type)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register memory datatype")
/* Retrieve sizes of memory & file datatypes */
mem_type_size = H5T_get_size(mem_type);
HDassert(mem_type_size > 0);
file_type_size = H5T_get_size(dset->shared->type);
HDassert(file_type_size == (size_t)fill->size);
/* Compute the base size for a chunk to operate on */
elmt_size = MAX(mem_type_size, file_type_size);
elmts_per_chunk = dset->shared->layout.u.chunk.size / file_type_size;
orig_chunk_size = elmts_per_chunk * elmt_size;
/* Allocate a chunk buffer now, if _no_ filters are used */
if(pline->nused == 0)
if(NULL == (chunk = H5D_istore_chunk_alloc(orig_chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for chunk")
/* Get the datatype conversion path for this operation */
if(NULL == (fill_to_mem_tpath = H5T_path_find(dset->shared->type, mem_type, NULL, NULL, dxpl_id, FALSE)))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between src and dst datatypes")
/* Get the inverse datatype conversion path for this operation */
if(NULL == (mem_to_dset_tpath = H5T_path_find(mem_type, dset->shared->type, NULL, NULL, dxpl_id, FALSE)))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between src and dst datatypes")
/* Check if we need to allocate a background buffer */
if(H5T_path_bkg(fill_to_mem_tpath) || H5T_path_bkg(mem_to_dset_tpath)) {
/* Check for inverse datatype conversion needing a background buffer */
/* (do this first, since it needs a larger buffer) */
if(H5T_path_bkg(mem_to_dset_tpath))
bkg_buf_size = elmts_per_chunk * elmt_size;
else
bkg_buf_size = elmt_size;
/* Allocate the background buffer */
if(NULL == (bkg_buf = H5FL_BLK_MALLOC(type_conv, bkg_buf_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
} /* end if */
} /* end if */
else {
/* Allocate chunk buffer for processes to use when writing fill values */
if(NULL == (chunk = H5D_istore_chunk_alloc(orig_chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for chunk")
/*
* Replicate the fill value throughout the chunk.
*/
HDassert(0 == (orig_chunk_size % fill->size));
H5V_array_fill(chunk, fill->buf, (size_t)fill->size, (size_t)(orig_chunk_size / fill->size));
} /* end else */
} /* end if */
else
/* No fill value was specified, assume all zeros. */
HDmemset(chunk, 0, (size_t)chunk_size);
else {
/* Allocate chunk buffer for processes to use when writing fill values */
if(NULL == (chunk = H5D_istore_chunk_alloc(orig_chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for chunk")
/*
* No fill value was specified, assume all zeros.
*/
HDmemset(chunk, 0, orig_chunk_size);
} /* end else */
/* Check if there are filters which need to be applied to the chunk */
if(pline->nused > 0) {
size_t buf_size = (size_t)chunk_size;
size_t nbytes = (size_t)chunk_size;
/* (only do this in advance when the chunk info can be re-used (i.e.
* it doesn't contain any non-default VL datatype fill values)
*/
if(!has_vlen_fill_type && pline->nused > 0) {
size_t buf_size = orig_chunk_size;
/* Push the chunk through the filters */
if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &chunk) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, FAIL, "output pipeline failed")
/* Keep the number of bytes the chunk turned in to */
chunk_size = nbytes;
if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &orig_chunk_size, &buf_size, &chunk) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
} /* end if */
} /* end if */
@ -2828,16 +2978,20 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
store.chunk.offset = chunk_offset;
H5D_BUILD_IO_INFO(&io_info, dset, dxpl_cache, dxpl_id, &store);
/* Reset the chunk offset indices */
HDmemset(chunk_offset, 0, (dset->shared->layout.u.chunk.ndims * sizeof(chunk_offset[0])));
/* Loop over all chunks */
carry = FALSE;
while(!carry) {
hbool_t chunk_exists; /* Flag to indicate whether a chunk exists already */
int i; /* Local index variable */
/* Check if the chunk exists yet on disk */
chunk_exists = TRUE;
if(H5D_istore_get_addr(&io_info, NULL) == HADDR_UNDEF) {
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
H5D_rdcc_ent_t *ent; /*cache entry */
if(!H5F_addr_defined(H5D_istore_get_addr(&io_info, NULL))) {
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* Raw data chunk cache */
H5D_rdcc_ent_t *ent; /* Cache entry */
hbool_t chunk_exists; /* Flag to indicate whether a chunk exists already */
unsigned u; /* Local index variable */
/* Didn't find the chunk on disk */
chunk_exists = FALSE;
@ -2846,72 +3000,128 @@ H5D_istore_allocate(H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite)
for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) {
/* Assume a match */
chunk_exists = TRUE;
for(u = 0; u < dset->shared->layout.u.chunk.ndims && chunk_exists; u++) {
if(ent->offset[u] != chunk_offset[u])
for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++)
if(ent->offset[u] != chunk_offset[u]) {
chunk_exists = FALSE; /* Reset if no match */
} /* end for */
break;
} /* end if */
} /* end for */
} /* end if */
if(!chunk_exists) {
H5D_istore_ud1_t udata; /* B-tree pass-through for creating chunk */
/* Chunk wasn't in cache either, create it now */
if(!chunk_exists) {
H5D_istore_ud1_t udata; /* B-tree pass-through for creating chunk */
size_t chunk_size; /* Size of chunk in bytes, possibly filtered */
/* Initialize the chunk information */
udata.common.mesg = &dset->shared->layout;
udata.common.key.filter_mask = filter_mask;
udata.addr = HADDR_UNDEF;
H5_CHECK_OVERFLOW(chunk_size,hsize_t,size_t);
udata.common.key.nbytes = (size_t)chunk_size;
for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++)
udata.common.key.offset[u] = chunk_offset[u];
/* Check for VL datatype & non-default fill value */
if(has_vlen_fill_type) {
/* Allocate a new chunk buffer each time, if filters are used */
if(pline->nused > 0)
if(NULL == (chunk = H5D_istore_chunk_alloc(orig_chunk_size, pline)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for chunk")
/* Allocate the chunk with all processes */
if(H5B_insert(dset->oloc.file, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk")
/* Make a copy of the (disk-based) fill value into the buffer */
HDmemcpy(chunk, fill->buf, file_type_size);
/* Check if fill values should be written to blocks */
if(should_fill) {
#ifdef H5_HAVE_PARALLEL
/* Check if this file is accessed with an MPI-capable file driver */
if(using_mpi) {
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
if(H5_PAR_META_WRITE == mpi_rank)
if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.common.key.nbytes, H5AC_ind_dxpl_id, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Reset first element of background buffer, if necessary */
if(H5T_path_bkg(fill_to_mem_tpath))
HDmemset(bkg_buf, 0, elmt_size);
/* Indicate that blocks are being written */
blocks_written = TRUE;
/* Type convert the dataset buffer, to copy any VL components */
if(H5T_convert(fill_to_mem_tpath, dset->shared->type_id, mem_tid, (size_t)1, (size_t)0, (size_t)0, chunk, bkg_buf, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "data type conversion failed")
/* Replicate the fill value into the cached buffer */
H5V_array_fill(chunk, chunk, mem_type_size, elmts_per_chunk);
/* Reset the entire background buffer, if necessary */
if(H5T_path_bkg(mem_to_dset_tpath))
HDmemset(bkg_buf, 0, bkg_buf_size);
/* Type convert the dataset buffer, to copy any VL components */
if(H5T_convert(mem_to_dset_tpath, mem_tid, dset->shared->type_id, elmts_per_chunk, (size_t)0, (size_t)0, chunk, bkg_buf, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCONVERT, FAIL, "data type conversion failed")
/* Check if there are filters which need to be applied to the chunk */
if(pline->nused > 0) {
size_t buf_size = orig_chunk_size;
size_t nbytes = (size_t)dset->shared->layout.u.chunk.size;
/* Push the chunk through the filters */
if(H5Z_pipeline(pline, 0, &filter_mask, dxpl_cache->err_detect, dxpl_cache->filter_cb, &nbytes, &buf_size, &chunk) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
/* Keep the number of bytes the chunk turned in to */
chunk_size = nbytes;
} /* end if */
else
chunk_size = (size_t)dset->shared->layout.u.chunk.size;
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.common.key.nbytes, dxpl_id, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
else
chunk_size = orig_chunk_size;
/* Initialize the chunk information */
udata.common.mesg = &dset->shared->layout;
udata.common.key.filter_mask = filter_mask;
udata.addr = HADDR_UNDEF;
udata.common.key.nbytes = chunk_size;
for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++)
udata.common.key.offset[u] = chunk_offset[u];
/* Allocate the chunk with all processes */
if(H5B_insert(dset->oloc.file, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk")
/* Check if fill values should be written to chunks */
if(should_fill) {
#ifdef H5_HAVE_PARALLEL
} /* end else */
/* Check if this file is accessed with an MPI-capable file driver */
if(using_mpi) {
/* Write the chunks out from only one process */
/* !! Use the internal "independent" DXPL!! -QAK */
if(H5_PAR_META_WRITE == mpi_rank)
if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.common.key.nbytes, H5AC_ind_dxpl_id, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
/* Indicate that blocks are being written */
blocks_written = TRUE;
} /* end if */
else {
#endif /* H5_HAVE_PARALLEL */
if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, udata.common.key.nbytes, dxpl_id, chunk) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
#ifdef H5_HAVE_PARALLEL
} /* end else */
#endif /* H5_HAVE_PARALLEL */
} /* end if */
/* Release the chunk if we need to re-allocate it each time */
if(has_vlen_fill_type && pline->nused > 0)
chunk = H5D_istore_chunk_xfree(chunk, pline);
} /* end if */
} /* end if */
/* Increment indices */
for(i = (int)dset->shared->layout.u.chunk.ndims - 1, carry = TRUE; i >= 0 && carry; --i) {
carry = TRUE;
for(i = (int)dset->shared->layout.u.chunk.ndims - 1; i >= 0; --i) {
chunk_offset[i] += dset->shared->layout.u.chunk.dim[i];
if (chunk_offset[i] >= space_dim[i])
if(chunk_offset[i] >= space_dim[i])
chunk_offset[i] = 0;
else
else {
carry = FALSE;
break;
} /* end else */
} /* end for */
} /* end while */
#ifdef H5_HAVE_PARALLEL
/* Only need to block at the barrier if we actually allocated a chunk */
/* And if we are using an MPI-capable file driver */
/* Only need to block at the barrier if we actually initialized a chunk */
/* using an MPI-capable file driver */
if(using_mpi && blocks_written) {
/* Wait at barrier to avoid race conditions where some processes are
* still writing out chunks and other processes race ahead to read
* them in, getting bogus data.
*/
if (MPI_SUCCESS != (mpi_code=MPI_Barrier(mpi_comm)))
if(MPI_SUCCESS != (mpi_code = MPI_Barrier(mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Barrier failed", mpi_code)
} /* end if */
#endif /* H5_HAVE_PARALLEL */
@ -2921,6 +3131,16 @@ done:
if(chunk)
chunk = H5D_istore_chunk_xfree(chunk, pline);
/* Free other resources for vlen fill values */
if(has_vlen_fill_type) {
if(mem_tid > 0)
H5I_dec_ref(mem_tid);
else if(mem_type)
H5T_close(mem_type);
if(bkg_buf)
H5FL_BLK_FREE(type_conv, bkg_buf);
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D_istore_allocate() */

View File

@ -2521,68 +2521,91 @@ test_vltypes_fill_value(void)
CHECK(file_id, FAIL, "H5Fcreate");
/* Create datasets with different storage layouts */
HDfprintf(stderr, "Uncomment loop!\n");
/* for(layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { */
for(layout = H5D_COMPACT; layout <= H5D_CONTIGUOUS; layout++) {
hid_t tmp_dcpl_id; /* Temporary copy of the dataset creation property list */
for(layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) {
unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */
unsigned test_loop; /* Loop over datasets */
/* Make a copy of the dataset creation property list */
tmp_dcpl_id = H5Pcopy(dcpl_id);
CHECK(tmp_dcpl_id, FAIL, "H5Pcopy");
#ifdef H5_HAVE_FILTER_DEFLATE
if(layout == H5D_CHUNKED)
compress_loop = 2;
else
#endif /* H5_HAVE_FILTER_DEFLATE */
compress_loop = 1;
/* Layout specific actions */
switch(layout) {
case H5D_COMPACT:
HDstrcpy(dset_name1, "dataset1-compact");
HDstrcpy(dset_name2, "dataset2-compact");
dset_dspace_id = small_dspace_id;
ret = H5Pset_layout(tmp_dcpl_id, H5D_COMPACT);
CHECK(ret, FAIL, "H5Pset_layout");
break;
/* Loop over dataset operations */
for(test_loop = 0; test_loop < compress_loop; test_loop++) {
hid_t tmp_dcpl_id; /* Temporary copy of the dataset creation property list */
case H5D_CONTIGUOUS:
HDstrcpy(dset_name1, "dataset1-contig");
HDstrcpy(dset_name2, "dataset2-contig");
dset_dspace_id = large_dspace_id;
break;
/* Make a copy of the dataset creation property list */
tmp_dcpl_id = H5Pcopy(dcpl_id);
CHECK(tmp_dcpl_id, FAIL, "H5Pcopy");
case H5D_CHUNKED:
{
hsize_t chunk_dims[1] = {SPACE4_DIM_LARGE / 4};
/* Layout specific actions */
switch(layout) {
case H5D_COMPACT:
HDstrcpy(dset_name1, "dataset1-compact");
HDstrcpy(dset_name2, "dataset2-compact");
dset_dspace_id = small_dspace_id;
ret = H5Pset_layout(tmp_dcpl_id, H5D_COMPACT);
CHECK(ret, FAIL, "H5Pset_layout");
break;
HDstrcpy(dset_name1, "dataset1-chunked");
HDstrcpy(dset_name2, "dataset2-chunked");
case H5D_CONTIGUOUS:
HDstrcpy(dset_name1, "dataset1-contig");
HDstrcpy(dset_name2, "dataset2-contig");
dset_dspace_id = large_dspace_id;
ret = H5Pset_chunk(tmp_dcpl_id, 1, chunk_dims);
CHECK(ret, FAIL, "H5Pset_chunk");
}
break;
} /* end switch */
break;
/* Create first data set with default setting - no space is allocated */
dset_id = H5Dcreate(file_id, dset_name1, dtype1_id, dset_dspace_id, tmp_dcpl_id);
CHECK(dset_id, FAIL, "H5Dcreate");
case H5D_CHUNKED:
{
hsize_t chunk_dims[1] = {SPACE4_DIM_LARGE / 4};
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
dset_dspace_id = large_dspace_id;
ret = H5Pset_chunk(tmp_dcpl_id, 1, chunk_dims);
CHECK(ret, FAIL, "H5Pset_chunk");
#ifdef H5_HAVE_FILTER_DEFLATE
if(test_loop == 1) {
HDstrcpy(dset_name1, "dataset1-chunked-compressed");
HDstrcpy(dset_name2, "dataset2-chunked-compressed");
ret = H5Pset_deflate(tmp_dcpl_id, 3);
CHECK(ret, FAIL, "H5Pset_deflate");
} /* end if */
else {
#endif /* H5_HAVE_FILTER_DEFLATE */
HDstrcpy(dset_name1, "dataset1-chunked");
HDstrcpy(dset_name2, "dataset2-chunked");
#ifdef H5_HAVE_FILTER_DEFLATE
} /* end else */
#endif /* H5_HAVE_FILTER_DEFLATE */
}
break;
} /* end switch */
/* Create first data set with default setting - no space is allocated */
dset_id = H5Dcreate(file_id, dset_name1, dtype1_id, dset_dspace_id, tmp_dcpl_id);
CHECK(dset_id, FAIL, "H5Dcreate");
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Create a second data set with space allocated and fill value written */
ret = H5Pset_fill_time(tmp_dcpl_id, H5D_FILL_TIME_IFSET);
CHECK(ret, FAIL, "H5Pset_fill_time");
/* Create a second data set with space allocated and fill value written */
ret = H5Pset_fill_time(tmp_dcpl_id, H5D_FILL_TIME_IFSET);
CHECK(ret, FAIL, "H5Pset_fill_time");
ret = H5Pset_alloc_time(tmp_dcpl_id, H5D_ALLOC_TIME_EARLY);
CHECK(ret, FAIL, "H5Pset_alloc_time");
ret = H5Pset_alloc_time(tmp_dcpl_id, H5D_ALLOC_TIME_EARLY);
CHECK(ret, FAIL, "H5Pset_alloc_time");
dset_id = H5Dcreate(file_id, dset_name2, dtype1_id, dset_dspace_id, tmp_dcpl_id);
CHECK(dset_id, FAIL, "H5Dcreate");
dset_id = H5Dcreate(file_id, dset_name2, dtype1_id, dset_dspace_id, tmp_dcpl_id);
CHECK(dset_id, FAIL, "H5Dcreate");
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Close temporary DCPL */
ret = H5Pclose(tmp_dcpl_id);
CHECK(ret, FAIL, "H5Pclose");
/* Close temporary DCPL */
ret = H5Pclose(tmp_dcpl_id);
CHECK(ret, FAIL, "H5Pclose");
} /* end for */
} /* end for */
ret = H5Fclose(file_id);
@ -2604,91 +2627,112 @@ HDfprintf(stderr, "Uncomment loop!\n");
CHECK(file_id, FAIL, "H5Fopen");
/* Read datasets with different storage layouts */
HDfprintf(stderr, "Uncomment loop!\n");
/* for(layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { */
for(layout = H5D_COMPACT; layout <= H5D_CONTIGUOUS; layout++) {
for(layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) {
unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */
unsigned test_loop; /* Loop over datasets */
/* Layout specific actions */
switch(layout) {
case H5D_COMPACT:
HDstrcpy(dset_name1, "dataset1-compact");
HDstrcpy(dset_name2, "dataset2-compact");
dset_dspace_id = small_dspace_id;
dset_elmts = SPACE4_DIM_SMALL;
break;
#ifdef H5_HAVE_FILTER_DEFLATE
if(layout == H5D_CHUNKED)
compress_loop = 2;
else
#endif /* H5_HAVE_FILTER_DEFLATE */
compress_loop = 1;
case H5D_CONTIGUOUS:
HDstrcpy(dset_name1, "dataset1-contig");
HDstrcpy(dset_name2, "dataset2-contig");
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
/* Loop over dataset operations */
for(test_loop = 0; test_loop < compress_loop; test_loop++) {
case H5D_CHUNKED:
HDstrcpy(dset_name1, "dataset1-chunked");
HDstrcpy(dset_name2, "dataset2-chunked");
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
} /* end switch */
/* Layout specific actions */
switch(layout) {
case H5D_COMPACT:
HDstrcpy(dset_name1, "dataset1-compact");
HDstrcpy(dset_name2, "dataset2-compact");
dset_dspace_id = small_dspace_id;
dset_elmts = SPACE4_DIM_SMALL;
break;
/* Open first data set */
dset_id = H5Dopen(file_id, dset_name1);
CHECK(dset_id, FAIL, "H5Dopen");
case H5D_CONTIGUOUS:
HDstrcpy(dset_name1, "dataset1-contig");
HDstrcpy(dset_name2, "dataset2-contig");
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
/* Read in the data of fill value */
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
case H5D_CHUNKED:
#ifdef H5_HAVE_FILTER_DEFLATE
if(test_loop == 1) {
HDstrcpy(dset_name1, "dataset1-chunked-compressed");
HDstrcpy(dset_name2, "dataset2-chunked-compressed");
} /* end if */
else {
#endif /* H5_HAVE_FILTER_DEFLATE */
HDstrcpy(dset_name1, "dataset1-chunked");
HDstrcpy(dset_name2, "dataset2-chunked");
#ifdef H5_HAVE_FILTER_DEFLATE
} /* end else */
#endif /* H5_HAVE_FILTER_DEFLATE */
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
} /* end switch */
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig, "\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
continue;
} /* end if */
/* Open first data set */
dset_id = H5Dopen(file_id, dset_name1);
CHECK(dset_id, FAIL, "H5Dopen");
/* Read in the data of fill value */
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig, "\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
continue;
} /* end if */
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
/* Open the second data set to check the value of data */
dset_id = H5Dopen(file_id, dset_name2);
CHECK(dset_id, FAIL, "H5Dopen");
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig, "\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
/* Open the second data set to check the value of data */
dset_id = H5Dopen(file_id, dset_name2);
CHECK(dset_id, FAIL, "H5Dopen");
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig, "\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
} /* end for */
ret = H5Fclose(file_id);
@ -2700,139 +2744,160 @@ HDfprintf(stderr, "Uncomment loop!\n");
CHECK(file_id, FAIL, "H5Fopen");
/* Write one element & fill values to datasets with different storage layouts */
HDfprintf(stderr, "Uncomment loop!\n");
/* for(layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) { */
for(layout = H5D_COMPACT; layout <= H5D_CONTIGUOUS; layout++) {
for(layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) {
unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */
unsigned test_loop; /* Loop over datasets */
/* Layout specific actions */
switch(layout) {
case H5D_COMPACT:
HDstrcpy(dset_name1, "dataset1-compact");
HDstrcpy(dset_name2, "dataset2-compact");
dset_dspace_id = small_dspace_id;
dset_elmts = SPACE4_DIM_SMALL;
break;
#ifdef H5_HAVE_FILTER_DEFLATE
if(layout == H5D_CHUNKED)
compress_loop = 2;
else
#endif /* H5_HAVE_FILTER_DEFLATE */
compress_loop = 1;
case H5D_CONTIGUOUS:
HDstrcpy(dset_name1, "dataset1-contig");
HDstrcpy(dset_name2, "dataset2-contig");
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
/* Loop over dataset operations */
for(test_loop = 0; test_loop < compress_loop; test_loop++) {
case H5D_CHUNKED:
HDstrcpy(dset_name1, "dataset1-chunked");
HDstrcpy(dset_name2, "dataset2-chunked");
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
} /* end switch */
/* Layout specific actions */
switch(layout) {
case H5D_COMPACT:
HDstrcpy(dset_name1, "dataset1-compact");
HDstrcpy(dset_name2, "dataset2-compact");
dset_dspace_id = small_dspace_id;
dset_elmts = SPACE4_DIM_SMALL;
break;
/* Copy the dataset's dataspace */
single_dspace_id = H5Scopy(dset_dspace_id);
CHECK(single_dspace_id, FAIL, "H5Scopy");
case H5D_CONTIGUOUS:
HDstrcpy(dset_name1, "dataset1-contig");
HDstrcpy(dset_name2, "dataset2-contig");
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
/* Set a single element in the dataspace */
ret = H5Sselect_hyperslab(single_dspace_id, H5S_SELECT_SET, single_offset,
NULL, single_block, NULL);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
case H5D_CHUNKED:
#ifdef H5_HAVE_FILTER_DEFLATE
if(test_loop == 1) {
HDstrcpy(dset_name1, "dataset1-chunked-compressed");
HDstrcpy(dset_name2, "dataset2-chunked-compressed");
} /* end if */
else {
#endif /* H5_HAVE_FILTER_DEFLATE */
HDstrcpy(dset_name1, "dataset1-chunked");
HDstrcpy(dset_name2, "dataset2-chunked");
#ifdef H5_HAVE_FILTER_DEFLATE
} /* end else */
#endif /* H5_HAVE_FILTER_DEFLATE */
dset_dspace_id = large_dspace_id;
dset_elmts = SPACE4_DIM_LARGE;
break;
} /* end switch */
/* Open first data set */
dset_id = H5Dopen(file_id, dset_name1);
CHECK(dset_id, FAIL, "H5Dopen");
/* Copy the dataset's dataspace */
single_dspace_id = H5Scopy(dset_dspace_id);
CHECK(single_dspace_id, FAIL, "H5Scopy");
/* Write one element in the dataset */
ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata);
CHECK(ret, FAIL, "H5Dwrite");
/* Set a single element in the dataspace */
ret = H5Sselect_hyperslab(single_dspace_id, H5S_SELECT_SET, single_offset,
NULL, single_block, NULL);
CHECK(ret, FAIL, "H5Sselect_hyperslab");
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Open first data set */
dset_id = H5Dopen(file_id, dset_name1);
CHECK(dset_id, FAIL, "H5Dopen");
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(i == single_offset[0]) {
if(HDstrcmp(rbuf[i].str_id, wdata.str_id)
|| rbuf[i].str_name
|| HDstrcmp(rbuf[i].str_desc, wdata.str_desc)
|| HDstrcmp(rbuf[i].str_orig, wdata.str_orig)
|| HDstrcmp(rbuf[i].str_stat, wdata.str_stat)
|| HDstrcmp(rbuf[i].str_form, wdata.str_form)
|| HDstrcmp(rbuf[i].str_unit, wdata.str_unit)) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
/* Write one element in the dataset */
ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata);
CHECK(ret, FAIL, "H5Dwrite");
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(i == single_offset[0]) {
if(HDstrcmp(rbuf[i].str_id, wdata.str_id)
|| rbuf[i].str_name
|| HDstrcmp(rbuf[i].str_desc, wdata.str_desc)
|| HDstrcmp(rbuf[i].str_orig, wdata.str_orig)
|| HDstrcmp(rbuf[i].str_stat, wdata.str_stat)
|| HDstrcmp(rbuf[i].str_form, wdata.str_form)
|| HDstrcmp(rbuf[i].str_unit, wdata.str_unit)) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end if */
} /* end if */
else {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig,"\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
else {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig,"\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end if */
} /* end if */
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
/* Open the second data set to check the value of data */
dset_id = H5Dopen(file_id, dset_name2);
CHECK(dset_id, FAIL, "H5Dopen");
/* Write one element in the dataset */
ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata);
CHECK(ret, FAIL, "H5Dwrite");
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(i == single_offset[0]) {
if(HDstrcmp(rbuf[i].str_id, wdata.str_id)
|| rbuf[i].str_name
|| HDstrcmp(rbuf[i].str_desc, wdata.str_desc)
|| HDstrcmp(rbuf[i].str_orig, wdata.str_orig)
|| HDstrcmp(rbuf[i].str_stat, wdata.str_stat)
|| HDstrcmp(rbuf[i].str_form, wdata.str_form)
|| HDstrcmp(rbuf[i].str_unit, wdata.str_unit)) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end if */
else {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig,"\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end if */
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
/* Close the dataspace for the writes */
ret = H5Sclose(single_dspace_id);
CHECK(ret, FAIL, "H5Sclose");
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
/* Open the second data set to check the value of data */
dset_id = H5Dopen(file_id, dset_name2);
CHECK(dset_id, FAIL, "H5Dopen");
/* Write one element in the dataset */
ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata);
CHECK(ret, FAIL, "H5Dwrite");
ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dread");
/* Compare data read in */
for(i = 0; i < dset_elmts; i++) {
if(i == single_offset[0]) {
if(HDstrcmp(rbuf[i].str_id, wdata.str_id)
|| rbuf[i].str_name
|| HDstrcmp(rbuf[i].str_desc, wdata.str_desc)
|| HDstrcmp(rbuf[i].str_orig, wdata.str_orig)
|| HDstrcmp(rbuf[i].str_stat, wdata.str_stat)
|| HDstrcmp(rbuf[i].str_form, wdata.str_form)
|| HDstrcmp(rbuf[i].str_unit, wdata.str_unit)) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end if */
else {
if(HDstrcmp(rbuf[i].str_id, "foobar")
|| HDstrcmp(rbuf[i].str_name, "")
|| rbuf[i].str_desc
|| HDstrcmp(rbuf[i].str_orig,"\0")
|| HDstrcmp(rbuf[i].str_stat, "dead")
|| HDstrcmp(rbuf[i].str_form, "liquid")
|| HDstrcmp(rbuf[i].str_unit, "meter")) {
TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n",__LINE__,(int)i);
continue;
} /* end if */
} /* end if */
} /* end for */
ret = H5Dclose(dset_id);
CHECK(ret, FAIL, "H5Dclose");
/* Release the space */
ret = H5Dvlen_reclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
CHECK(ret, FAIL, "H5Dvlen_reclaim");
/* Close the dataspace for the writes */
ret = H5Sclose(single_dspace_id);
CHECK(ret, FAIL, "H5Sclose");
} /* end for */
ret = H5Fclose(file_id);