Mirror of https://github.com/HDFGroup/hdf5.git (synced 2025-03-25 17:00:45 +08:00)
[svn-r26757] Description:
    Minor typos & code cleanups

Tested on:
    MacOSX/64 10.10.2 (amazon) w/serial & parallel
    (Too minor to require h5committest)
parent dd32c639c6
commit 4662835250
@@ -926,8 +926,7 @@ H5D__btree_idx_is_space_alloc(const H5O_storage_chunk_t *storage)
 /*-------------------------------------------------------------------------
  * Function:    H5D__btree_idx_insert
  *
- * Purpose:     Create the chunk it if it doesn't exist, or reallocate the
- *              chunk if its size changed.
+ * Purpose:     Insert chunk entry into the indexing structure.
  *
  * Return:      Non-negative on success/Negative on failure
  *
src/H5Dchunk.c (102 changed lines)
@@ -17,7 +17,7 @@
  *              Thursday, April 24, 2008
  *
  * Purpose:     Abstract indexed (chunked) I/O functions. The logical
- *              multi-dimensional data space is regularly partitioned into
+ *              multi-dimensional dataspace is regularly partitioned into
  *              same-sized "chunks", the first of which is aligned with the
  *              logical origin. The chunks are indexed by different methods,
  *              that map a chunk index to disk address. Each chunk can be
@@ -201,7 +201,6 @@ static herr_t H5D__chunk_write(H5D_io_info_t *io_info, const H5D_type_info_t *ty
     H5D_chunk_map_t *fm);
 static herr_t H5D__chunk_flush(H5D_t *dset, hid_t dxpl_id);
 static herr_t H5D__chunk_io_term(const H5D_chunk_map_t *fm);
-static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last);
 
 /* "Nonexistent" layout operation callback */
 static ssize_t
@@ -216,6 +215,7 @@ static void *H5D__chunk_alloc(size_t size, const H5O_pline_t *pline);
 static void *H5D__chunk_xfree(void *chk, const H5O_pline_t *pline);
 static void *H5D__chunk_realloc(void *chk, size_t size,
     const H5O_pline_t *pline);
+static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last);
 static herr_t H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last,
     const H5D_chunk_ud_t *udata);
 static hbool_t H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last,
@@ -329,7 +329,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz
 
     FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL)
 
-    /* Allocate data space and initialize it if it hasn't been. */
+    /* Allocate dataspace and initialize it if it hasn't been. */
     if(!(*dset->shared->layout.ops->is_space_alloc)(&dset->shared->layout.storage))
         /* Allocate storage */
         if(H5D__alloc_storage(dset, dxpl_id, H5D_ALLOC_WRITE, FALSE, NULL) < 0)
@@ -341,10 +341,10 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz
 
     /* Calculate the index of this chunk */
     if(H5VM_chunk_index((unsigned)space_ndims, offset,
-                        layout->u.chunk.dim, layout->u.chunk.down_chunks, &chunk_idx) < 0)
+            layout->u.chunk.dim, layout->u.chunk.down_chunks, &chunk_idx) < 0)
         HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't get chunk index")
 
-    /* Find out the file address of the chunk */
+    /* Find out the file address of the chunk (if any) */
     if(H5D__chunk_lookup(dset, dxpl_id, offset, chunk_idx, &udata) < 0)
         HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
 
@@ -381,7 +381,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters, hsiz
     if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
         HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
 
-    /* Evict the entry from the cache if present, but do not flush
+    /* Evict the (old) entry from the cache if present, but do not flush
      * it to disk */
     if(UINT_MAX != udata.idx_hint)
         if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], FALSE) < 0)
@@ -601,21 +601,21 @@ H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id)
     HDassert(dset);
 
     if(NULL == (dapl = (H5P_genplist_t *)H5I_object(dapl_id)))
-        HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for fapl ID");
+        HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for fapl ID")
 
     /* Use the properties in dapl_id if they have been set, otherwise use the properties from the file */
     if(H5P_get(dapl, H5D_ACS_DATA_CACHE_NUM_SLOTS_NAME, &rdcc->nslots) < 0)
-        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get data cache number of slots");
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get data cache number of slots")
     if(rdcc->nslots == H5D_CHUNK_CACHE_NSLOTS_DEFAULT)
         rdcc->nslots = H5F_RDCC_NSLOTS(f);
 
     if(H5P_get(dapl, H5D_ACS_DATA_CACHE_BYTE_SIZE_NAME, &rdcc->nbytes_max) < 0)
-        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get data cache byte size");
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get data cache byte size")
    if(rdcc->nbytes_max == H5D_CHUNK_CACHE_NBYTES_DEFAULT)
        rdcc->nbytes_max = H5F_RDCC_NBYTES(f);
 
     if(H5P_get(dapl, H5D_ACS_PREEMPT_READ_CHUNKS_NAME, &rdcc->w0) < 0)
-        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get preempt read chunks");
+        HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get preempt read chunks")
     if(rdcc->w0 < 0)
         rdcc->w0 = H5F_RDCC_W0(f);
 
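The trailing semicolons removed in the H5D__chunk_init hunk above are redundant: HGOTO_ERROR already expands to a complete statement that records the error and jumps to the function's done: label, and the library convention is to invoke it without a semicolon. A minimal standalone sketch of that style of error macro (not HDF5's actual definition) follows:

#include <stdio.h>

/* Minimal sketch, not HDF5's real HGOTO_ERROR: an error macro that logs a
 * message, sets the return value, and jumps to the cleanup label.  Because
 * the expansion already ends in a complete block, a trailing ';' at the
 * call site adds nothing -- which is why the hunk above can drop them. */
#define SKETCH_HGOTO_ERROR(msg)                                               \
    {                                                                         \
        fprintf(stderr, "error: %s\n", (msg));                                \
        ret_value = -1;                                                       \
        goto done;                                                            \
    }

static int
get_cache_setting(int have_it, int *nslots /*out*/)
{
    int ret_value = 0;          /* Return value */

    if(!have_it)
        SKETCH_HGOTO_ERROR("can't get data cache number of slots")

    *nslots = 521;              /* Arbitrary example value */

done:
    return ret_value;
}

int
main(void)
{
    int nslots = 0;

    return (get_cache_setting(1, &nslots) < 0) ? 1 : 0;
}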
@@ -2244,9 +2244,9 @@ H5D__chunk_cinfo_cache_update(H5D_chunk_cached_t *last, const H5D_chunk_ud_t *ud
 
     /* Stored the information to cache */
     HDmemcpy(last->offset, udata->common.offset, sizeof(hsize_t) * udata->common.layout->ndims);
-    last->addr = udata->addr;
     last->nbytes = udata->nbytes;
     last->filter_mask = udata->filter_mask;
+    last->addr = udata->addr;
 
     /* Indicate that the cached info is valid */
     last->valid = TRUE;
@@ -2291,9 +2291,9 @@ H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *uda
         HGOTO_DONE(FALSE)
 
     /* Retrieve the information from the cache */
-    udata->addr = last->addr;
     udata->nbytes = last->nbytes;
     udata->filter_mask = last->filter_mask;
+    udata->addr = last->addr;
 
     /* Indicate that the data was found */
     HGOTO_DONE(TRUE)
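The two hunks above maintain a one-entry cache of the most recently looked-up chunk; the edit only moves the addr assignment so it matches the declaration order of the cached struct (see the H5D_chunk_cached_t hunk later in this commit). As a rough, self-contained sketch of that single-entry cache idea, with invented names and plain C types standing in for the HDF5 ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NDIMS_MAX 32    /* Illustrative bound, standing in for H5O_LAYOUT_NDIMS */

/* Hypothetical single-entry cache of the last chunk lookup, modeled on the
 * fields visible in the diff (offset, nbytes, filter_mask, addr). */
typedef struct {
    bool     valid;
    uint64_t offset[NDIMS_MAX];
    uint32_t nbytes;
    unsigned filter_mask;
    uint64_t addr;
} last_chunk_info_t;

/* Remember the result of a lookup so that an immediately repeated lookup of
 * the same chunk can skip the index search. */
static void
cache_update(last_chunk_info_t *last, const uint64_t *offset, unsigned ndims,
             uint32_t nbytes, unsigned filter_mask, uint64_t addr)
{
    memcpy(last->offset, offset, sizeof(uint64_t) * ndims);
    last->nbytes      = nbytes;
    last->filter_mask = filter_mask;
    last->addr        = addr;
    last->valid       = true;
}

/* Return true and fill in the outputs if the cached entry matches 'offset'. */
static bool
cache_found(const last_chunk_info_t *last, const uint64_t *offset,
            unsigned ndims, uint32_t *nbytes, unsigned *filter_mask,
            uint64_t *addr)
{
    if(!last->valid)
        return false;
    for(unsigned u = 0; u < ndims; u++)
        if(last->offset[u] != offset[u])
            return false;

    *nbytes      = last->nbytes;
    *filter_mask = last->filter_mask;
    *addr        = last->addr;
    return true;
}

int
main(void)
{
    last_chunk_info_t last = {0};
    uint64_t offset[2] = {40, 0};
    uint32_t nbytes;
    unsigned mask;
    uint64_t addr;

    cache_update(&last, offset, 2, 4096, 0, 2048);
    if(cache_found(&last, offset, 2, &nbytes, &mask, &addr))
        printf("cached chunk at addr %llu, %u bytes\n",
               (unsigned long long)addr, (unsigned)nbytes);
    return 0;
}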
@@ -2392,9 +2392,9 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
     udata->common.rdcc = &(dset->shared->cache.chunk);
 
     /* Reset information about the chunk we are looking for */
-    udata->addr = HADDR_UNDEF;
     udata->nbytes = 0;
     udata->filter_mask = 0;
+    udata->addr = HADDR_UNDEF;
 
     /* Check for chunk in cache */
     if(dset->shared->cache.chunk.nslots > 0) {
@@ -2402,8 +2402,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *chunk_offset,
         ent = dset->shared->cache.chunk.slot[udata->idx_hint];
 
         if(ent)
-            for(u = 0, found = TRUE; u < dset->shared->layout.u.chunk.ndims - 1;
-                    u++)
+            for(u = 0, found = TRUE; u < dset->shared->layout.u.chunk.ndims - 1; u++)
                 if(chunk_offset[u] != ent->offset[u]) {
                     found = FALSE;
                     break;
@@ -2590,10 +2589,9 @@ done:
      * output pipeline failed. Do not free the entry or remove it from the
      * list.
      */
-    if(ret_value < 0 && point_of_no_return) {
+    if(ret_value < 0 && point_of_no_return)
         if(ent->chunk)
             ent->chunk = (uint8_t *)H5D__chunk_xfree(ent->chunk, &(dset->shared->dcpl_cache.pline));
-    } /* end if */
 
     FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
 } /* end H5D__chunk_flush_entry() */
@@ -3264,7 +3262,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
     int         space_ndims;                /* Dataset's space rank */
     hsize_t     space_dim[H5O_LAYOUT_NDIMS];    /* Dataset's dataspace dimensions */
     const uint32_t *chunk_dim = layout->u.chunk.dim;    /* Convenience pointer to chunk dimensions */
-    int         op_dim;                     /* Current operationg dimension */
+    unsigned    op_dim;                     /* Current operating dimension */
     H5D_fill_buf_info_t fb_info;            /* Dataset's fill buffer info */
     hbool_t     fb_info_init = FALSE;       /* Whether the fill value buffer has been initialized */
     herr_t      ret_value = SUCCEED;        /* Return value */
@@ -3286,7 +3284,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
 
     /* Check if any space dimensions are 0, if so we do not have to do anything
      */
-    for(op_dim=0; op_dim<space_ndims; op_dim++)
+    for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++)
         if(space_dim[op_dim] == 0) {
             /* Reset any cached chunk info for this dataset */
             H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
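The op_dim changes in this and the following hunks are one cleanup applied repeatedly: the loop variable becomes unsigned and the signed rank value is cast once in the comparison, so the loop no longer mixes signed and unsigned operands (the usual source of sign-compare warnings). An illustrative, self-contained version of the pattern, where get_rank() is a made-up stand-in for the dataspace rank query:

#include <stdio.h>

/* Made-up stand-in for a query (such as the dataspace-rank call in the
 * diff) that reports the rank as a signed int. */
static int
get_rank(void)
{
    return 3;
}

int
main(void)
{
    int      space_ndims = get_rank();  /* Rank arrives as a signed int */
    unsigned op_dim;                    /* Loop index kept unsigned... */

    /* ...so the comparison casts the signed rank once instead of mixing
     * signed and unsigned operands in the condition. */
    for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++)
        printf("operating on dimension %u\n", op_dim);

    return 0;
}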
@@ -3393,7 +3391,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
      * certain dimension, max_unalloc is updated in order to avoid allocating
      * those chunks again.
      */
-    for(op_dim=0; op_dim<space_ndims; op_dim++) {
+    for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
         H5D_chunk_ud_t udata;   /* User data for querying chunk info */
         int i;                  /* Local index variable */
 
@@ -3456,8 +3454,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
                  * possible (though ill-advised) for the filter to shrink the
                  * buffer. */
                 if(fb_info.fill_buf_size < orig_chunk_size) {
-                    if(NULL == (fb_info.fill_buf = H5D__chunk_realloc(
-                            fb_info.fill_buf, orig_chunk_size, pline)))
+                    if(NULL == (fb_info.fill_buf = H5D__chunk_realloc(fb_info.fill_buf, orig_chunk_size, pline)))
                         HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory reallocation failed for raw data chunk")
                     fb_info.fill_buf_size = orig_chunk_size;
                 } /* end if */
@@ -3490,11 +3487,11 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
             udata.common.storage = &layout->storage.u.chunk;
             udata.common.offset = chunk_offset;
             udata.common.rdcc = NULL;
-            udata.addr = HADDR_UNDEF;
             H5_ASSIGN_OVERFLOW(udata.nbytes, chunk_size, size_t, uint32_t);
             udata.filter_mask = filter_mask;
+            udata.addr = HADDR_UNDEF;
 
-            /* Allocate the chunk with all processes */
+            /* Allocate the chunk (with all processes) */
             if((ops->insert)(&idx_info, &udata) < 0)
                 HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert record into chunk index")
             HDassert(H5F_addr_defined(udata.addr));
@@ -3511,11 +3508,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
                 /* collect all chunk addresses to be written to
                    write collectively at the end */
                 /* allocate/resize address array if no more space left */
-                if(0 == chunk_info.num_io % 1024) {
-                    if(NULL == (chunk_info.addr = (haddr_t *)HDrealloc
-                                (chunk_info.addr, (chunk_info.num_io + 1024) * sizeof(haddr_t))))
-                        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "memory allocation failed for chunk addresses");
-                } /* end if */
+                if(0 == chunk_info.num_io % 1024)
+                    if(NULL == (chunk_info.addr = (haddr_t *)HDrealloc(chunk_info.addr, (chunk_info.num_io + 1024) * sizeof(haddr_t))))
+                        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "memory allocation failed for chunk addresses")
 
                 /* Store the chunk's address for later */
                 chunk_info.addr[chunk_info.num_io] = udata.addr;
@@ -3526,8 +3521,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
             } /* end if */
             else {
 #endif /* H5_HAVE_PARALLEL */
-                if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size,
-                        dxpl_id, fb_info.fill_buf) < 0)
+                if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, udata.addr, chunk_size, dxpl_id, fb_info.fill_buf) < 0)
                     HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 #ifdef H5_HAVE_PARALLEL
             } /* end else */
@@ -3539,7 +3533,7 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
         for(i = ((int)space_ndims - 1); i >= 0; --i) {
             chunk_offset[i] += chunk_dim[i];
             if(chunk_offset[i] > max_unalloc[i]) {
-                if(i == op_dim)
+                if((unsigned)i == op_dim)
                     chunk_offset[i] = min_unalloc[i];
                 else
                     chunk_offset[i] = 0;
@@ -3562,10 +3556,9 @@ H5D__chunk_allocate(const H5D_t *dset, hid_t dxpl_id, hbool_t full_overwrite,
 
 #ifdef H5_HAVE_PARALLEL
     /* do final collective I/O */
-    if(using_mpi && blocks_written) {
+    if(using_mpi && blocks_written)
         if(H5D__chunk_collective_fill(dset, dxpl_id, &chunk_info, chunk_size, fb_info.fill_buf) < 0)
             HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
-    } /* end if */
 #endif /* H5_HAVE_PARALLEL */
 
     /* Reset any cached chunk info for this dataset */
@@ -3577,10 +3570,8 @@ done:
         HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "Can't release fill buffer info")
 
 #ifdef H5_HAVE_PARALLEL
-    if(using_mpi) {
-        if(chunk_info.addr)
-            HDfree(chunk_info.addr);
-    } /* end if */
+    if(using_mpi && chunk_info.addr)
+        HDfree(chunk_info.addr);
 #endif
 
     FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
@@ -3684,8 +3675,7 @@ H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
      * should use this if MPI_type_create_hindexed block is working
      * mpi_code = MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE, &file_type);
      */
-    mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array,
-            MPI_BYTE, &file_type);
+    mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
     if(mpi_code != MPI_SUCCESS)
         HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
     if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type)))
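The hunk above only re-joins a wrapped MPI_Type_create_hindexed call onto one line. For context, this is roughly how such a file type is built from per-chunk byte displacements before a collective write; the sketch below uses only standard MPI calls, and the chunk sizes and offsets are made up rather than taken from HDF5:

#include <mpi.h>

/* Hedged sketch: build an "hindexed" datatype covering several equally
 * sized chunks at arbitrary byte displacements, the same shape of call the
 * diff above reformats.  The chunk addresses here are invented. */
int
main(int argc, char **argv)
{
    int          blocks = 4;
    int          block_lens[4];
    MPI_Aint     chunk_disp_array[4];
    MPI_Datatype file_type;
    int          i;

    MPI_Init(&argc, &argv);

    for(i = 0; i < blocks; i++) {
        block_lens[i]       = 1024;                 /* bytes per chunk */
        chunk_disp_array[i] = (MPI_Aint)(i * 4096); /* made-up file offsets */
    }

    /* One datatype describing all chunk regions to be written */
    if(MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array,
            MPI_BYTE, &file_type) != MPI_SUCCESS)
        MPI_Abort(MPI_COMM_WORLD, 1);
    MPI_Type_commit(&file_type);

    /* ...the type would then serve as the file view of a collective write... */

    MPI_Type_free(&file_type);
    MPI_Finalize();
    return 0;
}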
@@ -3718,9 +3708,8 @@ H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
             HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set transfer mode")
     }
 
-    /* low level write */
-    if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, (haddr_t)0, (blocks) ? (size_t)1 : (size_t)0,
-            data_dxpl_id, fill_buf) < 0)
+    /* low level write (collective) */
+    if(H5F_block_write(dset->oloc.file, H5FD_MEM_DRAW, (haddr_t)0, (blocks) ? (size_t)1 : (size_t)0, data_dxpl_id, fill_buf) < 0)
         HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 
     /* Barrier so processes don't race ahead */
@@ -3786,8 +3775,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
     if(H5D__chunk_lookup(dset, io_info->dxpl_id, chunk_offset, io_info->store->chunk.index, &chk_udata) < 0)
         HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
 
-    /* If this chunk does not exist in cache or on disk, no need to do anything
-     */
+    /* If this chunk does not exist in cache or on disk, no need to do anything */
     if(!H5F_addr_defined(chk_udata.addr) && UINT_MAX == chk_udata.idx_hint)
         HGOTO_DONE(SUCCEED)
 
@@ -3804,8 +3792,7 @@ H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata)
 
     /* Compute the # of elements to leave with existing value, in each dimension */
     for(u = 0; u < rank; u++) {
-        count[u] = MIN(layout->u.chunk.dim[u], (udata->space_dim[u]
-                - chunk_offset[u]));
+        count[u] = MIN(layout->u.chunk.dim[u], (udata->space_dim[u] - chunk_offset[u]));
         HDassert(count[u] > 0);
     } /* end for */
 
@@ -3985,7 +3972,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
     H5D_rdcc_ent_t *ent = NULL;             /* Cache entry */
     int         space_ndims;                /* Dataset's space rank */
     hsize_t     space_dim[H5O_LAYOUT_NDIMS];    /* Current dataspace dimensions */
-    int         op_dim;                     /* Current operationg dimension */
+    unsigned    op_dim;                     /* Current operating dimension */
     hbool_t     shrunk_dim[H5O_LAYOUT_NDIMS];   /* Dimensions which have shrunk */
     H5D_chunk_it_ud1_t udata;               /* Chunk index iterator user data */
     hbool_t     udata_init = FALSE;         /* Whether the chunk index iterator user data has been initialized */
@@ -4021,7 +4008,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
     chunk_offset[space_ndims] = (hsize_t)0;
 
     /* Check if any old dimensions are 0, if so we do not have to do anything */
-    for(op_dim=0; op_dim<space_ndims; op_dim++)
+    for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++)
         if(old_dim[op_dim] == 0) {
             /* Reset any cached chunk info for this dataset */
             H5D__chunk_cinfo_cache_reset(&dset->shared->cache.chunk.last);
@@ -4084,7 +4071,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
      */
     HDmemset(min_mod_chunk_off, 0, sizeof(min_mod_chunk_off));
     HDmemset(max_mod_chunk_off, 0, sizeof(max_mod_chunk_off));
-    for(op_dim = 0; op_dim < space_ndims; op_dim++) {
+    for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
         /* Calculate the largest offset of chunks that might need to be
          * modified in this dimension */
         max_mod_chunk_off[op_dim] = chunk_dim[op_dim] * ((old_dim[op_dim] - 1)
@@ -4133,7 +4120,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
     } /* end if */
 
     /* Main loop: fill or remove chunks */
-    for(op_dim=0; op_dim<space_ndims; op_dim++) {
+    for(op_dim = 0; op_dim < (unsigned)space_ndims; op_dim++) {
         /* Check if modification along this dimension is really necessary */
         if(!shrunk_dim[op_dim])
             continue;
@@ -4217,7 +4204,7 @@ H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dim)
             if(chunk_offset[i] > (hsize_t) max_mod_chunk_off[i]) {
                 /* Left maximum dimensions, "wrap around" and check if this
                  * dimension is no longer outside the fill dimension */
-                if(i == op_dim) {
+                if((unsigned)i == op_dim) {
                     chunk_offset[i] = min_mod_chunk_off[i];
                     if(dims_outside_fill[i] && fill_dim[i]) {
                         dims_outside_fill[i] = FALSE;
@@ -4663,12 +4650,12 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
     udata_dst.common.storage = udata->idx_info_dst->storage;
     udata_dst.common.offset = chunk_rec->offset;
     udata_dst.common.rdcc = NULL;
-    udata_dst.addr = HADDR_UNDEF;
     udata_dst.nbytes = chunk_rec->nbytes;
     udata_dst.filter_mask = chunk_rec->filter_mask;
+    udata_dst.addr = HADDR_UNDEF;
 
     /* Need to compress variable-length & reference data elements before writing to file */
-    if(has_filters && (is_vlen || fix_ref) ) {
+    if(has_filters && (is_vlen || fix_ref)) {
         if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
             HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
 #if H5_SIZEOF_SIZE_T > 4
@@ -4685,8 +4672,9 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
     H5_BEGIN_TAG(udata->idx_info_dst->dxpl_id, H5AC__COPIED_TAG, H5_ITER_ERROR);
 
     /* Insert chunk into the destination index */
-    if((udata->idx_info_dst->storage->ops->insert)(udata->idx_info_dst, &udata_dst) < 0)
-        HGOTO_ERROR_TAG(H5E_DATASET, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert chunk into index")
+    if(udata->idx_info_dst->storage->ops->insert)
+        if((udata->idx_info_dst->storage->ops->insert)(udata->idx_info_dst, &udata_dst) < 0)
+            HGOTO_ERROR_TAG(H5E_DATASET, H5E_CANTINSERT, H5_ITER_ERROR, "unable to insert chunk addr into index")
 
     /* Reset metadata tag in dxpl_id */
     H5_END_TAG(H5_ITER_ERROR);
@@ -4952,7 +4940,7 @@ done:
 
     /* Clean up any index information */
     if(copy_setup_done)
-        if((storage_src->ops->copy_shutdown)(storage_src, storage_dst, dxpl_id) < 0)
+        if(storage_src->ops->copy_shutdown && (storage_src->ops->copy_shutdown)(storage_src, storage_dst, dxpl_id) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_CANTRELEASE, FAIL, "unable to shut down index copying info")
 
     FUNC_LEAVE_NOAPI(ret_value)
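The insert and copy_shutdown changes above add NULL checks before invoking callbacks from the chunk index "ops" table, since an index type may leave optional operations unset. A hedged sketch of that defensive pattern, with an invented ops struct rather than HDF5's actual ops structure:

#include <stdio.h>

/* Hypothetical ops table with optional callbacks, illustrating the
 * "test the pointer before calling it" pattern from the hunks above. */
typedef struct {
    int (*insert)(int chunk_id);    /* Optional callback */
    int (*copy_shutdown)(void);     /* Optional callback */
} chunk_index_ops_t;

static int
insert_chunk(const chunk_index_ops_t *ops, int chunk_id)
{
    /* Only call the callback if this index type provides one */
    if(ops->insert)
        if(ops->insert(chunk_id) < 0)
            return -1;
    return 0;
}

static int
demo_insert(int chunk_id)
{
    printf("inserting chunk %d\n", chunk_id);
    return 0;
}

int
main(void)
{
    chunk_index_ops_t with_cb    = { demo_insert, NULL };
    chunk_index_ops_t without_cb = { NULL, NULL };

    insert_chunk(&with_cb, 7);      /* Callback runs */
    insert_chunk(&without_cb, 8);   /* Silently skipped */
    return 0;
}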
@@ -305,7 +305,7 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
         int     ndims = 0;
         hsize_t dims[H5O_LAYOUT_NDIMS];
         hsize_t internal_offset[H5O_LAYOUT_NDIMS];
-        unsigned u;
+        unsigned u;                     /* Local index variable */
 
         /* Get the dataset transfer property list */
         if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
@@ -327,7 +327,7 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
         if((ndims = H5S_get_simple_extent_dims(dset->shared->space, dims, NULL)) < 0)
             HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve dataspace extent dims")
 
-        for(u = 0; u < ndims; u++) {
+        for(u = 0; u < (unsigned)ndims; u++) {
             /* Make sure the offset doesn't exceed the dataset's dimensions */
             if(direct_offset[u] > dims[u])
                 HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
@@ -265,9 +265,9 @@ typedef struct H5D_chunk_ud_t {
 
     /* Upward */
     unsigned    idx_hint;           /*index of chunk in cache, if present */
-    haddr_t     addr;               /*file address of chunk */
     uint32_t    nbytes;             /*size of stored data */
     unsigned    filter_mask;        /*excluded filters */
+    haddr_t     addr;               /*file address of chunk */
 } H5D_chunk_ud_t;
 
 /* Typedef for "generic" chunk callbacks */
@@ -365,9 +365,9 @@ typedef struct H5D_chunk_map_t {
 typedef struct H5D_chunk_cached_t {
     hbool_t     valid;                          /*whether cache info is valid*/
     hsize_t     offset[H5O_LAYOUT_NDIMS];       /*logical offset to start*/
-    haddr_t     addr;                           /*file address of chunk */
     uint32_t    nbytes;                         /*size of stored data */
     unsigned    filter_mask;                    /*excluded filters */
+    haddr_t     addr;                           /*file address of chunk */
 } H5D_chunk_cached_t;
 
 /* The raw data chunk cache */
@@ -722,6 +722,7 @@ H5O_layout_debug(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_mesg,
                       "B-tree address:", mesg->storage.u.chunk.idx_addr);
             break;
 
+        case H5D_CHUNK_IDX_NTYPES:
         default:
             HDfprintf(stream, "%*s%-*s %s (%u)\n", indent, "", fwidth,
                       "Index Type:", "Unknown", (unsigned)mesg->storage.u.chunk.idx_type);
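The added case H5D_CHUNK_IDX_NTYPES: label above lists the sentinel enumerator explicitly and lets it fall through to default, a pattern that keeps switch statements exhaustive for compilers that warn about unhandled enumerators while still routing unexpected values to the catch-all branch. A small illustration with an invented enum:

#include <stdio.h>

/* Hedged sketch of the switch pattern in the hunk above: the sentinel
 * enumerator (here IDX_NTYPES) is handled explicitly and falls through to
 * default, so "every enumerator covered" checks stay quiet while unknown
 * values still reach the catch-all branch. */
typedef enum {
    IDX_BTREE = 0,
    IDX_NTYPES          /* Sentinel: number of index types */
} idx_type_t;

static void
print_idx_type(idx_type_t t)
{
    switch(t) {
        case IDX_BTREE:
            printf("Index Type: B-tree\n");
            break;

        case IDX_NTYPES:
        default:
            printf("Index Type: Unknown (%u)\n", (unsigned)t);
            break;
    }
}

int
main(void)
{
    print_idx_type(IDX_BTREE);
    print_idx_type(IDX_NTYPES);
    return 0;
}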
src/H5VM.c (15 changed lines)
@@ -418,8 +418,7 @@ H5VM_hyper_disjointp(unsigned n,
         HGOTO_DONE(TRUE)
 
     for(u = 0; u < n; u++) {
-        HDassert(size1[u] < HSIZET_MAX);
-        HDassert(size2[u] < HSIZET_MAX);
+        HDcompile_assert(sizeof(uint32_t) <= sizeof(hsize_t));
 
         if(0 == size1[u] || 0 == size2[u])
             HGOTO_DONE(TRUE)
@@ -1264,16 +1263,14 @@ done:
  * Programmer:  Quincey Koziol
  *              Monday, April 21, 2003
  *
- * Modifications:
- *
  *-------------------------------------------------------------------------
  */
 herr_t
 H5VM_chunk_index(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
     const hsize_t *down_nchunks, hsize_t *chunk_idx)
 {
-    hsize_t  scaled_coord[H5VM_HYPER_NDIMS];   /* Scaled, coordinates, in terms of chunks */
-    unsigned u;      /* Local index variable */
+    hsize_t  scaled_coord[H5VM_HYPER_NDIMS];    /* Scaled, coordinates, in terms of chunks */
+    unsigned u;                                 /* Local index variable */
 
     FUNC_ENTER_NOAPI_NOINIT_NOERR
 
@@ -1284,11 +1281,11 @@ H5VM_chunk_index(unsigned ndims, const hsize_t *coord, const uint32_t *chunk,
     HDassert(chunk_idx);
 
     /* Compute the scaled coordinates for actual coordinates */
-    for(u=0; u<ndims; u++)
-        scaled_coord[u]=coord[u]/chunk[u];
+    for(u = 0; u < ndims; u++)
+        scaled_coord[u] = coord[u] / chunk[u];
 
     /* Compute the chunk index */
-    *chunk_idx=H5VM_array_offset_pre(ndims,down_nchunks,scaled_coord);  /*lint !e772 scaled_coord will always be initialized */
+    *chunk_idx = H5VM_array_offset_pre(ndims,down_nchunks,scaled_coord);        /*lint !e772 scaled_coord will always be initialized */
 
     FUNC_LEAVE_NOAPI(SUCCEED)
 } /* end H5VM_chunk_index() */
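H5VM_chunk_index() above works in two steps: scale the element coordinates down to chunk coordinates, then fold them into a linear index using precomputed "down" products (the number of chunks spanned by one step in each dimension), which H5VM_array_offset_pre() supplies inside the library. A self-contained sketch of the same arithmetic, with the fold written out inline and made-up example values:

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch of the two steps shown in the diff:
 * 1) scale element coordinates to chunk coordinates, and
 * 2) fold them into a linear index with "down" products
 *    (down_nchunks[u] = number of chunks in all faster-varying dims). */
static uint64_t
chunk_index(unsigned ndims, const uint64_t *coord, const uint32_t *chunk,
            const uint64_t *down_nchunks)
{
    uint64_t idx = 0;
    unsigned u;

    for(u = 0; u < ndims; u++)
        idx += (coord[u] / chunk[u]) * down_nchunks[u];

    return idx;
}

int
main(void)
{
    /* 2-D example: 100x100 dataset, 10x20 chunks -> a 10x5 grid of chunks */
    uint64_t coord[2]        = {35, 67};    /* element coordinates */
    uint32_t chunk[2]        = {10, 20};    /* chunk dimensions */
    uint64_t down_nchunks[2] = {5, 1};      /* chunks per step in dim 0, then 1 */

    /* scaled coordinates are (3, 3), so the index is 3*5 + 3 = 18 */
    printf("chunk index = %llu\n",
           (unsigned long long)chunk_index(2, coord, chunk, down_nchunks));
    return 0;
}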
test/dsets.c (14 changed lines)
@@ -23,19 +23,19 @@
 #include <stdlib.h>
 #include <time.h>
 
-/*
- * This file needs to access private information from the H5Z package.
- */
-#define H5Z_PACKAGE
-
-
 #include "h5test.h"
 #include "H5srcdir.h"
-#include "H5Zpkg.h"
 #ifdef H5_HAVE_SZLIB_H
 #   include "szlib.h"
 #endif
 
+/*
+ * This file needs to access private datatypes from the H5Z package.
+ */
+#define H5Z_PACKAGE
+#include "H5Zpkg.h"
+
+
 const char *FILENAME[] = {
     "dataset",
     "compact_dataset",