[svn-r16973] Description:
Refactor chunk cache entry information to remove some [actually] unused fields.

Tested on:
    FreeBSD/32 6.3 (duty) in debug mode
    FreeBSD/64 6.3 (liberty) w/C++ & FORTRAN, in debug mode
    Linux/32 2.6 (jam) w/PGI compilers, w/C++ & FORTRAN, w/threadsafe, in debug mode
    Linux/64-amd64 2.6 (smirom) w/Intel compilers w/default API=1.6.x, w/C++ & FORTRAN, in production mode
    Solaris/32 2.10 (linew) w/deprecated symbols disabled, w/C++ & FORTRAN, w/szip filter, in production mode
    Linux/64-ia64 2.6 (cobalt) w/Intel compilers, w/C++ & FORTRAN, in production mode
    Linux/64-ia64 2.4 (tg-login3) w/parallel, w/FORTRAN, in debug mode
    Linux/64-amd64 2.6 (abe) w/parallel, w/FORTRAN, in production mode
    Mac OS X/32 10.5.6 (amazon) in debug mode
    Mac OS X/32 10.5.6 (amazon) w/C++ & FORTRAN, w/threadsafe, in production mode
parent d2ef2af9b2
commit 100ff923d9
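The gist of the change: every chunk of a chunked dataset has the same pre-filter size, and that size is already recorded once in the dataset's chunked-layout information (dset->shared->layout.u.chunk.size), so the per-cache-entry chunk_size and alloc_size copies can be dropped and callers can read the size from the layout instead. A minimal standalone C sketch of that substitution, using hypothetical simplified types rather than the real HDF5 structures:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical, simplified stand-ins for the structures involved; the real
     * HDF5 types (H5O_layout_t, H5D_rdcc_ent_t) carry much more state. */
    typedef struct layout_t {
        uint32_t chunk_size;            /* pre-filter size of every chunk, in bytes */
    } layout_t;

    /* Cache entry before the refactor: keeps its own copy of the chunk size. */
    typedef struct entry_old_t {
        uint32_t chunk_size;            /* duplicate of layout_t.chunk_size */
        size_t   alloc_size;            /* bytes allocated for 'chunk'      */
        uint8_t *chunk;                 /* unfiltered chunk data            */
    } entry_old_t;

    /* Cache entry after the refactor: the size lives only in the layout. */
    typedef struct entry_new_t {
        uint8_t *chunk;                 /* unfiltered chunk data            */
    } entry_new_t;

    /* Flush-style copy that sizes its buffer from the layout instead of from
     * per-entry fields, mirroring the substitution made in the patch. */
    static uint8_t *copy_for_pipeline(const layout_t *layout, const entry_new_t *ent)
    {
        size_t   alloc = layout->chunk_size;        /* previously: ent->alloc_size  */
        uint8_t *buf = malloc(alloc);

        if(buf)
            memcpy(buf, ent->chunk, layout->chunk_size);    /* previously: ent->chunk_size */
        return buf;
    }

    int main(void)
    {
        layout_t    layout = { .chunk_size = 8 };
        uint8_t     data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        entry_new_t ent = { .chunk = data };
        uint8_t    *copy = copy_for_pipeline(&layout, &ent);

        if(copy) {
            printf("copied %u bytes\n", (unsigned)layout.chunk_size);
            free(copy);
        }
        return 0;
    }

In the actual patch below, the same substitution appears as udata.nbytes = dset->shared->layout.u.chunk.size and size_t alloc = udata.nbytes in H5D_chunk_flush_entry(), with the two fields removed from H5D_rdcc_ent_t.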
@@ -2159,12 +2159,12 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
     udata.common.mesg = &dset->shared->layout;
     udata.common.offset = ent->offset;
     udata.filter_mask = 0;
-    udata.nbytes = ent->chunk_size;
+    udata.nbytes = dset->shared->layout.u.chunk.size;
     udata.addr = ent->chunk_addr;
 
     /* Should the chunk be filtered before writing it to disk? */
     if(dset->shared->dcpl_cache.pline.nused) {
-        size_t alloc = ent->alloc_size;     /* Bytes allocated for BUF */
+        size_t alloc = udata.nbytes;        /* Bytes allocated for BUF */
         size_t nbytes;                      /* Chunk size (in bytes) */
 
         if(!reset) {
@@ -2173,10 +2173,10 @@ H5D_chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
              * the pipeline because we'll want to save the original buffer
              * for later.
              */
-            H5_ASSIGN_OVERFLOW(alloc, ent->chunk_size, uint32_t, size_t);
+            H5_ASSIGN_OVERFLOW(alloc, udata.nbytes, uint32_t, size_t);
            if(NULL == (buf = H5MM_malloc(alloc)))
                HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for pipeline")
-            HDmemcpy(buf, ent->chunk, ent->chunk_size);
+            HDmemcpy(buf, ent->chunk, udata.nbytes);
         } /* end if */
         else {
             /*
@@ -2325,7 +2325,7 @@ H5D_chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t *
     /* Remove from cache */
     rdcc->slot[ent->idx] = NULL;
     ent->idx = UINT_MAX;
-    rdcc->nbytes_used -= ent->chunk_size;
+    rdcc->nbytes_used -= dset->shared->layout.u.chunk.size;
     --rdcc->nused;
 
     /* Free */
@@ -2395,8 +2395,8 @@ H5D_chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id,
     for(i = 0; i < nmeth && (rdcc->nbytes_used + size) > total; i++) {
         if(0 == i && p[0] && !p[0]->locked &&
                 ((0 == p[0]->rd_count && 0 == p[0]->wr_count) ||
-                (0 == p[0]->rd_count && p[0]->chunk_size == p[0]->wr_count) ||
-                (p[0]->chunk_size == p[0]->rd_count && 0 == p[0]->wr_count))) {
+                (0 == p[0]->rd_count && dset->shared->layout.u.chunk.size == p[0]->wr_count) ||
+                (dset->shared->layout.u.chunk.size == p[0]->rd_count && 0 == p[0]->wr_count))) {
             /*
              * Method 0: Preempt entries that have been completely written
              * and/or completely read but not entries that are partially
@@ -2630,8 +2630,6 @@ H5D_chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
         ent->locked = 0;
         ent->dirty = FALSE;
         ent->chunk_addr = chunk_addr;
-        H5_ASSIGN_OVERFLOW(ent->chunk_size, chunk_size, size_t, uint32_t);
-        ent->alloc_size = chunk_size;
         for(u = 0; u < layout->u.chunk.ndims; u++)
             ent->offset[u] = io_info->store->chunk.offset[u];
         H5_ASSIGN_OVERFLOW(ent->rd_count, chunk_size, size_t, uint32_t);
@@ -2766,8 +2764,6 @@ H5D_chunk_unlock(const H5D_io_info_t *io_info, const H5D_chunk_ud_t *udata,
         HDmemcpy(x.offset, io_info->store->chunk.offset, layout->u.chunk.ndims * sizeof(x.offset[0]));
         HDassert(layout->u.chunk.size > 0);
         x.chunk_addr = udata->addr;
-        x.chunk_size = layout->u.chunk.size;
-        H5_ASSIGN_OVERFLOW(x.alloc_size, x.chunk_size, uint32_t, size_t);
         x.chunk = (uint8_t *)chunk;
 
         if(H5D_chunk_flush_entry(io_info->dset, io_info->dxpl_id, io_info->dxpl_cache, &x, TRUE) < 0)
@@ -3653,7 +3649,7 @@ H5D_chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id, const hsize_t *old_dims)
             HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, H5_ITER_ERROR, "memory allocation failed for stack node")
 
         /* Set up chunk record for fill routine */
-        tmp_stack->rec.nbytes = ent->chunk_size;
+        tmp_stack->rec.nbytes = dset->shared->layout.u.chunk.size;
         HDmemcpy(tmp_stack->rec.offset, ent->offset, sizeof(tmp_stack->rec.offset));
         tmp_stack->rec.filter_mask = 0; /* Since the chunk is already in cache this doesn't matter */
         tmp_stack->rec.chunk_addr = ent->chunk_addr;

@@ -486,8 +486,6 @@ typedef struct H5D_rdcc_ent_t {
     uint32_t rd_count;          /*bytes remaining to be read        */
     uint32_t wr_count;          /*bytes remaining to be written     */
     haddr_t chunk_addr;         /*address of chunk in file          */
-    uint32_t chunk_size;        /*size of a chunk                   */
-    size_t alloc_size;          /*amount allocated for the chunk    */
     uint8_t *chunk;             /*the unfiltered chunk data         */
     unsigned idx;               /*index in hash table               */
     struct H5D_rdcc_ent_t *next;/*next item in doubly-linked list   */
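A second hedged sketch of the bookkeeping consequence visible in the H5D_chunk_cache_evict and H5D_chunk_cache_prune hunks above: because every cached entry of a dataset now accounts for exactly the layout's chunk size, the cache's running byte total can be maintained without per-entry sizes. The names below are hypothetical simplifications, not the real rdcc structures:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified cache bookkeeping: every cached chunk of a given
     * dataset occupies exactly the layout's chunk size, so insertion and
     * eviction adjust the running total without consulting the entry itself. */
    typedef struct cache_t {
        size_t   nbytes_used;       /* bytes currently held by cached chunks */
        unsigned nused;             /* number of cached chunks               */
    } cache_t;

    static void cache_insert(cache_t *rdcc, uint32_t layout_chunk_size)
    {
        rdcc->nbytes_used += layout_chunk_size;
        rdcc->nused++;
    }

    static void cache_evict(cache_t *rdcc, uint32_t layout_chunk_size)
    {
        rdcc->nbytes_used -= layout_chunk_size;     /* previously: ent->chunk_size */
        rdcc->nused--;
    }

    int main(void)
    {
        cache_t rdcc = { 0, 0 };

        cache_insert(&rdcc, 4096);
        cache_insert(&rdcc, 4096);
        cache_evict(&rdcc, 4096);
        printf("%zu bytes in %u cached chunks\n", rdcc.nbytes_used, rdcc.nused);
        return 0;
    }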