Mirror of https://github.com/HDFGroup/hdf5.git (synced 2024-11-27 02:10:55 +08:00)
[svn-r8303] Purpose:
    Code optimization

Description:
    Compute the size of a chunk once, when the layout information is set,
    instead of each time I/O is performed on the chunk.

Platforms tested:
    h5committest
    Solaris 2.7 (arabica)
This commit is contained in:
    parent 43d3a9bfe8
    commit 394dace97b

src/H5D.c (12)
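Only src/H5D.c survives in the file list above, but the hunks below also touch the chunked-storage (H5F_istore_*) routines and the object-header layout message; the file names for those parts were lost from this page. The substance of the change is the same everywhere: the per-chunk byte count is computed once, when the chunk layout is set or decoded, and cached in the layout struct, instead of being re-derived from the dimension array on every lock, read, write, or allocation. A minimal standalone sketch of the pattern, not the HDF5 code itself (layout_t, layout_set_chunk, and the assumption that the last dim[] slot carries the element size are illustrative, inferred from how the diff reads):

#include <assert.h>
#include <stdint.h>

#define MAX_NDIMS 33                   /* stand-in for H5O_LAYOUT_NDIMS */

typedef struct layout_t {
    unsigned ndims;                    /* number of chunk "dimensions"             */
    uint64_t dim[MAX_NDIMS];           /* chunk dims; last slot holds element size */
    uint64_t chunk_size;               /* cached total chunk size in bytes         */
} layout_t;

/* Before the change: every I/O path re-derived the byte count like this. */
uint64_t chunk_bytes_per_io(const layout_t *layout)
{
    uint64_t size = 1;
    for (unsigned u = 0; u < layout->ndims; u++)
        size *= layout->dim[u];
    return size;
}

/* After the change: compute the product once, when the layout is set... */
void layout_set_chunk(layout_t *layout, unsigned ndims,
                      const uint64_t *dims, uint64_t elmt_size)
{
    layout->ndims = ndims + 1;         /* one extra slot for the element size */
    for (unsigned u = 0; u < ndims; u++)
        layout->dim[u] = dims[u];
    layout->dim[ndims] = elmt_size;

    layout->chunk_size = 1;
    for (unsigned u = 0; u < layout->ndims; u++)
        layout->chunk_size *= layout->dim[u];
}

/* ...and let every I/O path read the cached value instead. */
uint64_t chunk_bytes_cached(const layout_t *layout)
{
    assert(layout->chunk_size > 0);
    return layout->chunk_size;
}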
src/H5D.c

@@ -1845,7 +1845,6 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
     const H5T_t        *type;                   /* Datatype for dataset */
     H5D_t              *new_dset = NULL;
     int                 i, ndims;
-    hsize_t             comp_data_size;
     unsigned            u;
     hsize_t             max_dim[H5O_LAYOUT_NDIMS]={0};
     H5F_t              *file=NULL;
@@ -2041,11 +2040,16 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
             /* Set the dataset's chunk sizes from the property list's chunk sizes */
             for (u=0; u<new_dset->layout.ndims-1; u++)
                 new_dset->layout.dim[u] = chunk_size[u];
+
+            /* Compute the total size of a chunk */
+            for (u=0, new_dset->layout.chunk_size=1; u<new_dset->layout.ndims; u++)
+                new_dset->layout.chunk_size *= new_dset->layout.dim[u];
             break;

         case H5D_COMPACT:
             {
                 hssize_t tmp_size;      /* Temporary holder for raw data size */
+                hsize_t comp_data_size;

                 /*
                  * Compact dataset is stored in dataset object header message of
@@ -2060,10 +2064,8 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
                 comp_data_size=H5O_MAX_SIZE-H5O_layout_meta_size(file, &(new_dset->layout));
                 if(new_dset->layout.size > comp_data_size)
                     HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "compact dataset size is bigger than header message maximum size")
-                if ((ndims=H5S_get_simple_extent_dims(space, new_dset->layout.dim, max_dim))<0)
-                    HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize dimension size of compact dataset storage")
-                /* remember to check if size is small enough to fit header message */
-
+                if (H5S_get_simple_extent_dims(space, new_dset->layout.dim, NULL)<0)
+                    HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize dimension size of compact dataset storage");
             }
             break;
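The compact-dataset branch above also guards against raw data that would overflow the object header message: the usable space is whatever remains after the layout metadata itself. A hedged sketch of that check; MAX_MSG_SIZE and layout_meta_size() are made-up stand-ins for H5O_MAX_SIZE and H5O_layout_meta_size():

#include <stdio.h>
#include <stddef.h>

#define MAX_MSG_SIZE 65536u            /* stand-in for the header-message limit */

/* Hypothetical helper: bytes of layout metadata that share the message. */
size_t layout_meta_size(unsigned ndims)
{
    return 16 + (size_t)ndims * 8;     /* made-up encoding cost, for illustration */
}

/* Return 0 if a compact dataset of data_size bytes fits, -1 otherwise. */
int check_compact_fits(size_t data_size, unsigned ndims)
{
    size_t comp_data_size = MAX_MSG_SIZE - layout_meta_size(ndims);

    if (data_size > comp_data_size) {
        fprintf(stderr, "compact dataset size is bigger than header message maximum size\n");
        return -1;
    }
    return 0;
}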
@@ -212,8 +212,8 @@ H5B_class_t H5B_ISTORE[1] = {{
 /* Declare a free list to manage H5F_rdcc_ent_t objects */
 H5FL_DEFINE_STATIC(H5F_rdcc_ent_t);

-/* Declare a PQ free list to manage the H5F_rdcc_ent_ptr_t array information */
-H5FL_ARR_DEFINE_STATIC(H5F_rdcc_ent_ptr_t,-1);
+/* Declare a free list to manage the H5F_rdcc_ent_ptr_t sequence information */
+H5FL_SEQ_DEFINE_STATIC(H5F_rdcc_ent_ptr_t);


 /*-------------------------------------------------------------------------
@@ -882,7 +882,7 @@ H5F_istore_init (H5F_t *f)
     HDmemset (rdcc, 0, sizeof(H5F_rdcc_t));
     if (f->shared->rdcc_nbytes>0 && f->shared->rdcc_nelmts>0) {
         rdcc->nslots = f->shared->rdcc_nelmts;
-        rdcc->slot = H5FL_ARR_CALLOC (H5F_rdcc_ent_ptr_t,rdcc->nslots);
+        rdcc->slot = H5FL_SEQ_CALLOC (H5F_rdcc_ent_ptr_t,rdcc->nslots);
         if (NULL==rdcc->slot)
             HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
     }
@@ -1183,7 +1183,7 @@ H5F_istore_dest (H5F_t *f, hid_t dxpl_id)
     if (nerrors)
         HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");

-    H5FL_ARR_FREE (H5F_rdcc_ent_ptr_t,rdcc->slot);
+    H5FL_SEQ_FREE (H5F_rdcc_ent_ptr_t,rdcc->slot);
     HDmemset (rdcc, 0, sizeof(H5F_rdcc_t));

 done:
@@ -1357,7 +1357,6 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
     unsigned            u;              /*counters              */
     H5F_istore_ud1_t    udata;          /*B-tree pass-through   */
     size_t              chunk_size=0;   /*size of a chunk       */
-    hsize_t             tempchunk_size;
     herr_t              status;         /*func return status    */
     void                *chunk=NULL;    /*the file chunk        */
     void                *ret_value;     /*return value          */
@@ -1405,9 +1404,8 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
         HDfflush(stderr);
 #endif
         rdcc->nhits++;
-        for (u=0, tempchunk_size=1; u<layout->ndims; u++)
-            tempchunk_size *= layout->dim[u];
-        H5_ASSIGN_OVERFLOW(chunk_size,tempchunk_size,hsize_t,size_t);
+        assert(layout->chunk_size>0);
+        H5_ASSIGN_OVERFLOW(chunk_size,layout->chunk_size,hsize_t,size_t);
         if (NULL==(chunk=H5MM_malloc (chunk_size)))
             HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for raw data chunk");

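In the cache-hit path above, the cached hsize_t value is narrowed to a size_t before the malloc call; H5_ASSIGN_OVERFLOW is the HDF5 macro used for that checked narrowing. A rough standalone equivalent of the check (assuming a 64-bit unsigned hsize_t, which is an assumption of this sketch):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t hsize_t;              /* assumption for this sketch only */

/* Narrow an hsize_t byte count to size_t, asserting that no bits are lost,
 * which is roughly what H5_ASSIGN_OVERFLOW verifies in debug builds. */
size_t narrow_to_size_t(hsize_t value)
{
    assert(value <= (hsize_t)SIZE_MAX);
    return (size_t)value;
}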
@@ -1417,11 +1415,10 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
          * Not in the cache.  Read it from the file and count this as a miss
          * if it's in the file or an init if it isn't.
          */
-        for (u=0, tempchunk_size=1; u<layout->ndims; u++) {
+        for (u=0; u<layout->ndims; u++)
             udata.key.offset[u] = offset[u];
-            tempchunk_size *= layout->dim[u];
-        }
-        H5_ASSIGN_OVERFLOW(chunk_size,tempchunk_size,hsize_t,size_t);
+        assert(layout->chunk_size>0);
+        H5_ASSIGN_OVERFLOW(chunk_size,layout->chunk_size,hsize_t,size_t);
         udata.mesg = *layout;
         udata.addr = HADDR_UNDEF;
         status = H5B_find (f, dxpl_id, H5B_ISTORE, layout->addr, &udata);
@@ -1637,17 +1634,15 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
          */
         if (dirty) {
             H5F_rdcc_ent_t x;
-            hsize_t tempchunk_size;

             HDmemset (&x, 0, sizeof x);
             x.dirty = TRUE;
             x.layout = H5O_copy (H5O_LAYOUT_ID, layout, NULL);
             x.pline = H5O_copy (H5O_PLINE_ID, pline, NULL);
-            for (u=0, tempchunk_size=1; u<layout->ndims; u++) {
+            for (u=0; u<layout->ndims; u++)
                 x.offset[u] = offset[u];
-                tempchunk_size *= layout->dim[u];
-            }
-            H5_ASSIGN_OVERFLOW(x.chunk_size,tempchunk_size,hsize_t,size_t);
+            assert(layout->chunk_size>0);
+            H5_ASSIGN_OVERFLOW(x.chunk_size,layout->chunk_size,hsize_t,size_t);
             x.alloc_size = x.chunk_size;
             x.chunk = chunk;

@@ -1699,7 +1694,6 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
     size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
     void *buf)
 {
-    hsize_t chunk_size;         /* Chunk size, in bytes */
     haddr_t chunk_addr;         /* Chunk address on disk */
     hssize_t chunk_coords_in_elmts[H5O_LAYOUT_NDIMS];
     size_t u;                   /* Local index variables */
@@ -1719,10 +1713,6 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
     assert(mem_offset_arr);
     assert(buf);

-    /* Compute chunk size */
-    for (u=0, chunk_size=1; u<layout->ndims; u++)
-        chunk_size *= layout->dim[u];
-
 #ifndef NDEBUG
     for (u=0; u<layout->ndims; u++)
         assert(chunk_coords[u]>=0);     /*negative coordinates not supported (yet) */
@@ -1739,7 +1729,7 @@ for(u=0; u<layout->ndims; u++)
 #endif /* QAK */
     chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords_in_elmts);
 #ifdef QAK
-HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,chunk_size);
+HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size);
 HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
 HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
 HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
@@ -1752,9 +1742,9 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
      * for the chunk has been defined, then don't load the chunk into the
      * cache, just write the data to it directly.
      */
-    if (chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 &&
+    if (layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 &&
             chunk_addr!=HADDR_UNDEF) {
-        if ((ret_value=H5F_contig_readvv(f, chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
+        if ((ret_value=H5F_contig_readvv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
             HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file");
     } /* end if */
     else {
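The read path above skips the chunk cache when the chunk (whose size is now read straight from the layout) would not fit in the cache's byte budget, there is no filter pipeline to apply, and the chunk already has a file address; in that case the data is streamed directly from disk. A hedged sketch of that decision, with all names hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ADDR_UNDEF UINT64_MAX          /* stand-in for HADDR_UNDEF */

/* Decide whether a chunk read should bypass the chunk cache entirely. */
bool read_bypasses_cache(uint64_t chunk_size, size_t cache_nbytes,
                         unsigned nfilters, uint64_t chunk_addr)
{
    return chunk_size > cache_nbytes   /* chunk would not fit in the cache */
        && nfilters == 0               /* no filter pipeline to apply      */
        && chunk_addr != ADDR_UNDEF;   /* chunk already allocated on disk  */
}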
@@ -1811,7 +1801,6 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
     size_t mem_max_nseq, size_t *mem_curr_seq, size_t mem_len_arr[], hsize_t mem_offset_arr[],
     const void *buf)
 {
-    hsize_t chunk_size;         /* Chunk size, in bytes */
     haddr_t chunk_addr;         /* Chunk address on disk */
     hssize_t chunk_coords_in_elmts[H5O_LAYOUT_NDIMS];
     size_t u;                   /* Local index variables */
@@ -1831,10 +1820,6 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
     assert(mem_offset_arr);
     assert(buf);

-    /* Compute chunk size */
-    for (u=0, chunk_size=1; u<layout->ndims; u++)
-        chunk_size *= layout->dim[u];
-
 #ifndef NDEBUG
     for (u=0; u<layout->ndims; u++)
         assert(chunk_coords[u]>=0);     /*negative coordinates not supported (yet) */
@@ -1851,7 +1836,7 @@ for(u=0; u<layout->ndims; u++)
 #endif /* QAK */
     chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords_in_elmts);
 #ifdef QAK
-HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,chunk_size);
+HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size);
 HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
 HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
 HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
@@ -1869,14 +1854,14 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
      * writing to other elements in the same chunk. Do a direct
      * write-through of only the elements requested.
      */
-    if ((chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 && chunk_addr!=HADDR_UNDEF)
+    if ((layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 && chunk_addr!=HADDR_UNDEF)
             || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & f->shared->flags))) {
 #ifdef H5_HAVE_PARALLEL
         /* Additional sanity check when operating in parallel */
         if (chunk_addr==HADDR_UNDEF || dcpl_cache->pline.nused>0)
             HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
 #endif /* H5_HAVE_PARALLEL */
-        if ((ret_value=H5F_contig_writevv(f, chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
+        if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
             HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
     } /* end if */
     else {
@@ -1889,7 +1874,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
          * Lock the chunk, copy from application to chunk, then unlock the
          * chunk.
          */
-        if(chunk_max_nseq==1 && chunk_len_arr[0] == chunk_size)
+        if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->chunk_size)
            relax = TRUE;
        else
            relax = FALSE;
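The write path ends by deciding whether the chunk lock can be "relaxed": if a single sequence overwrites the whole chunk (now compared against the cached layout->chunk_size), the existing chunk contents never need to be read back first. A small hedged sketch of the same test:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* True when one contiguous sequence covers the entire chunk, so the old
 * chunk contents do not have to be pre-read before being overwritten.
 * (Sketch only; the real decision sits in the writevv hunk above.) */
bool can_skip_chunk_preread(size_t nseq, const size_t *len_arr,
                            uint64_t chunk_size)
{
    return nseq == 1 && len_arr[0] == chunk_size;
}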
@@ -2177,10 +2162,9 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
      * Setup indice to go through all chunks. (Future improvement
      * should allocate only chunks that have no file space assigned yet.
      */
-    for (u=0, chunk_size=1; u<layout->ndims; u++) {
+    for (u=0; u<layout->ndims; u++)
         chunk_offset[u] = 0;
-        chunk_size *= layout->dim[u];
-    } /* end for */
+    chunk_size = layout->chunk_size;

     /* Check the dataset's fill-value status */
     if (H5P_is_fill_value_defined(&fill, &fill_status) < 0)
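The allocation hunk above zeroes the starting chunk offset and then takes the byte count from the cached field; the surrounding routine walks every chunk in the dataset. One plausible way to step through chunk origins in row-major order is sketched below (illustrative only, not the HDF5 loop):

#include <stdbool.h>
#include <stdint.h>

#define NDIMS 3                        /* fixed rank, for the sketch */

/* Advance a chunk-origin offset to the next chunk in row-major order.
 * Returns false once every chunk inside the dataset extent was visited. */
bool next_chunk_offset(uint64_t offset[NDIMS],
                       const uint64_t chunk_dim[NDIMS],
                       const uint64_t dset_dim[NDIMS])
{
    for (int i = NDIMS - 1; i >= 0; i--) {
        offset[i] += chunk_dim[i];
        if (offset[i] < dset_dim[i])
            return true;               /* still inside the extent here        */
        offset[i] = 0;                 /* wrap and carry to the next dimension */
    }
    return false;                      /* carried past the slowest dimension  */
}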
@@ -147,6 +147,11 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, const uint8_t *p, H5O_shared_t
             p += mesg->size;
         }
     }
+    else if(mesg->type == H5D_CHUNKED) {
+        /* Compute chunk size */
+        for (u=0, mesg->chunk_size=1; u<mesg->ndims; u++)
+            mesg->chunk_size *= mesg->dim[u];
+    } /* end if */

     /* Set return value */
     ret_value=mesg;
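Because chunk_size is a derived, cached quantity rather than something stored in the file, the layout-message decoder above rebuilds it from the decoded chunk dimensions, so every reader of the message sees the precomputed value. A minimal hedged sketch of that backfill step (msg_t is hypothetical):

#include <stdint.h>

#define MAX_NDIMS 33                   /* stand-in for H5O_LAYOUT_NDIMS */

typedef struct msg_t {
    unsigned ndims;
    uint64_t dim[MAX_NDIMS];
    uint64_t chunk_size;
} msg_t;

/* Rebuild the cached chunk size right after decoding a chunked layout message. */
void backfill_chunk_size(msg_t *mesg)
{
    mesg->chunk_size = 1;
    for (unsigned u = 0; u < mesg->ndims; u++)
        mesg->chunk_size *= mesg->dim[u];
}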
@@ -129,6 +129,7 @@ typedef struct H5O_layout_t {
     haddr_t addr;                       /*file address of data or B-tree    */
     unsigned ndims;                     /*num dimensions in stored data     */
     hsize_t dim[H5O_LAYOUT_NDIMS];      /*size of data or chunk in bytes    */
+    hsize_t chunk_size;                 /*size of chunk in bytes            */
     hbool_t dirty;                      /*dirty flag for compact dataset    */
     size_t size;                        /*size of compact dataset in bytes  */
     void *buf;                          /*buffer for compact dataset        */