Fix for HDFFV-7853 H5Ocopy doesn't work with open identifiers
Changes were made so that raw data for dataset objects is copied from cached information when possible, instead of flushing the objects to the file and reading the data back in again.
This commit is contained in:
parent 7a25041caf
commit 980d5b4266
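The core idea of the fix -- before reading a chunk's raw data from the source file, probe the open dataset's in-memory chunk cache and copy the bytes straight from the cached entry when its coordinates match -- can be illustrated outside of HDF5. The standalone C sketch below uses hypothetical names (chunk_cache_t, cache_ent_t, hash_val, copy_chunk) and a toy direct-mapped table; it only demonstrates the lookup-then-memcpy pattern that the patch adds to H5D__chunk_copy_cb, not the library's actual data structures or API.

/* Minimal sketch of the idea behind this change, with made-up types and
 * names (not the real HDF5 internals): before reading a chunk's raw data
 * from the source file, probe the open dataset's chunk cache and copy the
 * bytes straight from the cached entry when the coordinates match. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define NDIMS     2
#define NSLOTS    8
#define CHUNKSIZE 16

typedef struct cache_ent_t {
    size_t        scaled[NDIMS];     /* chunk coordinates, scaled by chunk dims */
    unsigned char data[CHUNKSIZE];   /* unfiltered chunk bytes held in memory */
    bool          valid;
} cache_ent_t;

typedef struct chunk_cache_t {
    cache_ent_t slot[NSLOTS];        /* trivial direct-mapped "hash table" */
} chunk_cache_t;

/* Hypothetical hash: fold the scaled coordinates into a slot index. */
static size_t hash_val(const size_t scaled[NDIMS])
{
    return (scaled[0] * 31u + scaled[1]) % NSLOTS;
}

/* Copy one chunk into 'buf': try the cache first, fall back to "file" I/O. */
static int copy_chunk(const chunk_cache_t *cache, const size_t scaled[NDIMS],
                      unsigned char *buf)
{
    const cache_ent_t *ent = &cache->slot[hash_val(scaled)];

    /* Verify that the slot actually holds the requested chunk */
    if(ent->valid && memcmp(ent->scaled, scaled, sizeof(ent->scaled)) == 0) {
        memcpy(buf, ent->data, CHUNKSIZE);   /* cached raw data, no file read */
        return 0;
    }

    /* Fallback: in the real library this would be a raw block read from the
     * source file; here we just report the miss. */
    fprintf(stderr, "cache miss for chunk (%zu, %zu): would read from file\n",
            scaled[0], scaled[1]);
    return 1;
}

int main(void)
{
    chunk_cache_t cache = {0};
    size_t coords[NDIMS] = {1, 2};
    unsigned char out[CHUNKSIZE];

    /* Populate one cache entry as if the dataset were open with cached data */
    cache_ent_t *ent = &cache.slot[hash_val(coords)];
    memcpy(ent->scaled, coords, sizeof(ent->scaled));
    memset(ent->data, 0xAB, sizeof(ent->data));
    ent->valid = true;

    if(copy_chunk(&cache, coords, out) == 0)
        printf("copied chunk from cache, first byte = 0x%02X\n", out[0]);

    return 0;
}

The same fall-back shape applies to the other storage layouts touched by the patch: compact storage points at the open dataset's in-memory layout buffer, and contiguous storage copies from the data sieve buffer when the requested range lies entirely inside it, reading from the file only on a miss.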
src/H5Dchunk.c | 147
@@ -188,6 +188,10 @@ typedef struct H5D_chunk_it_ud3_t {
 
     /* needed for copy object pointed by refs */
     H5O_copy_t *cpy_info;               /* Copy options */
+
+    /* needed for getting raw data from chunk cache */
+    hbool_t chunk_in_cache;
+    uint8_t *chunk;                     /* the unfiltered chunk data */
 } H5D_chunk_it_ud3_t;
 
 /* Callback info for iteration to dump index */
@@ -2280,8 +2284,8 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
         void *chunk = NULL;             /* Pointer to locked chunk buffer */
         htri_t cacheable;               /* Whether the chunk is cacheable */
 
         /* Set chunk's [scaled] coordinates */
         io_info->store->chunk.scaled = chunk_info->scaled;
 
         /* Determine if we should use the chunk cache */
         if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
@@ -2796,7 +2800,7 @@ H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *uda
         /* Retrieve the information from the cache */
         udata->chunk_block.offset = last->addr;
         udata->chunk_block.length = last->nbytes;
         udata->chunk_idx = last->chunk_idx;
         udata->filter_mask = last->filter_mask;
 
         /* Indicate that the data was found */
@@ -3065,19 +3069,19 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
 
     buf = ent->chunk;
     if(ent->dirty) {
         H5D_chk_idx_info_t idx_info;    /* Chunked index info */
         H5D_chunk_ud_t udata;           /* pass through B-tree */
         hbool_t must_alloc = FALSE;     /* Whether the chunk must be allocated */
         hbool_t need_insert = FALSE;    /* Whether the chunk needs to be inserted into the index */
 
         /* Set up user data for index callbacks */
         udata.common.layout = &dset->shared->layout.u.chunk;
         udata.common.storage = &dset->shared->layout.storage.u.chunk;
         udata.common.scaled = ent->scaled;
         udata.chunk_block.offset = ent->chunk_block.offset;
         udata.chunk_block.length = dset->shared->layout.u.chunk.size;
         udata.filter_mask = 0;
         udata.chunk_idx = ent->chunk_idx;
 
         /* Should the chunk be filtered before writing it to disk? */
         if(dset->shared->dcpl_cache.pline.nused
@@ -3155,8 +3159,8 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
         /* Create the chunk it if it doesn't exist, or reallocate the chunk
          * if its size changed.
          */
         if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert, ent->scaled) < 0)
             HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level")
 
         /* Update the chunk entry's info, in case it was allocated or relocated */
         ent->chunk_block.offset = udata.chunk_block.offset;
@@ -3170,7 +3174,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
             HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file")
 
         /* Insert the chunk record into the index */
         if(need_insert && dset->shared->layout.storage.u.chunk.ops->insert)
             if((dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, dset) < 0)
                 HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index")
 
@@ -3644,7 +3648,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
             if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, io_info->raw_dxpl_id, chunk) < 0)
                 HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk")
 
             if(old_pline && old_pline->nused) {
                 if(H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE,
                         &(udata->filter_mask),
                         io_info->dxpl_cache->err_detect,
@@ -3733,14 +3737,14 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
         if(NULL == (ent = H5FL_CALLOC(H5D_rdcc_ent_t)))
             HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry")
 
         ent->edge_chunk_state = disable_filters ? H5D_RDCC_DISABLE_FILTERS : 0;
         if(udata->new_unfilt_chunk)
             ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS;
 
         /* Initialize the new entry */
         ent->chunk_block.offset = chunk_addr;
         ent->chunk_block.length = chunk_alloc;
         ent->chunk_idx = udata->chunk_idx;
         HDmemcpy(ent->scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims);
         H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t);
         H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t);
@@ -3761,8 +3765,8 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata,
         } /* end if */
         else
             rdcc->head = rdcc->tail = ent;
         ent->tmp_next = NULL;
         ent->tmp_prev = NULL;
 
     } /* end if */
     else
@@ -5658,11 +5662,22 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
     H5Z_cb_t cb_struct;                 /* Filter failure callback struct */
     int ret_value = H5_ITER_CONT;       /* Return value */
 
 
     FUNC_ENTER_STATIC
 
     /* Get 'size_t' local value for number of bytes in chunk */
     H5_CHECKED_ASSIGN(nbytes, size_t, chunk_rec->nbytes, uint32_t);
 
+    /* Check for filtered chunks */
+    /* Check for an edge chunk that is not filtered */
+    if(pline && pline->nused) {
+        must_filter = TRUE;
+        if( (udata->common.layout->flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) &&
+            (H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, udata->common.layout->dim,
+                                              chunk_rec->scaled, udata->dset_dims)) )
+            must_filter = FALSE;
+    }
+
     /* Check parameter for type conversion */
     if(udata->do_convert) {
         if(H5T_detect_class(udata->dt_src, H5T_VLEN, FALSE) > 0)
@@ -5673,19 +5688,6 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
             HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, H5_ITER_ERROR, "unable to copy dataset elements")
     } /* end if */
 
-    /* Check for filtered chunks */
-    if((is_vlen || fix_ref) && pline && pline->nused) {
-        /* Check if we should disable filters on this chunk */
-        if(udata->common.layout->flags
-                & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) {
-            /* Check if the chunk is an edge chunk, and disable filters if so */
-            if(!H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, udata->common.layout->dim, chunk_rec->scaled, udata->dset_dims))
-                must_filter = TRUE;
-        } /* end if */
-        else
-            must_filter = TRUE;
-    } /* end if */
-
     /* Resize the buf if it is too small to hold the data */
     if(nbytes > buf_size) {
         void *new_buf;                  /* New buffer for data */
@@ -5708,12 +5710,51 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
         udata->buf_size = buf_size = nbytes;
     } /* end if */
 
-    /* read chunk data from the source file */
-    if(H5F_block_read(udata->file_src, H5FD_MEM_DRAW, chunk_rec->chunk_addr, nbytes, H5AC_rawdata_dxpl_id, buf) < 0)
-        HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk")
-
-    /* Need to uncompress variable-length & reference data elements */
-    if(must_filter) {
+    if(udata->chunk_in_cache && udata->chunk) {
+        HDassert(!H5F_addr_defined(chunk_rec->chunk_addr));
+        HDmemcpy(buf, udata->chunk, nbytes);
+        udata->chunk = NULL;
+    } else {
+        H5D_rdcc_ent_t *ent = NULL;     /* Cache entry */
+        unsigned idx;                   /* Index of chunk in cache, if present */
+        unsigned u;                     /* Counter */
+        H5D_shared_t *shared_fo = udata->cpy_info->shared_fo;
+
+        /* See if the written chunk is in the chunk cache */
+        if(shared_fo && shared_fo->cache.chunk.nslots > 0) {
+            /* Determine the chunk's location in the hash table */
+            idx = H5D__chunk_hash_val(shared_fo, chunk_rec->scaled);
+
+            /* Get the chunk cache entry for that location */
+            ent = shared_fo->cache.chunk.slot[idx];
+            if(ent) {
+                /* Speculatively set the 'found' flag */
+                udata->chunk_in_cache = TRUE;
+
+                /* Verify that the cache entry is the correct chunk */
+                for(u = 0; u < shared_fo->ndims; u++)
+                    if(chunk_rec->scaled[u] != ent->scaled[u]) {
+                        udata->chunk_in_cache = FALSE;
+                        break;
+                    } /* end if */
+            } /* end if */
+        } /* end if */
+
+        if(udata->chunk_in_cache) {
+            HDassert(H5F_addr_defined(chunk_rec->chunk_addr));
+            HDassert(H5F_addr_defined(ent->chunk_block.offset));
+
+            H5_CHECKED_ASSIGN(nbytes, size_t, shared_fo->layout.u.chunk.size, uint32_t);
+            HDmemcpy(buf, ent->chunk, nbytes);
+        } else {
+            /* read chunk data from the source file */
+            if(H5F_block_read(udata->file_src, H5FD_MEM_DRAW, chunk_rec->chunk_addr, nbytes, H5AC_rawdata_dxpl_id, buf) < 0)
+                HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk")
+        }
+    }
+
+    /* Need to uncompress filtered variable-length & reference data elements that are not found in chunk cache */
+    if(must_filter && (is_vlen || fix_ref) && !udata->chunk_in_cache) {
         unsigned filter_mask = chunk_rec->filter_mask;
 
         cb_struct.func = NULL;          /* no callback function when failed */
@@ -5777,8 +5818,8 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
     udata_dst.chunk_block.length = chunk_rec->nbytes;
     udata_dst.filter_mask = chunk_rec->filter_mask;
 
-    /* Need to compress variable-length & reference data elements before writing to file */
-    if(must_filter) {
+    /* Need to compress variable-length or reference data elements or a chunk found in cache before writing to file */
+    if(must_filter && (is_vlen || fix_ref || udata->chunk_in_cache) ) {
         if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0)
             HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed")
 #if H5_SIZEOF_SIZE_T > 4
@@ -5787,12 +5828,14 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata)
             HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, H5_ITER_ERROR, "chunk too large for 32-bit length")
 #endif /* H5_SIZEOF_SIZE_T > 4 */
         H5_CHECKED_ASSIGN(udata_dst.chunk_block.length, uint32_t, nbytes, size_t);
         udata->buf = buf;
         udata->buf_size = buf_size;
     } /* end if */
 
+    udata->chunk_in_cache = FALSE;
+
     udata_dst.chunk_idx = H5VM_array_offset_pre(udata_dst.common.layout->ndims - 1,
             udata_dst.common.layout->max_down_chunks, udata_dst.common.scaled);
 
     /* Allocate chunk in the file */
     if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert, udata_dst.common.scaled) < 0)
@@ -6047,11 +6090,35 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src,
     udata.dset_ndims = (unsigned)sndims;
     udata.dset_dims = curr_dims;
     udata.cpy_info = cpy_info;
+    udata.chunk_in_cache = FALSE;
+    udata.chunk = NULL;
 
     /* Iterate over chunks to copy data */
     if((storage_src->ops->iterate)(&idx_info_src, H5D__chunk_copy_cb, &udata) < 0)
         HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to copy data")
 
+    /* Iterate over the chunk cache to copy data for chunks with undefined address */
+    if(udata.cpy_info->shared_fo) {
+        H5D_rdcc_ent_t *ent, *next;
+        H5D_chunk_rec_t chunk_rec;
+        H5D_shared_t *shared_fo = (H5D_shared_t *)udata.cpy_info->shared_fo;
+
+        chunk_rec.nbytes = layout_src->size;
+        chunk_rec.filter_mask = 0;
+        chunk_rec.chunk_addr = HADDR_UNDEF;
+
+        for(ent = shared_fo->cache.chunk.head; ent; ent = next) {
+            if(!H5F_addr_defined(ent->chunk_block.offset)) {
+                HDmemcpy(chunk_rec.scaled, ent->scaled, sizeof(chunk_rec.scaled));
+                udata.chunk = ent->chunk;
+                udata.chunk_in_cache = TRUE;
+                if(H5D__chunk_copy_cb(&chunk_rec, &udata) < 0)
+                    HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy chunk data in cache")
+            }
+            next = ent->next;
+        } /* end for */
+    }
+
     /* I/O buffers may have been re-allocated */
     buf = udata.buf;
     bkg = udata.bkg;
@@ -420,7 +420,7 @@ H5D__compact_dest(H5D_t *dset, hid_t H5_ATTR_UNUSED dxpl_id)
  *-------------------------------------------------------------------------
  */
 herr_t
-H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src, H5F_t *f_dst,
+H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *_storage_src, H5F_t *f_dst,
     H5O_storage_compact_t *storage_dst, H5T_t *dt_src, H5O_copy_t *cpy_info,
     hid_t dxpl_id)
 {
@@ -431,6 +431,8 @@ H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src, H5F_t *f_dst
     void *bkg = NULL;                   /* Temporary buffer for copying data */
     void *reclaim_buf = NULL;           /* Buffer for reclaiming data */
     hid_t buf_sid = -1;                 /* ID for buffer dataspace */
+    H5D_shared_t *shared_fo = cpy_info->shared_fo;      /* Pointer to the shared struct for dataset object */
+    H5O_storage_compact_t *storage_src = _storage_src;  /* Pointer to storage_src */
     herr_t ret_value = SUCCEED;         /* Return value */
 
     FUNC_ENTER_PACKAGE
@@ -443,6 +445,10 @@ H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src, H5F_t *f_dst
     HDassert(storage_dst->buf);
     HDassert(dt_src);
 
+    /* If the dataset is open in the file, point to "layout" in the shared struct */
+    if(shared_fo != NULL)
+        storage_src = &(shared_fo->layout.storage.u.compact);
+
     /* Create datatype ID for src datatype, so it gets freed */
     if((tid_src = H5I_register(H5I_DATATYPE, dt_src, FALSE)) < 0)
         HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register source file datatype")
@@ -31,17 +31,18 @@
 /***********/
 /* Headers */
 /***********/
 #include "H5private.h"      /* Generic Functions            */
 #include "H5Dpkg.h"         /* Dataset functions            */
 #include "H5Eprivate.h"     /* Error handling               */
 #include "H5Fprivate.h"     /* Files                        */
 #include "H5FDprivate.h"    /* File drivers                 */
 #include "H5FLprivate.h"    /* Free Lists                   */
 #include "H5Iprivate.h"     /* IDs                          */
 #include "H5MFprivate.h"    /* File memory management       */
+#include "H5FOprivate.h"    /* File objects                 */
 #include "H5Oprivate.h"     /* Object headers               */
 #include "H5Pprivate.h"     /* Property lists               */
 #include "H5VMprivate.h"    /* Vector and array functions   */
 
 
 /****************/
@@ -1362,6 +1363,10 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src,
     hsize_t buf_dim[1] = {0};           /* Dimension for buffer */
     hbool_t is_vlen = FALSE;            /* Flag to indicate that VL type conversion should occur */
     hbool_t fix_ref = FALSE;            /* Flag to indicate that ref values should be fixed */
+    H5D_shared_t *shared_fo = cpy_info->shared_fo;  /* Pointer to the shared struct for dataset object */
+    hbool_t try_sieve = FALSE;          /* Try to get data from the sieve buffer */
+    haddr_t sieve_start = HADDR_UNDEF;  /* Start location of sieve buffer */
+    haddr_t sieve_end = HADDR_UNDEF;    /* End locations of sieve buffer */
     herr_t ret_value = SUCCEED;         /* Return value */
 
     FUNC_ENTER_PACKAGE
@@ -1485,6 +1490,16 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src,
     /* Loop over copying data */
     addr_src = storage_src->addr;
     addr_dst = storage_dst->addr;
+
+    /* If data sieving is enabled and the dataset is open in the file,
+       set up to copy data out of the sieve buffer if deemed possible later */
+    if(H5F_HAS_FEATURE(f_src, H5FD_FEAT_DATA_SIEVE) &&
+            shared_fo && shared_fo->cache.contig.sieve_buf) {
+        try_sieve = TRUE;
+        sieve_start = shared_fo->cache.contig.sieve_loc;
+        sieve_end = sieve_start + shared_fo->cache.contig.sieve_size;
+    }
+
     while(total_src_nbytes > 0) {
         /* Check if we should reduce the number of bytes to transfer */
         if(total_src_nbytes < src_nbytes) {
@@ -1510,14 +1525,20 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src,
             dst_nbytes = mem_nbytes = src_nbytes;
         } /* end if */
 
-        /* Read raw data from source file - use raw dxpl because passed in one is metadata */
-        if(H5F_block_read(f_src, H5FD_MEM_DRAW, addr_src, src_nbytes, H5AC_rawdata_dxpl_id, buf) < 0)
-            HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read raw data")
+        /* If the entire copy is within the sieve buffer, copy data from the sieve buffer */
+        if(try_sieve && (addr_src >= sieve_start) && ((addr_src + src_nbytes -1) < sieve_end)) {
+            unsigned char *base_sieve_buf = shared_fo->cache.contig.sieve_buf + (addr_src - sieve_start);
+
+            HDmemcpy(buf, base_sieve_buf, src_nbytes);
+        } else
+            /* Read raw data from source file - use raw dxpl because passed in one is metadata */
+            if(H5F_block_read(f_src, H5FD_MEM_DRAW, addr_src, src_nbytes, H5AC_rawdata_dxpl_id, buf) < 0)
+                HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read raw data")
 
         /* Perform datatype conversion, if necessary */
         if(is_vlen) {
             /* Convert from source file to memory */
             if(H5T_convert(tpath_src_mem, tid_src, tid_mem, nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0)
                 HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "datatype conversion failed")
 
             /* Copy into another buffer, to reclaim memory later */
@@ -1527,13 +1548,13 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src,
             HDmemset(bkg, 0, buf_size);
 
             /* Convert from memory to destination file */
             if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0)
                 HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "datatype conversion failed")
 
             /* Reclaim space from variable length data */
             if(H5D_vlen_reclaim(tid_mem, buf_space, dxpl_id, reclaim_buf) < 0)
                 HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to reclaim variable-length data")
         } /* end if */
         else if(fix_ref) {
             /* Check for expanding references */
             if(cpy_info->expand_ref) {
@@ -347,36 +347,8 @@ H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/,
     if(NULL == (obj_class = H5O_obj_class(oloc_src, dxpl_id)))
         HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "unable to determine object type")
 
-    /* Check if the object at the address is already open in the file */
-    if(H5FO_opened(oloc_src->file, oloc_src->addr) != NULL) {
-        H5G_loc_t tmp_loc;              /* Location of object */
-        H5O_loc_t tmp_oloc;             /* Location of object */
-        H5G_name_t tmp_path;            /* Object's path */
-        void *obj_ptr = NULL;           /* Object pointer */
-        hid_t tmp_id = -1;              /* Object ID */
-
-        tmp_loc.oloc = &tmp_oloc;
-        tmp_loc.path = &tmp_path;
-        tmp_oloc.file = oloc_src->file;
-        tmp_oloc.addr = oloc_src->addr;
-        tmp_oloc.holding_file = FALSE;
-        H5G_name_reset(tmp_loc.path);
-
-        /* Get a temporary ID */
-        if((tmp_id = obj_class->open(&tmp_loc, H5P_DEFAULT, dxpl_id, FALSE)) < 0)
-            HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to open object")
-
-        /* Get object pointer */
-        obj_ptr = H5I_object(tmp_id);
-
-        /* Flush the object */
-        if(obj_class->flush && obj_class->flush(obj_ptr, dxpl_id) < 0)
-            HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to flush object")
-
-        /* Release the temporary ID */
-        if(tmp_id != -1 && H5I_dec_app_ref(tmp_id))
-            HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to close temporary ID")
-    } /* end if */
+    /* Set the pointer to the shared struct for the object if opened in the file */
+    cpy_info->shared_fo = H5FO_opened(oloc_src->file, oloc_src->addr);
 
     /* Get source object header */
     if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE)))
@@ -174,6 +174,7 @@ typedef struct H5O_copy_t {
     H5SL_t *dst_dt_list;                /* Skip list to hold committed datatypes in dest file */
     hbool_t dst_dt_list_complete;       /* Whether the destination datatype list is complete (i.e. not only populated with "suggestions" from H5Padd_merge_committed_dtype_path) */
     H5O_t *oh_dst;                      /* The destination object header */
+    void *shared_fo;                    /* The shared pointer for the object */
    H5O_mcdt_search_cb_t mcdt_cb;        /* The callback to invoke before searching the global list of committed datatypes at destination */
     void *mcdt_ud;                      /* User data passed to callback */
 } H5O_copy_t;
test/objcopy.c | 598
File diff suppressed because it is too large