From 201af70c24ccdd84e8fba376dab9b2024bf21dc4 Mon Sep 17 00:00:00 2001
From: Vailin Choi
Date: Sat, 10 Jun 2017 19:40:19 -0500
Subject: [PATCH] Fix for HDFFV-7853 H5Ocopy doesn't work with open identifiers

Changes made so that raw data for dataset objects is copied from cached
info when possible, instead of flushing objects to the file and reading
them back in again.
---
 src/H5Dchunk.c   | 147 ++++++++----
 src/H5Dcompact.c |   8 +-
 src/H5Dcontig.c  |  55 +++--
 src/H5Ocopy.c    |  32 +--
 src/H5Oprivate.h |   1 +
 test/objcopy.c   | 598 +++++++++++++++++++++++++++++++++++++----------
 6 files changed, 633 insertions(+), 208 deletions(-)

diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 33fc03658e..d6934662d8 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -188,6 +188,10 @@ typedef struct H5D_chunk_it_ud3_t {

     /* needed for copy object pointed by refs */
     H5O_copy_t *cpy_info;                   /* Copy options */
+
+    /* needed for getting raw data from chunk cache */
+    hbool_t chunk_in_cache;
+    uint8_t *chunk;                         /* the unfiltered chunk data */
 } H5D_chunk_it_ud3_t;

 /* Callback info for iteration to dump index */
@@ -2280,8 +2284,8 @@ H5D__chunk_read(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
         void *chunk = NULL;             /* Pointer to locked chunk buffer */
         htri_t cacheable;               /* Whether the chunk is cacheable */

-        /* Set chunk's [scaled] coordinates */
-        io_info->store->chunk.scaled = chunk_info->scaled;
+        /* Set chunk's [scaled] coordinates */
+        io_info->store->chunk.scaled = chunk_info->scaled;

         /* Determine if we should use the chunk cache */
         if((cacheable = H5D__chunk_cacheable(io_info, udata.chunk_block.offset, FALSE)) < 0)
@@ -2796,7 +2800,7 @@ H5D__chunk_cinfo_cache_found(const H5D_chunk_cached_t *last, H5D_chunk_ud_t *uda
     /* Retrieve the information from the cache */
     udata->chunk_block.offset = last->addr;
     udata->chunk_block.length = last->nbytes;
-    udata->chunk_idx = last->chunk_idx;
+    udata->chunk_idx = last->chunk_idx;
     udata->filter_mask = last->filter_mask;

     /* Indicate that the data was found */
@@ -3065,19 +3069,19 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
     buf = ent->chunk;

     if(ent->dirty) {
-        H5D_chk_idx_info_t idx_info;        /* Chunked index info */
+        H5D_chk_idx_info_t idx_info;        /* Chunked index info */
         H5D_chunk_ud_t udata;               /* pass through B-tree */
         hbool_t must_alloc = FALSE;         /* Whether the chunk must be allocated */
-        hbool_t need_insert = FALSE;        /* Whether the chunk needs to be inserted into the index */
+        hbool_t need_insert = FALSE;        /* Whether the chunk needs to be inserted into the index */

         /* Set up user data for index callbacks */
         udata.common.layout = &dset->shared->layout.u.chunk;
         udata.common.storage = &dset->shared->layout.storage.u.chunk;
-        udata.common.scaled = ent->scaled;
+        udata.common.scaled = ent->scaled;
         udata.chunk_block.offset = ent->chunk_block.offset;
         udata.chunk_block.length = dset->shared->layout.u.chunk.size;
         udata.filter_mask = 0;
-        udata.chunk_idx = ent->chunk_idx;
+        udata.chunk_idx = ent->chunk_idx;

         /* Should the chunk be filtered before writing it to disk? */
         if(dset->shared->dcpl_cache.pline.nused
@@ -3155,8 +3159,8 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t

         /* Create the chunk it if it doesn't exist, or reallocate the chunk
          *  if its size changed.
*/ - if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert, ent->scaled) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") + if(H5D__chunk_file_alloc(&idx_info, &(ent->chunk_block), &udata.chunk_block, &need_insert, ent->scaled) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert/resize chunk on chunk level") /* Update the chunk entry's info, in case it was allocated or relocated */ ent->chunk_block.offset = udata.chunk_block.offset; @@ -3170,7 +3174,7 @@ H5D__chunk_flush_entry(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "unable to write raw data to file") /* Insert the chunk record into the index */ - if(need_insert && dset->shared->layout.storage.u.chunk.ops->insert) + if(need_insert && dset->shared->layout.storage.u.chunk.ops->insert) if((dset->shared->layout.storage.u.chunk.ops->insert)(&idx_info, &udata, dset) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINSERT, FAIL, "unable to insert chunk addr into index") @@ -3644,7 +3648,7 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, chunk_addr, my_chunk_alloc, io_info->raw_dxpl_id, chunk) < 0) HGOTO_ERROR(H5E_IO, H5E_READERROR, NULL, "unable to read raw data chunk") - if(old_pline && old_pline->nused) { + if(old_pline && old_pline->nused) { if(H5Z_pipeline(old_pline, H5Z_FLAG_REVERSE, &(udata->filter_mask), io_info->dxpl_cache->err_detect, @@ -3733,14 +3737,14 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, if(NULL == (ent = H5FL_CALLOC(H5D_rdcc_ent_t))) HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, NULL, "can't allocate raw data chunk entry") - ent->edge_chunk_state = disable_filters ? H5D_RDCC_DISABLE_FILTERS : 0; - if(udata->new_unfilt_chunk) - ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS; + ent->edge_chunk_state = disable_filters ? 
H5D_RDCC_DISABLE_FILTERS : 0; + if(udata->new_unfilt_chunk) + ent->edge_chunk_state |= H5D_RDCC_NEWLY_DISABLED_FILTERS; /* Initialize the new entry */ ent->chunk_block.offset = chunk_addr; ent->chunk_block.length = chunk_alloc; - ent->chunk_idx = udata->chunk_idx; + ent->chunk_idx = udata->chunk_idx; HDmemcpy(ent->scaled, udata->common.scaled, sizeof(hsize_t) * layout->u.chunk.ndims); H5_CHECKED_ASSIGN(ent->rd_count, uint32_t, chunk_size, size_t); H5_CHECKED_ASSIGN(ent->wr_count, uint32_t, chunk_size, size_t); @@ -3761,8 +3765,8 @@ H5D__chunk_lock(const H5D_io_info_t *io_info, H5D_chunk_ud_t *udata, } /* end if */ else rdcc->head = rdcc->tail = ent; - ent->tmp_next = NULL; - ent->tmp_prev = NULL; + ent->tmp_next = NULL; + ent->tmp_prev = NULL; } /* end if */ else @@ -5658,11 +5662,22 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) H5Z_cb_t cb_struct; /* Filter failure callback struct */ int ret_value = H5_ITER_CONT; /* Return value */ + FUNC_ENTER_STATIC /* Get 'size_t' local value for number of bytes in chunk */ H5_CHECKED_ASSIGN(nbytes, size_t, chunk_rec->nbytes, uint32_t); + /* Check for filtered chunks */ + /* Check for an edge chunk that is not filtered */ + if(pline && pline->nused) { + must_filter = TRUE; + if( (udata->common.layout->flags & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) && + (H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, udata->common.layout->dim, + chunk_rec->scaled, udata->dset_dims)) ) + must_filter = FALSE; + } + /* Check parameter for type conversion */ if(udata->do_convert) { if(H5T_detect_class(udata->dt_src, H5T_VLEN, FALSE) > 0) @@ -5673,19 +5688,6 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, H5_ITER_ERROR, "unable to copy dataset elements") } /* end if */ - /* Check for filtered chunks */ - if((is_vlen || fix_ref) && pline && pline->nused) { - /* Check if we should disable filters on this chunk */ - if(udata->common.layout->flags - & H5O_LAYOUT_CHUNK_DONT_FILTER_PARTIAL_BOUND_CHUNKS) { - /* Check if the chunk is an edge chunk, and disable filters if so */ - if(!H5D__chunk_is_partial_edge_chunk(udata->dset_ndims, udata->common.layout->dim, chunk_rec->scaled, udata->dset_dims)) - must_filter = TRUE; - } /* end if */ - else - must_filter = TRUE; - } /* end if */ - /* Resize the buf if it is too small to hold the data */ if(nbytes > buf_size) { void *new_buf; /* New buffer for data */ @@ -5708,12 +5710,51 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) udata->buf_size = buf_size = nbytes; } /* end if */ - /* read chunk data from the source file */ - if(H5F_block_read(udata->file_src, H5FD_MEM_DRAW, chunk_rec->chunk_addr, nbytes, H5AC_rawdata_dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk") + if(udata->chunk_in_cache && udata->chunk) { + HDassert(!H5F_addr_defined(chunk_rec->chunk_addr)); + HDmemcpy(buf, udata->chunk, nbytes); + udata->chunk = NULL; + } else { + H5D_rdcc_ent_t *ent = NULL; /* Cache entry */ + unsigned idx; /* Index of chunk in cache, if present */ + unsigned u; /* Counter */ + H5D_shared_t *shared_fo = udata->cpy_info->shared_fo; - /* Need to uncompress variable-length & reference data elements */ - if(must_filter) { + /* See if the written chunk is in the chunk cache */ + if(shared_fo && shared_fo->cache.chunk.nslots > 0) { + /* Determine the chunk's location in the hash table */ + idx = H5D__chunk_hash_val(shared_fo, chunk_rec->scaled); + + /* Get the chunk cache entry 
for that location */ + ent = shared_fo->cache.chunk.slot[idx]; + if(ent) { + /* Speculatively set the 'found' flag */ + udata->chunk_in_cache = TRUE; + + /* Verify that the cache entry is the correct chunk */ + for(u = 0; u < shared_fo->ndims; u++) + if(chunk_rec->scaled[u] != ent->scaled[u]) { + udata->chunk_in_cache = FALSE; + break; + } /* end if */ + } /* end if */ + } /* end if */ + + if(udata->chunk_in_cache) { + HDassert(H5F_addr_defined(chunk_rec->chunk_addr)); + HDassert(H5F_addr_defined(ent->chunk_block.offset)); + + H5_CHECKED_ASSIGN(nbytes, size_t, shared_fo->layout.u.chunk.size, uint32_t); + HDmemcpy(buf, ent->chunk, nbytes); + } else { + /* read chunk data from the source file */ + if(H5F_block_read(udata->file_src, H5FD_MEM_DRAW, chunk_rec->chunk_addr, nbytes, H5AC_rawdata_dxpl_id, buf) < 0) + HGOTO_ERROR(H5E_IO, H5E_READERROR, H5_ITER_ERROR, "unable to read raw data chunk") + } + } + + /* Need to uncompress filtered variable-length & reference data elements that are not found in chunk cache */ + if(must_filter && (is_vlen || fix_ref) && !udata->chunk_in_cache) { unsigned filter_mask = chunk_rec->filter_mask; cb_struct.func = NULL; /* no callback function when failed */ @@ -5777,8 +5818,8 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) udata_dst.chunk_block.length = chunk_rec->nbytes; udata_dst.filter_mask = chunk_rec->filter_mask; - /* Need to compress variable-length & reference data elements before writing to file */ - if(must_filter) { + /* Need to compress variable-length or reference data elements or a chunk found in cache before writing to file */ + if(must_filter && (is_vlen || fix_ref || udata->chunk_in_cache) ) { if(H5Z_pipeline(pline, 0, &(udata_dst.filter_mask), H5Z_NO_EDC, cb_struct, &nbytes, &buf_size, &buf) < 0) HGOTO_ERROR(H5E_PLINE, H5E_CANTFILTER, H5_ITER_ERROR, "output pipeline failed") #if H5_SIZEOF_SIZE_T > 4 @@ -5787,12 +5828,14 @@ H5D__chunk_copy_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata) HGOTO_ERROR(H5E_DATASET, H5E_BADRANGE, H5_ITER_ERROR, "chunk too large for 32-bit length") #endif /* H5_SIZEOF_SIZE_T > 4 */ H5_CHECKED_ASSIGN(udata_dst.chunk_block.length, uint32_t, nbytes, size_t); - udata->buf = buf; - udata->buf_size = buf_size; + udata->buf = buf; + udata->buf_size = buf_size; } /* end if */ + udata->chunk_in_cache = FALSE; + udata_dst.chunk_idx = H5VM_array_offset_pre(udata_dst.common.layout->ndims - 1, - udata_dst.common.layout->max_down_chunks, udata_dst.common.scaled); + udata_dst.common.layout->max_down_chunks, udata_dst.common.scaled); /* Allocate chunk in the file */ if(H5D__chunk_file_alloc(udata->idx_info_dst, NULL, &udata_dst.chunk_block, &need_insert, udata_dst.common.scaled) < 0) @@ -6047,11 +6090,35 @@ H5D__chunk_copy(H5F_t *f_src, H5O_storage_chunk_t *storage_src, udata.dset_ndims = (unsigned)sndims; udata.dset_dims = curr_dims; udata.cpy_info = cpy_info; + udata.chunk_in_cache = FALSE; + udata.chunk = NULL; /* Iterate over chunks to copy data */ if((storage_src->ops->iterate)(&idx_info_src, H5D__chunk_copy_cb, &udata) < 0) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to iterate over chunk index to copy data") + /* Iterate over the chunk cache to copy data for chunks with undefined address */ + if(udata.cpy_info->shared_fo) { + H5D_rdcc_ent_t *ent, *next; + H5D_chunk_rec_t chunk_rec; + H5D_shared_t *shared_fo = (H5D_shared_t *)udata.cpy_info->shared_fo; + + chunk_rec.nbytes = layout_src->size; + chunk_rec.filter_mask = 0; + chunk_rec.chunk_addr = HADDR_UNDEF; + + for(ent = 
shared_fo->cache.chunk.head; ent; ent = next) { + if(!H5F_addr_defined(ent->chunk_block.offset)) { + HDmemcpy(chunk_rec.scaled, ent->scaled, sizeof(chunk_rec.scaled)); + udata.chunk = ent->chunk; + udata.chunk_in_cache = TRUE; + if(H5D__chunk_copy_cb(&chunk_rec, &udata) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy chunk data in cache") + } + next = ent->next; + } /* end for */ + } + /* I/O buffers may have been re-allocated */ buf = udata.buf; bkg = udata.bkg; diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c index 99a25b646d..041d28ffb3 100644 --- a/src/H5Dcompact.c +++ b/src/H5Dcompact.c @@ -420,7 +420,7 @@ H5D__compact_dest(H5D_t *dset, hid_t H5_ATTR_UNUSED dxpl_id) *------------------------------------------------------------------------- */ herr_t -H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src, H5F_t *f_dst, +H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *_storage_src, H5F_t *f_dst, H5O_storage_compact_t *storage_dst, H5T_t *dt_src, H5O_copy_t *cpy_info, hid_t dxpl_id) { @@ -431,6 +431,8 @@ H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src, H5F_t *f_dst void *bkg = NULL; /* Temporary buffer for copying data */ void *reclaim_buf = NULL; /* Buffer for reclaiming data */ hid_t buf_sid = -1; /* ID for buffer dataspace */ + H5D_shared_t *shared_fo = cpy_info->shared_fo; /* Pointer to the shared struct for dataset object */ + H5O_storage_compact_t *storage_src = _storage_src; /* Pointer to storage_src */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -443,6 +445,10 @@ H5D__compact_copy(H5F_t *f_src, H5O_storage_compact_t *storage_src, H5F_t *f_dst HDassert(storage_dst->buf); HDassert(dt_src); + /* If the dataset is open in the file, point to "layout" in the shared struct */ + if(shared_fo != NULL) + storage_src = &(shared_fo->layout.storage.u.compact); + /* Create datatype ID for src datatype, so it gets freed */ if((tid_src = H5I_register(H5I_DATATYPE, dt_src, FALSE)) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "unable to register source file datatype") diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c index 0ee4d28b2c..86de01768c 100644 --- a/src/H5Dcontig.c +++ b/src/H5Dcontig.c @@ -31,17 +31,18 @@ /***********/ /* Headers */ /***********/ -#include "H5private.h" /* Generic Functions */ -#include "H5Dpkg.h" /* Dataset functions */ -#include "H5Eprivate.h" /* Error handling */ -#include "H5Fprivate.h" /* Files */ -#include "H5FDprivate.h" /* File drivers */ -#include "H5FLprivate.h" /* Free Lists */ -#include "H5Iprivate.h" /* IDs */ -#include "H5MFprivate.h" /* File memory management */ -#include "H5Oprivate.h" /* Object headers */ -#include "H5Pprivate.h" /* Property lists */ -#include "H5VMprivate.h" /* Vector and array functions */ +#include "H5private.h" /* Generic Functions */ +#include "H5Dpkg.h" /* Dataset functions */ +#include "H5Eprivate.h" /* Error handling */ +#include "H5Fprivate.h" /* Files */ +#include "H5FDprivate.h" /* File drivers */ +#include "H5FLprivate.h" /* Free Lists */ +#include "H5Iprivate.h" /* IDs */ +#include "H5MFprivate.h" /* File memory management */ +#include "H5FOprivate.h" /* File objects */ +#include "H5Oprivate.h" /* Object headers */ +#include "H5Pprivate.h" /* Property lists */ +#include "H5VMprivate.h" /* Vector and array functions */ /****************/ @@ -1362,6 +1363,10 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src, hsize_t buf_dim[1] = {0}; /* Dimension for buffer */ hbool_t is_vlen = FALSE; /* Flag to indicate that VL 
type conversion should occur */ hbool_t fix_ref = FALSE; /* Flag to indicate that ref values should be fixed */ + H5D_shared_t *shared_fo = cpy_info->shared_fo; /* Pointer to the shared struct for dataset object */ + hbool_t try_sieve = FALSE; /* Try to get data from the sieve buffer */ + haddr_t sieve_start = HADDR_UNDEF; /* Start location of sieve buffer */ + haddr_t sieve_end = HADDR_UNDEF; /* End locations of sieve buffer */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE @@ -1485,6 +1490,16 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src, /* Loop over copying data */ addr_src = storage_src->addr; addr_dst = storage_dst->addr; + + /* If data sieving is enabled and the dataset is open in the file, + set up to copy data out of the sieve buffer if deemed possible later */ + if(H5F_HAS_FEATURE(f_src, H5FD_FEAT_DATA_SIEVE) && + shared_fo && shared_fo->cache.contig.sieve_buf) { + try_sieve = TRUE; + sieve_start = shared_fo->cache.contig.sieve_loc; + sieve_end = sieve_start + shared_fo->cache.contig.sieve_size; + } + while(total_src_nbytes > 0) { /* Check if we should reduce the number of bytes to transfer */ if(total_src_nbytes < src_nbytes) { @@ -1510,14 +1525,20 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src, dst_nbytes = mem_nbytes = src_nbytes; } /* end if */ - /* Read raw data from source file - use raw dxpl because passed in one is metadata */ - if(H5F_block_read(f_src, H5FD_MEM_DRAW, addr_src, src_nbytes, H5AC_rawdata_dxpl_id, buf) < 0) - HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read raw data") + /* If the entire copy is within the sieve buffer, copy data from the sieve buffer */ + if(try_sieve && (addr_src >= sieve_start) && ((addr_src + src_nbytes -1) < sieve_end)) { + unsigned char *base_sieve_buf = shared_fo->cache.contig.sieve_buf + (addr_src - sieve_start); + + HDmemcpy(buf, base_sieve_buf, src_nbytes); + } else + /* Read raw data from source file - use raw dxpl because passed in one is metadata */ + if(H5F_block_read(f_src, H5FD_MEM_DRAW, addr_src, src_nbytes, H5AC_rawdata_dxpl_id, buf) < 0) + HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "unable to read raw data") /* Perform datatype conversion, if necessary */ if(is_vlen) { /* Convert from source file to memory */ - if(H5T_convert(tpath_src_mem, tid_src, tid_mem, nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0) + if(H5T_convert(tpath_src_mem, tid_src, tid_mem, nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "datatype conversion failed") /* Copy into another buffer, to reclaim memory later */ @@ -1527,13 +1548,13 @@ H5D__contig_copy(H5F_t *f_src, const H5O_storage_contig_t *storage_src, HDmemset(bkg, 0, buf_size); /* Convert from memory to destination file */ - if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0) + if(H5T_convert(tpath_mem_dst, tid_mem, tid_dst, nelmts, (size_t)0, (size_t)0, buf, bkg, dxpl_id) < 0) HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "datatype conversion failed") /* Reclaim space from variable length data */ if(H5D_vlen_reclaim(tid_mem, buf_space, dxpl_id, reclaim_buf) < 0) HGOTO_ERROR(H5E_DATASET, H5E_BADITER, FAIL, "unable to reclaim variable-length data") - } /* end if */ + } /* end if */ else if(fix_ref) { /* Check for expanding references */ if(cpy_info->expand_ref) { diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c index 597af632d3..aa2a9e08c0 100644 --- a/src/H5Ocopy.c +++ b/src/H5Ocopy.c @@ -347,36 +347,8 @@ 
H5O_copy_header_real(const H5O_loc_t *oloc_src, H5O_loc_t *oloc_dst /*out*/, if(NULL == (obj_class = H5O_obj_class(oloc_src, dxpl_id))) HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, FAIL, "unable to determine object type") - /* Check if the object at the address is already open in the file */ - if(H5FO_opened(oloc_src->file, oloc_src->addr) != NULL) { - H5G_loc_t tmp_loc; /* Location of object */ - H5O_loc_t tmp_oloc; /* Location of object */ - H5G_name_t tmp_path; /* Object's path */ - void *obj_ptr = NULL; /* Object pointer */ - hid_t tmp_id = -1; /* Object ID */ - - tmp_loc.oloc = &tmp_oloc; - tmp_loc.path = &tmp_path; - tmp_oloc.file = oloc_src->file; - tmp_oloc.addr = oloc_src->addr; - tmp_oloc.holding_file = FALSE; - H5G_name_reset(tmp_loc.path); - - /* Get a temporary ID */ - if((tmp_id = obj_class->open(&tmp_loc, H5P_DEFAULT, dxpl_id, FALSE)) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to open object") - - /* Get object pointer */ - obj_ptr = H5I_object(tmp_id); - - /* Flush the object */ - if(obj_class->flush && obj_class->flush(obj_ptr, dxpl_id) < 0) - HGOTO_ERROR(H5E_OHDR, H5E_CANTFLUSH, FAIL, "unable to flush object") - - /* Release the temporary ID */ - if(tmp_id != -1 && H5I_dec_app_ref(tmp_id)) - HGOTO_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to close temporary ID") - } /* end if */ + /* Set the pointer to the shared struct for the object if opened in the file */ + cpy_info->shared_fo = H5FO_opened(oloc_src->file, oloc_src->addr); /* Get source object header */ if(NULL == (oh_src = H5O_protect(oloc_src, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE))) diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h index 0f798b2aa4..563f8fad0b 100644 --- a/src/H5Oprivate.h +++ b/src/H5Oprivate.h @@ -174,6 +174,7 @@ typedef struct H5O_copy_t { H5SL_t *dst_dt_list; /* Skip list to hold committed datatypes in dest file */ hbool_t dst_dt_list_complete; /* Whether the destination datatype list is complete (i.e. not only populated with "suggestions" from H5Padd_merge_committed_dtype_path) */ H5O_t *oh_dst; /* The destination object header */ + void *shared_fo; /* The shared pointer for the object */ H5O_mcdt_search_cb_t mcdt_cb; /* The callback to invoke before searching the global list of committed datatypes at destination */ void *mcdt_ud; /* User data passed to callback */ } H5O_copy_t; diff --git a/test/objcopy.c b/test/objcopy.c index b7f5673214..216d111933 100644 --- a/test/objcopy.c +++ b/test/objcopy.c @@ -2200,6 +2200,9 @@ error: * Purpose: Create a simple dataset in SRC file and copy it to DST file * (Note: dataset has no data) * + * Note: The parameter "test_open" is added to test for H5Ocopy when + * the dataset is open in the file (HDFFV-7853). 
+ * * Return: Success: 0 * Failure: number of errors * @@ -2209,7 +2212,7 @@ error: *------------------------------------------------------------------------- */ static int -test_copy_dataset_simple_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fapl) +test_copy_dataset_simple_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fapl, hbool_t test_open) { hid_t fid_src = -1, fid_dst = -1; /* File IDs */ hid_t sid = -1; /* Dataspace ID */ @@ -2218,7 +2221,11 @@ test_copy_dataset_simple_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, h char src_filename[NAME_BUF_SIZE]; char dst_filename[NAME_BUF_SIZE]; - TESTING("H5Ocopy(): empty contiguous dataset"); + if(test_open) { + TESTING("H5Ocopy(): empty and openend contiguous dataset"); + } else { + TESTING("H5Ocopy(): empty contiguous dataset"); + } /* Initialize the filenames */ h5_fixname(FILENAME[0], src_fapl, src_filename, sizeof src_filename); @@ -2246,15 +2253,18 @@ test_copy_dataset_simple_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, h /* attach attributes to the dataset */ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR - /* close the dataset */ - if(H5Dclose(did) < 0) TEST_ERROR + if(!test_open) { - /* close the SRC file */ - if(H5Fclose(fid_src) < 0) TEST_ERROR + /* close the dataset */ + if(H5Dclose(did) < 0) TEST_ERROR + /* close the SRC file */ + if(H5Fclose(fid_src) < 0) TEST_ERROR - /* open the source file with read-only */ - if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR + /* open the source file with read-only */ + if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR + + } /* create destination file */ if((fid_dst = H5Fcreate(dst_filename, H5F_ACC_TRUNC, fcpl_dst, dst_fapl)) < 0) TEST_ERROR @@ -2265,8 +2275,10 @@ test_copy_dataset_simple_empty(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, h /* copy the dataset from SRC to DST */ if(H5Ocopy(fid_src, NAME_DATASET_SIMPLE, fid_dst, NAME_DATASET_SIMPLE, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR - /* open the dataset for copy */ - if((did = H5Dopen2(fid_src, NAME_DATASET_SIMPLE, H5P_DEFAULT)) < 0) TEST_ERROR + if(!test_open) { + /* open the dataset for copy */ + if((did = H5Dopen2(fid_src, NAME_DATASET_SIMPLE, H5P_DEFAULT)) < 0) TEST_ERROR + } /* open the destination dataset */ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_SIMPLE, H5P_DEFAULT)) < 0) TEST_ERROR @@ -3890,6 +3902,9 @@ error: * * Purpose: Create a compressed, chunked dataset in SRC file and copy it to DST file * + * Note: The parameter "test_open" is added to test for H5Ocopy when + * the dataset is open in the file (HDFFV-7853). 
+ * * Return: Success: 0 * Failure: number of errors * @@ -3902,7 +3917,7 @@ error: */ static int test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, - hid_t dst_fapl) + hid_t dst_fapl, hbool_t test_open) { #ifdef H5_HAVE_FILTER_DEFLATE hid_t fid_src = -1, fid_dst = -1; /* File IDs */ @@ -3917,7 +3932,11 @@ test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, char dst_filename[NAME_BUF_SIZE]; #endif /* H5_HAVE_FILTER_DEFLATE */ - TESTING("H5Ocopy(): compressed dataset with no edge filters"); + if(test_open) { + TESTING("H5Ocopy(): compressed and opened dataset with no edge filters"); + } else { + TESTING("H5Ocopy(): compressed dataset with no edge filters"); + } #ifndef H5_HAVE_FILTER_DEFLATE SKIPPED(); @@ -3966,15 +3985,19 @@ test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, /* attach attributes to the dataset */ if(test_copy_attach_attributes(did, H5T_NATIVE_INT) < 0) TEST_ERROR - /* close the dataset */ - if(H5Dclose(did) < 0) TEST_ERROR + if(!test_open) { - /* close the SRC file */ - if(H5Fclose(fid_src) < 0) TEST_ERROR + /* close the dataset */ + if(H5Dclose(did) < 0) TEST_ERROR + + /* close the SRC file */ + if(H5Fclose(fid_src) < 0) TEST_ERROR - /* open the source file with read-only */ - if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR + /* open the source file with read-only */ + if((fid_src = H5Fopen(src_filename, H5F_ACC_RDONLY, src_fapl)) < 0) TEST_ERROR + + } /* create destination file */ if((fid_dst = H5Fcreate(dst_filename, H5F_ACC_TRUNC, fcpl_dst, dst_fapl)) < 0) TEST_ERROR @@ -3985,8 +4008,10 @@ test_copy_dataset_no_edge_filt(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, /* copy the dataset from SRC to DST */ if(H5Ocopy(fid_src, NAME_DATASET_CHUNKED, fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT, H5P_DEFAULT) < 0) TEST_ERROR - /* open the dataset for copy */ - if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR + if(!test_open) { + /* open the dataset for copy */ + if((did = H5Dopen2(fid_src, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR + } /* open the destination dataset */ if((did2 = H5Dopen2(fid_dst, NAME_DATASET_CHUNKED, H5P_DEFAULT)) < 0) TEST_ERROR @@ -4034,6 +4059,9 @@ error: * * Purpose: Create a compact dataset in SRC file and copy it to DST file * + * Note: The parameter "test_open" is added to test for H5Ocopy when + * the dataset is open in the file (HDFFV-7853). + * * Return: Success: 0 * Failure: number of errors * @@ -4043,7 +4071,7 @@ error: *------------------------------------------------------------------------- */ static int -test_copy_dataset_compact(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fapl) +test_copy_dataset_compact(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fapl, hbool_t test_open) { hid_t fid_src = -1, fid_dst = -1; /* File IDs */ hid_t sid = -1; /* Dataspace ID */ @@ -4055,7 +4083,11 @@ test_copy_dataset_compact(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t char src_filename[NAME_BUF_SIZE]; char dst_filename[NAME_BUF_SIZE]; - TESTING("H5Ocopy(): compact dataset"); + if(test_open) { + TESTING("H5Ocopy(): compact and opened dataset"); + } else { + TESTING("H5Ocopy(): compact dataset"); + } /* set initial data values */ for (i=0; i
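
For reference, the following is a minimal, hypothetical C sketch (not part of the patch or its test suite) of the user-level scenario HDFFV-7853 describes: H5Ocopy() is called while the source dataset is still open, so its raw data may exist only in the chunk cache (or sieve buffer) rather than on disk. File and dataset names below are illustrative.

#include "hdf5.h"

int
main(void)
{
    hid_t   fid_src, fid_dst, sid, dcpl, did, did2;
    hsize_t dims[1]  = {16};
    hsize_t chunk[1] = {4};
    int     wbuf[16], rbuf[16];
    int     i;

    for(i = 0; i < 16; i++)
        wbuf[i] = i;

    /* Create the source file and a chunked dataset, then write to it */
    fid_src = H5Fcreate("ocopy_src.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    sid     = H5Screate_simple(1, dims, NULL);
    dcpl    = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, chunk);
    did     = H5Dcreate2(fid_src, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
    if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
        return 1;

    /* Copy the dataset while 'did' is still open; at this point the
     * written chunks may live only in the chunk cache */
    fid_dst = H5Fcreate("ocopy_dst.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    if(H5Ocopy(fid_src, "dset", fid_dst, "dset", H5P_DEFAULT, H5P_DEFAULT) < 0)
        return 1;

    /* Read the copy back and verify the data made it across */
    did2 = H5Dopen2(fid_dst, "dset", H5P_DEFAULT);
    if(H5Dread(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
        return 1;
    for(i = 0; i < 16; i++)
        if(rbuf[i] != wbuf[i])
            return 1;

    H5Dclose(did2);
    H5Fclose(fid_dst);
    H5Pclose(dcpl);
    H5Dclose(did);
    H5Sclose(sid);
    H5Fclose(fid_src);

    return 0;
}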