Mirror of https://github.com/HDFGroup/hdf5.git (synced 2025-02-05 15:42:32 +08:00)

Merge branch 'develop' into develop_minor

This commit is contained in:
commit 3d7a0dc6d5
@@ -236,7 +236,7 @@ string (REGEX REPLACE ".*#define[ \t]+H5_VERS_MINOR[ \t]+([0-9]*).*$"
    "\\1" H5_VERS_MINOR ${_h5public_h_contents})
string (REGEX REPLACE ".*#define[ \t]+H5_VERS_RELEASE[ \t]+([0-9]*).*$"
    "\\1" H5_VERS_RELEASE ${_h5public_h_contents})
-string (REGEX REPLACE ".*#define[ \t]+H5_VERS_SUBRELEASE[ \t]+\"([0-9A-Za-z._]*)\".*$"
+string (REGEX REPLACE ".*#define[ \t]+H5_VERS_SUBRELEASE[ \t]+\"([0-9A-Za-z._\-]*)\".*$"
    "\\1" H5_VERS_SUBRELEASE ${_h5public_h_contents})
#message (STATUS "VERSION: ${H5_VERS_MAJOR}.${H5_VERS_MINOR}.${H5_VERS_RELEASE}-${H5_VERS_SUBRELEASE}")

@@ -339,9 +339,11 @@ set (HDF5_PACKAGE_VERSION "${H5_VERS_MAJOR}.${H5_VERS_MINOR}.${H5_VERS_RELEASE}"
set (HDF5_PACKAGE_VERSION_MAJOR "${H5_VERS_MAJOR}.${H5_VERS_MINOR}")
set (HDF5_PACKAGE_VERSION_MINOR "${H5_VERS_RELEASE}")
if (H5_VERS_SUBRELEASE)
-  set (HDF5_PACKAGE_VERSION_STRING "${HDF5_PACKAGE_VERSION}-${H5_VERS_SUBRELEASE}")
+  set (HDF5_PACKAGE_VERSION_STRING "${HDF5_PACKAGE_VERSION}.${H5_VERS_SUBRELEASE}")
+  set (HDF5_RELEASE_VERSION_STRING "${HDF5_PACKAGE_VERSION}-${H5_VERS_SUBRELEASE}")
else ()
  set (HDF5_PACKAGE_VERSION_STRING "${HDF5_PACKAGE_VERSION}")
+  set (HDF5_RELEASE_VERSION_STRING "${HDF5_PACKAGE_VERSION}")
endif ()
set (HDF5_LIB_PACKAGE_SOVERSION "${H5_LIB_SOVERS_MAJOR}.${H5_LIB_SOVERS_RELEASE}.${H5_LIB_SOVERS_MINOR}")
set (HDF5_LIB_PACKAGE_SOVERSION_MAJOR "${H5_LIB_SOVERS_MAJOR}")
@@ -733,7 +733,7 @@
#cmakedefine H5_USE_114_API_DEFAULT @H5_USE_114_API_DEFAULT@

/* Define if the library will use file locking */
-#cmakedefine H5_FILE_LOCKING @H5_USE_FILE_LOCKING@
+#cmakedefine H5_USE_FILE_LOCKING @H5_USE_FILE_LOCKING@

/* Define if a memory checking tool will be used on the library, to cause
   library to be very picky about memory operations and also disable the
@@ -21,6 +21,7 @@ cmake_minimum_required (VERSION 3.12)
# ctest -S HDF5config.cmake,OPTION=VALUE -C Release -VV -O test.log
# where valid options for OPTION are:
#     BUILD_GENERATOR - The cmake build generator:
+#         MinGW * MinGW Makefiles
#         Unix * Unix Makefiles
#         VS2019 * Visual Studio 16 2019
#         VS201964 * Visual Studio 16 2019

@@ -167,7 +168,11 @@ if (NOT DEFINED HPC)
    ## Set the following to unique id your computer ##
    set (CTEST_SITE "WIN7${BUILD_GENERATOR}.XXXX")
  else ()
-    set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
+    if (MINGW)
+      set (CTEST_CMAKE_GENERATOR "MinGW Makefiles")
+    else ()
+      set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
+    endif ()
    ## Set the following to unique id your computer ##
    if (APPLE)
      set (CTEST_SITE "MAC.XXXX")
@@ -98,7 +98,7 @@ int main () {

  for (i=0; i<numfilt; i++) {
     nelmts = 0;

-     filter_type = H5Pget_filter2 (plist_id, 0, &flags, &nelmts, NULL, 0, NULL,
+     filter_type = H5Pget_filter2 (plist_id, i, &flags, &nelmts, NULL, 0, NULL,
                                   &filter_info);
     printf ("Filter Type: ");
     switch (filter_type) {
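The hunk above fixes the filter-iteration example: the loop index i is now passed to H5Pget_filter2 instead of a hard-coded 0, so each filter in the dataset's pipeline is queried rather than the first one repeatedly. A minimal standalone sketch of the corrected pattern follows; the file name "example.h5" and dataset name "/dset" are assumptions for illustration, and error checking is kept to a minimum.

#include <stdio.h>
#include "hdf5.h"

int main(void)
{
    hid_t file_id, dset_id, dcpl_id;
    int   i, numfilt;

    /* Assumed file/dataset names, for illustration only */
    file_id = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    dset_id = H5Dopen2(file_id, "/dset", H5P_DEFAULT);
    dcpl_id = H5Dget_create_plist(dset_id);

    numfilt = H5Pget_nfilters(dcpl_id);
    for (i = 0; i < numfilt; i++) {
        size_t       nelmts = 0;
        unsigned int flags, filter_info;
        H5Z_filter_t filter_type;

        /* Pass the loop index, not 0, so every pipeline entry is reported */
        filter_type = H5Pget_filter2(dcpl_id, (unsigned)i, &flags, &nelmts,
                                     NULL, 0, NULL, &filter_info);
        printf("Filter %d has type %d\n", i, (int)filter_type);
    }

    H5Pclose(dcpl_id);
    H5Dclose(dset_id);
    H5Fclose(file_id);
    return 0;
}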
112 src/H5Dchunk.c
@@ -4954,7 +4954,7 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
    int blocks, leftover, block_len; /* converted to int for MPI */
    MPI_Aint *chunk_disp_array = NULL;
    int *block_lens = NULL;
-    MPI_Datatype mem_type, file_type;
+    MPI_Datatype mem_type = MPI_BYTE, file_type = MPI_BYTE;
    H5FD_mpio_xfer_t prev_xfer_mode; /* Previous data xfer mode */
    hbool_t have_xfer_mode = FALSE; /* Whether the previous xffer mode has been retrieved */
    hbool_t need_addr_sort = FALSE;

@@ -4980,9 +4980,9 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
        HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "Resulted in division by zero")
    num_blocks = (size_t)(chunk_info->num_io / (size_t)mpi_size); /* value should be the same on all procs */

-    /* after evenly distributing the blocks between processes, are
-       there any leftover blocks for each individual process
-       (round-robin) */
+    /* After evenly distributing the blocks between processes, are there any
+     * leftover blocks for each individual process (round-robin)?
+     */
    leftover_blocks = (size_t)(chunk_info->num_io % (size_t)mpi_size);

    /* Cast values to types needed by MPI */

@@ -4990,58 +4990,62 @@ H5D__chunk_collective_fill(const H5D_t *dset, H5D_chunk_coll_info_t *chunk_info,
    H5_CHECKED_ASSIGN(leftover, int, leftover_blocks, size_t);
    H5_CHECKED_ASSIGN(block_len, int, chunk_size, size_t);

-    /* Allocate buffers */
-    /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */
-    if(NULL == (block_lens = (int *)H5MM_malloc((size_t)(blocks + 1) * sizeof(int))))
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer")
-    if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc((size_t)(blocks + 1) * sizeof(MPI_Aint))))
-        HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer")
+    /* Check if we have any chunks to write on this rank */
+    if(num_blocks > 0 || (leftover && leftover > mpi_rank)) {
+        /* Allocate buffers */
+        /* (MSC - should not need block_lens if MPI_type_create_hindexed_block is working) */
+        if(NULL == (block_lens = (int *)H5MM_malloc((size_t)(blocks + 1) * sizeof(int))))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk lengths buffer")
+        if(NULL == (chunk_disp_array = (MPI_Aint *)H5MM_malloc((size_t)(blocks + 1) * sizeof(MPI_Aint))))
+            HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "couldn't allocate chunk file displacement buffer")

-    for(i = 0 ; i < blocks ; i++) {
-        /* store the chunk address as an MPI_Aint */
-        chunk_disp_array[i] = (MPI_Aint)(chunk_info->addr[i + mpi_rank*blocks]);
+        for(i = 0 ; i < blocks ; i++) {
+            /* store the chunk address as an MPI_Aint */
+            chunk_disp_array[i] = (MPI_Aint)(chunk_info->addr[i + (mpi_rank * blocks)]);

-        /* MSC - should not need this if MPI_type_create_hindexed_block is working */
-        block_lens[i] = block_len;
+            /* MSC - should not need this if MPI_type_create_hindexed_block is working */
+            block_lens[i] = block_len;

-        /* make sure that the addresses in the datatype are
-           monotonically non decreasing */
-        if(i && (chunk_disp_array[i] < chunk_disp_array[i - 1]))
-            need_addr_sort = TRUE;
-    } /* end for */
+            /* Make sure that the addresses in the datatype are
+             * monotonically non-decreasing
+             */
+            if(i && (chunk_disp_array[i] < chunk_disp_array[i - 1]))
+                need_addr_sort = TRUE;
+        } /* end for */

-    /* calculate if there are any leftover blocks after evenly
-       distributing. If there are, then round robin the distribution
-       to processes 0 -> leftover. */
-    if(leftover && leftover > mpi_rank) {
-        chunk_disp_array[blocks] = (MPI_Aint)chunk_info->addr[blocks*mpi_size + mpi_rank];
-        if(blocks && (chunk_disp_array[blocks] < chunk_disp_array[blocks - 1]))
-            need_addr_sort = TRUE;
-        block_lens[blocks] = block_len;
-        blocks++;
-    }
+        /* Calculate if there are any leftover blocks after evenly
+         * distributing. If there are, then round-robin the distribution
+         * to processes 0 -> leftover.
+         */
+        if(leftover && leftover > mpi_rank) {
+            chunk_disp_array[blocks] = (MPI_Aint)chunk_info->addr[(blocks * mpi_size) + mpi_rank];
+            if(blocks && (chunk_disp_array[blocks] < chunk_disp_array[blocks - 1]))
+                need_addr_sort = TRUE;
+            block_lens[blocks] = block_len;
+            blocks++;
+        }

-    /*
-     * Ensure that the blocks are sorted in monotonically non-decreasing
-     * order of offset in the file.
-     */
-    if(need_addr_sort)
-        HDqsort(chunk_disp_array, blocks, sizeof(MPI_Aint), H5D__chunk_cmp_addr);
+        /* Ensure that the blocks are sorted in monotonically non-decreasing
+         * order of offset in the file.
+         */
+        if(need_addr_sort)
+            HDqsort(chunk_disp_array, blocks, sizeof(MPI_Aint), H5D__chunk_cmp_addr);

-    /* MSC - should use this if MPI_type_create_hindexed block is working:
-     * mpi_code = MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE, &file_type);
-     */
-    mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
-    if(mpi_code != MPI_SUCCESS)
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type)))
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+        /* MSC - should use this if MPI_type_create_hindexed block is working:
+         * mpi_code = MPI_Type_create_hindexed_block(blocks, block_len, chunk_disp_array, MPI_BYTE, &file_type);
+         */
+        mpi_code = MPI_Type_create_hindexed(blocks, block_lens, chunk_disp_array, MPI_BYTE, &file_type);
+        if(mpi_code != MPI_SUCCESS)
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&file_type)))
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)

-    mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type);
-    if(mpi_code != MPI_SUCCESS)
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&mem_type)))
-        HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+        mpi_code = MPI_Type_create_hvector(blocks, block_len, 0, MPI_BYTE, &mem_type);
+        if(mpi_code != MPI_SUCCESS)
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&mem_type)))
+            HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+    } /* end if */

    /* Set MPI-IO VFD properties */

@@ -5073,10 +5077,12 @@ done:
        HDONE_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set transfer mode")

    /* free things */
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
-        HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
-    if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
-        HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    if(MPI_BYTE != file_type)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&file_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
+    if(MPI_BYTE != mem_type)
+        if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&mem_type)))
+            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code)
    H5MM_xfree(chunk_disp_array);
    H5MM_xfree(block_lens);
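The H5Dchunk.c changes above let ranks that own no chunks skip datatype construction entirely: mem_type and file_type now start as MPI_BYTE, the derived types are built only inside the new if(num_blocks > 0 || ...) block, and the done: section frees a type only when it differs from MPI_BYTE. A minimal MPI sketch of that create-only-when-needed / free-only-if-created pattern, with made-up block counts and offsets (an illustration, not the HDF5 implementation):

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char **argv)
{
    int          rank, nblocks, i;
    MPI_Datatype file_type = MPI_BYTE;   /* MPI_BYTE marks "no type created" */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    nblocks = (rank % 2) ? 2 : 0;        /* pretend odd ranks own two blocks */

    if (nblocks > 0) {
        int      *lens  = malloc((size_t)nblocks * sizeof(int));
        MPI_Aint *disps = malloc((size_t)nblocks * sizeof(MPI_Aint));

        for (i = 0; i < nblocks; i++) {
            lens[i]  = 1024;                 /* bytes per block (made up)     */
            disps[i] = (MPI_Aint)(i * 4096); /* file displacement (made up)   */
        }
        MPI_Type_create_hindexed(nblocks, lens, disps, MPI_BYTE, &file_type);
        MPI_Type_commit(&file_type);
        free(lens);
        free(disps);
    }

    /* ... a collective write using file_type would go here ... */

    if (file_type != MPI_BYTE)           /* free only what was actually created */
        MPI_Type_free(&file_type);

    MPI_Finalize();
    return 0;
}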
204 src/H5Dvirtual.c
@@ -313,6 +313,8 @@ done:
herr_t
H5D_virtual_update_min_dims(H5O_layout_t *layout, size_t idx)
{
+    H5O_storage_virtual_t *virt = &layout->storage.u.virt;
+    H5O_storage_virtual_ent_t *ent = &virt->list[idx];
    H5S_sel_type sel_type;
    int rank;
    hsize_t bounds_start[H5S_MAX_RANK];

@@ -324,10 +326,10 @@ H5D_virtual_update_min_dims(H5O_layout_t *layout, size_t idx)

    HDassert(layout);
    HDassert(layout->type == H5D_VIRTUAL);
-    HDassert(idx < layout->storage.u.virt.list_nalloc);
+    HDassert(idx < virt->list_nalloc);

    /* Get type of selection */
-    if(H5S_SEL_ERROR == (sel_type = H5S_GET_SELECT_TYPE(layout->storage.u.virt.list[idx].source_dset.virtual_select)))
+    if(H5S_SEL_ERROR == (sel_type = H5S_GET_SELECT_TYPE(ent->source_dset.virtual_select)))
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get selection type")

    /* Do not update min_dims for "all" or "none" selections */

@@ -335,19 +337,19 @@ H5D_virtual_update_min_dims(H5O_layout_t *layout, size_t idx)
        HGOTO_DONE(SUCCEED)

    /* Get rank of vspace */
-    if((rank = H5S_GET_EXTENT_NDIMS(layout->storage.u.virt.list[idx].source_dset.virtual_select)) < 0)
+    if((rank = H5S_GET_EXTENT_NDIMS(ent->source_dset.virtual_select)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get number of dimensions")

    /* Get selection bounds */
-    if(H5S_SELECT_BOUNDS(layout->storage.u.virt.list[idx].source_dset.virtual_select, bounds_start, bounds_end) < 0)
+    if(H5S_SELECT_BOUNDS(ent->source_dset.virtual_select, bounds_start, bounds_end) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get selection bounds")

    /* Update min_dims */
    for(i = 0; i < rank; i++)
        /* Don't check unlimited dimensions in the selection */
-        if((i != layout->storage.u.virt.list[idx].unlim_dim_virtual)
-                && (bounds_end[i] >= layout->storage.u.virt.min_dims[i]))
-            layout->storage.u.virt.min_dims[i] = bounds_end[i] + (hsize_t)1;
+        if((i != ent->unlim_dim_virtual)
+                && (bounds_end[i] >= virt->min_dims[i]))
+            virt->min_dims[i] = bounds_end[i] + (hsize_t)1;

done:
    FUNC_LEAVE_NOAPI(ret_value)

@@ -419,6 +421,7 @@ done:
herr_t
H5D__virtual_store_layout(H5F_t *f, H5O_layout_t *layout)
{
+    H5O_storage_virtual_t *virt = &layout->storage.u.virt;
    uint8_t *heap_block = NULL; /* Block to add to heap */
    size_t *str_size = NULL; /* Array for VDS entry string lengths */
    uint8_t *heap_block_p; /* Pointer into the heap block, while encoding */

@@ -433,16 +436,16 @@ H5D__virtual_store_layout(H5F_t *f, H5O_layout_t *layout)
    /* Sanity checking */
    HDassert(f);
    HDassert(layout);
-    HDassert(layout->storage.u.virt.serial_list_hobjid.addr == HADDR_UNDEF);
+    HDassert(virt->serial_list_hobjid.addr == HADDR_UNDEF);

    /* Create block if # of used entries > 0 */
-    if(layout->storage.u.virt.list_nused > 0) {
+    if(virt->list_nused > 0) {

        /* Set the low/high bounds according to 'f' for the API context */
        H5CX_set_libver_bounds(f);

        /* Allocate array for caching results of strlen */
-        if(NULL == (str_size = (size_t *)H5MM_malloc(2 * layout->storage.u.virt.list_nused * sizeof(size_t))))
+        if(NULL == (str_size = (size_t *)H5MM_malloc(2 * virt->list_nused * sizeof(size_t))))
            HGOTO_ERROR(H5E_OHDR, H5E_RESOURCE, FAIL, "unable to allocate string length array")

        /*

@@ -453,29 +456,30 @@ H5D__virtual_store_layout(H5F_t *f, H5O_layout_t *layout)
        block_size = (size_t)1 + H5F_SIZEOF_SIZE(f);

        /* Calculate size of each entry */
-        for(i = 0; i < layout->storage.u.virt.list_nused; i++) {
+        for(i = 0; i < virt->list_nused; i++) {
+            H5O_storage_virtual_ent_t *ent = &virt->list[i];
            hssize_t select_serial_size; /* Size of serialized selection */

-            HDassert(layout->storage.u.virt.list[i].source_file_name);
-            HDassert(layout->storage.u.virt.list[i].source_dset_name);
-            HDassert(layout->storage.u.virt.list[i].source_select);
-            HDassert(layout->storage.u.virt.list[i].source_dset.virtual_select);
+            HDassert(ent->source_file_name);
+            HDassert(ent->source_dset_name);
+            HDassert(ent->source_select);
+            HDassert(ent->source_dset.virtual_select);

            /* Source file name */
-            str_size[2 * i] = HDstrlen(layout->storage.u.virt.list[i].source_file_name) + (size_t)1;
+            str_size[2 * i] = HDstrlen(ent->source_file_name) + (size_t)1;
            block_size += str_size[2 * i];

            /* Source dset name */
-            str_size[(2 * i) + 1] = HDstrlen(layout->storage.u.virt.list[i].source_dset_name) + (size_t)1;
+            str_size[(2 * i) + 1] = HDstrlen(ent->source_dset_name) + (size_t)1;
            block_size += str_size[(2 * i) + 1];

            /* Source selection */
-            if((select_serial_size = H5S_SELECT_SERIAL_SIZE(layout->storage.u.virt.list[i].source_select)) < 0)
+            if((select_serial_size = H5S_SELECT_SERIAL_SIZE(ent->source_select)) < 0)
                HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, FAIL, "unable to check dataspace selection size")
            block_size += (size_t)select_serial_size;

            /* Virtual dataset selection */
-            if((select_serial_size = H5S_SELECT_SERIAL_SIZE(layout->storage.u.virt.list[i].source_dset.virtual_select)) < 0)
+            if((select_serial_size = H5S_SELECT_SERIAL_SIZE(ent->source_dset.virtual_select)) < 0)
                HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, FAIL, "unable to check dataspace selection size")
            block_size += (size_t)select_serial_size;
        } /* end for */

@@ -498,25 +502,26 @@ H5D__virtual_store_layout(H5F_t *f, H5O_layout_t *layout)
        *heap_block_p++ = (uint8_t)H5O_LAYOUT_VDS_GH_ENC_VERS;

        /* Number of entries */
-        tmp_nentries = (hsize_t)layout->storage.u.virt.list_nused;
+        tmp_nentries = (hsize_t)virt->list_nused;
        H5F_ENCODE_LENGTH(f, heap_block_p, tmp_nentries)

        /* Encode each entry */
-        for(i = 0; i < layout->storage.u.virt.list_nused; i++) {
+        for(i = 0; i < virt->list_nused; i++) {
+            H5O_storage_virtual_ent_t *ent = &virt->list[i];
            /* Source file name */
-            H5MM_memcpy((char *)heap_block_p, layout->storage.u.virt.list[i].source_file_name, str_size[2 * i]);
+            H5MM_memcpy((char *)heap_block_p, ent->source_file_name, str_size[2 * i]);
            heap_block_p += str_size[2 * i];

            /* Source dataset name */
-            H5MM_memcpy((char *)heap_block_p, layout->storage.u.virt.list[i].source_dset_name, str_size[(2 * i) + 1]);
+            H5MM_memcpy((char *)heap_block_p, ent->source_dset_name, str_size[(2 * i) + 1]);
            heap_block_p += str_size[(2 * i) + 1];

            /* Source selection */
-            if(H5S_SELECT_SERIALIZE(layout->storage.u.virt.list[i].source_select, &heap_block_p) < 0)
+            if(H5S_SELECT_SERIALIZE(ent->source_select, &heap_block_p) < 0)
                HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to serialize source selection")

            /* Virtual selection */
-            if(H5S_SELECT_SERIALIZE(layout->storage.u.virt.list[i].source_dset.virtual_select, &heap_block_p) < 0)
+            if(H5S_SELECT_SERIALIZE(ent->source_dset.virtual_select, &heap_block_p) < 0)
                HGOTO_ERROR(H5E_OHDR, H5E_CANTCOPY, FAIL, "unable to serialize virtual selection")
        } /* end for */

@@ -525,7 +530,7 @@ H5D__virtual_store_layout(H5F_t *f, H5O_layout_t *layout)
        UINT32ENCODE(heap_block_p, chksum)

        /* Insert block into global heap */
-        if(H5HG_insert(f, block_size, heap_block, &(layout->storage.u.virt.serial_list_hobjid)) < 0) /* Casting away const OK --NAF */
+        if(H5HG_insert(f, block_size, heap_block, &(virt->serial_list_hobjid)) < 0) /* Casting away const OK --NAF */
            HGOTO_ERROR(H5E_OHDR, H5E_CANTINSERT, FAIL, "unable to insert virtual dataset heap block")
    } /* end if */

@@ -556,6 +561,7 @@ herr_t
H5D__virtual_copy_layout(H5O_layout_t *layout)
{
    H5O_storage_virtual_ent_t *orig_list = NULL;
+    H5O_storage_virtual_t *virt = &layout->storage.u.virt;
    hid_t orig_source_fapl;
    hid_t orig_source_dapl;
    H5P_genplist_t *plist;

@@ -569,127 +575,129 @@ H5D__virtual_copy_layout(H5O_layout_t *layout)

    /* Save original entry list and top-level property lists and reset in layout
     * so the originals aren't closed on error */
-    orig_source_fapl = layout->storage.u.virt.source_fapl;
-    layout->storage.u.virt.source_fapl = -1;
-    orig_source_dapl = layout->storage.u.virt.source_dapl;
-    layout->storage.u.virt.source_dapl = -1;
-    orig_list = layout->storage.u.virt.list;
-    layout->storage.u.virt.list = NULL;
+    orig_source_fapl = virt->source_fapl;
+    virt->source_fapl = -1;
+    orig_source_dapl = virt->source_dapl;
+    virt->source_dapl = -1;
+    orig_list = virt->list;
+    virt->list = NULL;

    /* Copy entry list */
-    if(layout->storage.u.virt.list_nused > 0) {
+    if(virt->list_nused > 0) {
        HDassert(orig_list);

        /* Allocate memory for the list */
-        if(NULL == (layout->storage.u.virt.list = (H5O_storage_virtual_ent_t *)H5MM_calloc(layout->storage.u.virt.list_nused * sizeof(H5O_storage_virtual_ent_t))))
+        if(NULL == (virt->list = H5MM_calloc(virt->list_nused * sizeof(virt->list[0]))))
            HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate memory for virtual dataset entry list")
-        layout->storage.u.virt.list_nalloc = layout->storage.u.virt.list_nused;
+        virt->list_nalloc = virt->list_nused;

        /* Copy the list entries, though set source_dset.dset and sub_dset to
         * NULL */
-        for(i = 0; i < layout->storage.u.virt.list_nused; i++) {
+        for(i = 0; i < virt->list_nused; i++) {
+            H5O_storage_virtual_ent_t *ent = &virt->list[i];

            /* Copy virtual selection */
-            if(NULL == (layout->storage.u.virt.list[i].source_dset.virtual_select
+            if(NULL == (ent->source_dset.virtual_select
                    = H5S_copy(orig_list[i].source_dset.virtual_select, FALSE, TRUE)))
                HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy virtual selection")

            /* Copy original source names */
-            if(NULL == (layout->storage.u.virt.list[i].source_file_name
+            if(NULL == (ent->source_file_name
                    = H5MM_strdup(orig_list[i].source_file_name)))
                HGOTO_ERROR(H5E_DATASET, H5E_RESOURCE, FAIL, "unable to duplicate source file name")
-            if(NULL == (layout->storage.u.virt.list[i].source_dset_name
+            if(NULL == (ent->source_dset_name
                    = H5MM_strdup(orig_list[i].source_dset_name)))
                HGOTO_ERROR(H5E_DATASET, H5E_RESOURCE, FAIL, "unable to duplicate source dataset name")

            /* Copy source selection */
-            if(NULL == (layout->storage.u.virt.list[i].source_select
+            if(NULL == (ent->source_select
                    = H5S_copy(orig_list[i].source_select, FALSE, TRUE)))
                HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy source selection")

            /* Initialize clipped selections */
            if(orig_list[i].unlim_dim_virtual < 0) {
-                layout->storage.u.virt.list[i].source_dset.clipped_source_select = layout->storage.u.virt.list[i].source_select;
-                layout->storage.u.virt.list[i].source_dset.clipped_virtual_select = layout->storage.u.virt.list[i].source_dset.virtual_select;
+                ent->source_dset.clipped_source_select = ent->source_select;
+                ent->source_dset.clipped_virtual_select = ent->source_dset.virtual_select;
            } /* end if */

            /* Copy parsed names */
-            if(H5D__virtual_copy_parsed_name(&layout->storage.u.virt.list[i].parsed_source_file_name, orig_list[i].parsed_source_file_name) < 0)
+            if(H5D__virtual_copy_parsed_name(&ent->parsed_source_file_name, orig_list[i].parsed_source_file_name) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy parsed source file name")
-            layout->storage.u.virt.list[i].psfn_static_strlen = orig_list[i].psfn_static_strlen;
-            layout->storage.u.virt.list[i].psfn_nsubs = orig_list[i].psfn_nsubs;
-            if(H5D__virtual_copy_parsed_name(&layout->storage.u.virt.list[i].parsed_source_dset_name, orig_list[i].parsed_source_dset_name) < 0)
+            ent->psfn_static_strlen = orig_list[i].psfn_static_strlen;
+            ent->psfn_nsubs = orig_list[i].psfn_nsubs;
+            if(H5D__virtual_copy_parsed_name(&ent->parsed_source_dset_name, orig_list[i].parsed_source_dset_name) < 0)
                HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "unable to copy parsed source dataset name")
-            layout->storage.u.virt.list[i].psdn_static_strlen = orig_list[i].psdn_static_strlen;
-            layout->storage.u.virt.list[i].psdn_nsubs = orig_list[i].psdn_nsubs;
+            ent->psdn_static_strlen = orig_list[i].psdn_static_strlen;
+            ent->psdn_nsubs = orig_list[i].psdn_nsubs;

            /* Copy source names in source dset or add reference as appropriate
             */
            if(orig_list[i].source_dset.file_name) {
                if(orig_list[i].source_dset.file_name
                        == orig_list[i].source_file_name)
-                    layout->storage.u.virt.list[i].source_dset.file_name = layout->storage.u.virt.list[i].source_file_name;
+                    ent->source_dset.file_name = ent->source_file_name;
                else if(orig_list[i].parsed_source_file_name
                        && (orig_list[i].source_dset.file_name
                        != orig_list[i].parsed_source_file_name->name_segment)) {
-                    HDassert(layout->storage.u.virt.list[i].parsed_source_file_name);
-                    HDassert(layout->storage.u.virt.list[i].parsed_source_file_name->name_segment);
-                    layout->storage.u.virt.list[i].source_dset.file_name = layout->storage.u.virt.list[i].parsed_source_file_name->name_segment;
+                    HDassert(ent->parsed_source_file_name);
+                    HDassert(ent->parsed_source_file_name->name_segment);
+                    ent->source_dset.file_name = ent->parsed_source_file_name->name_segment;
                } /* end if */
                else
-                    if(NULL == (layout->storage.u.virt.list[i].source_dset.file_name
+                    if(NULL == (ent->source_dset.file_name
                            = H5MM_strdup(orig_list[i].source_dset.file_name)))
                        HGOTO_ERROR(H5E_DATASET, H5E_RESOURCE, FAIL, "unable to duplicate source file name")
            } /* end if */
            if(orig_list[i].source_dset.dset_name) {
                if(orig_list[i].source_dset.dset_name
                        == orig_list[i].source_dset_name)
-                    layout->storage.u.virt.list[i].source_dset.dset_name = layout->storage.u.virt.list[i].source_dset_name;
+                    ent->source_dset.dset_name = ent->source_dset_name;
                else if(orig_list[i].parsed_source_dset_name
                        && (orig_list[i].source_dset.dset_name
                        != orig_list[i].parsed_source_dset_name->name_segment)) {
-                    HDassert(layout->storage.u.virt.list[i].parsed_source_dset_name);
-                    HDassert(layout->storage.u.virt.list[i].parsed_source_dset_name->name_segment);
-                    layout->storage.u.virt.list[i].source_dset.dset_name = layout->storage.u.virt.list[i].parsed_source_dset_name->name_segment;
+                    HDassert(ent->parsed_source_dset_name);
+                    HDassert(ent->parsed_source_dset_name->name_segment);
+                    ent->source_dset.dset_name = ent->parsed_source_dset_name->name_segment;
                } /* end if */
                else
-                    if(NULL == (layout->storage.u.virt.list[i].source_dset.dset_name
+                    if(NULL == (ent->source_dset.dset_name
                            = H5MM_strdup(orig_list[i].source_dset.dset_name)))
                        HGOTO_ERROR(H5E_DATASET, H5E_RESOURCE, FAIL, "unable to duplicate source dataset name")
            } /* end if */

            /* Copy other fields in entry */
-            layout->storage.u.virt.list[i].unlim_dim_source = orig_list[i].unlim_dim_source;
-            layout->storage.u.virt.list[i].unlim_dim_virtual = orig_list[i].unlim_dim_virtual;
-            layout->storage.u.virt.list[i].unlim_extent_source = orig_list[i].unlim_extent_source;
-            layout->storage.u.virt.list[i].unlim_extent_virtual = orig_list[i].unlim_extent_virtual;
-            layout->storage.u.virt.list[i].clip_size_source = orig_list[i].clip_size_source;
-            layout->storage.u.virt.list[i].clip_size_virtual = orig_list[i].clip_size_virtual;
-            layout->storage.u.virt.list[i].source_space_status = orig_list[i].source_space_status;
-            layout->storage.u.virt.list[i].virtual_space_status = orig_list[i].virtual_space_status;
+            ent->unlim_dim_source = orig_list[i].unlim_dim_source;
+            ent->unlim_dim_virtual = orig_list[i].unlim_dim_virtual;
+            ent->unlim_extent_source = orig_list[i].unlim_extent_source;
+            ent->unlim_extent_virtual = orig_list[i].unlim_extent_virtual;
+            ent->clip_size_source = orig_list[i].clip_size_source;
+            ent->clip_size_virtual = orig_list[i].clip_size_virtual;
+            ent->source_space_status = orig_list[i].source_space_status;
+            ent->virtual_space_status = orig_list[i].virtual_space_status;
        } /* end for */
    } /* end if */
    else {
        /* Zero out other fields related to list, just to be sure */
-        layout->storage.u.virt.list = NULL;
-        layout->storage.u.virt.list_nalloc = 0;
+        virt->list = NULL;
+        virt->list_nalloc = 0;
    } /* end else */

    /* Copy property lists */
    if(orig_source_fapl >= 0) {
        if(NULL == (plist = (H5P_genplist_t *)H5I_object_verify(orig_source_fapl, H5I_GENPROP_LST)))
            HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
-        if((layout->storage.u.virt.source_fapl = H5P_copy_plist(plist, FALSE)) < 0)
+        if((virt->source_fapl = H5P_copy_plist(plist, FALSE)) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy fapl")
    } /* end if */
    if(orig_source_dapl >= 0) {
        if(NULL == (plist = (H5P_genplist_t *)H5I_object_verify(orig_source_dapl, H5I_GENPROP_LST)))
            HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
-        if((layout->storage.u.virt.source_dapl = H5P_copy_plist(plist, FALSE)) < 0)
+        if((virt->source_dapl = H5P_copy_plist(plist, FALSE)) < 0)
            HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't copy dapl")
    } /* end if */

    /* New layout is not fully initialized */
-    layout->storage.u.virt.init = FALSE;
+    virt->init = FALSE;

done:
    /* Release allocated resources on failure */

@@ -721,6 +729,7 @@ herr_t
H5D__virtual_reset_layout(H5O_layout_t *layout)
{
    size_t i, j;
+    H5O_storage_virtual_t *virt = &layout->storage.u.virt;
    herr_t ret_value = SUCCEED;

    FUNC_ENTER_PACKAGE

@@ -731,53 +740,54 @@ H5D__virtual_reset_layout(H5O_layout_t *layout)
    /* Free the list entries. Note we always attempt to free everything even in
     * the case of a failure. Because of this, and because we free the list
     * afterwards, we do not need to zero out the memory in the list. */
-    for(i = 0; i < layout->storage.u.virt.list_nused; i++) {
+    for(i = 0; i < virt->list_nused; i++) {
+        H5O_storage_virtual_ent_t *ent = &virt->list[i];
        /* Free source_dset */
-        if(H5D__virtual_reset_source_dset(&layout->storage.u.virt.list[i], &layout->storage.u.virt.list[i].source_dset) < 0)
+        if(H5D__virtual_reset_source_dset(ent, &ent->source_dset) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset source dataset")

        /* Free original source names */
-        (void)H5MM_xfree(layout->storage.u.virt.list[i].source_file_name);
-        (void)H5MM_xfree(layout->storage.u.virt.list[i].source_dset_name);
+        (void)H5MM_xfree(ent->source_file_name);
+        (void)H5MM_xfree(ent->source_dset_name);

        /* Free sub_dset */
-        for(j = 0; j < layout->storage.u.virt.list[i].sub_dset_nalloc; j++)
-            if(H5D__virtual_reset_source_dset(&layout->storage.u.virt.list[i], &layout->storage.u.virt.list[i].sub_dset[j]) < 0)
+        for(j = 0; j < ent->sub_dset_nalloc; j++)
+            if(H5D__virtual_reset_source_dset(ent, &ent->sub_dset[j]) < 0)
                HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "unable to reset source dataset")
-        layout->storage.u.virt.list[i].sub_dset = (H5O_storage_virtual_srcdset_t *)H5MM_xfree(layout->storage.u.virt.list[i].sub_dset);
+        ent->sub_dset = H5MM_xfree(ent->sub_dset);

        /* Free source_select */
-        if(layout->storage.u.virt.list[i].source_select)
-            if(H5S_close(layout->storage.u.virt.list[i].source_select) < 0)
+        if(ent->source_select)
+            if(H5S_close(ent->source_select) < 0)
                HDONE_ERROR(H5E_DATASET, H5E_CLOSEERROR, FAIL, "unable to release source selection")

        /* Free parsed_source_file_name */
-        H5D_virtual_free_parsed_name(layout->storage.u.virt.list[i].parsed_source_file_name);
+        H5D_virtual_free_parsed_name(ent->parsed_source_file_name);

        /* Free parsed_source_dset_name */
-        H5D_virtual_free_parsed_name(layout->storage.u.virt.list[i].parsed_source_dset_name);
-    } /* end for */
+        H5D_virtual_free_parsed_name(ent->parsed_source_dset_name);
+    }

    /* Free the list */
-    layout->storage.u.virt.list = (H5O_storage_virtual_ent_t *)H5MM_xfree(layout->storage.u.virt.list);
-    layout->storage.u.virt.list_nalloc = (size_t)0;
-    layout->storage.u.virt.list_nused = (size_t)0;
-    (void)HDmemset(layout->storage.u.virt.min_dims, 0, sizeof(layout->storage.u.virt.min_dims));
+    virt->list = H5MM_xfree(virt->list);
+    virt->list_nalloc = (size_t)0;
+    virt->list_nused = (size_t)0;
+    (void)HDmemset(virt->min_dims, 0, sizeof(virt->min_dims));

    /* Close access property lists */
-    if(layout->storage.u.virt.source_fapl >= 0) {
-        if(H5I_dec_ref(layout->storage.u.virt.source_fapl) < 0)
+    if(virt->source_fapl >= 0) {
+        if(H5I_dec_ref(virt->source_fapl) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't close source fapl")
-        layout->storage.u.virt.source_fapl = -1;
-    } /* end if */
-    if(layout->storage.u.virt.source_dapl >= 0) {
-        if(H5I_dec_ref(layout->storage.u.virt.source_dapl) < 0)
+        virt->source_fapl = -1;
+    }
+    if(virt->source_dapl >= 0) {
+        if(H5I_dec_ref(virt->source_dapl) < 0)
            HDONE_ERROR(H5E_DATASET, H5E_CANTFREE, FAIL, "can't close source dapl")
-        layout->storage.u.virt.source_dapl = -1;
-    } /* end if */
+        virt->source_dapl = -1;
+    }

    /* The list is no longer initialized */
-    layout->storage.u.virt.init = FALSE;
+    virt->init = FALSE;

    /* Note the lack of a done: label. This is because there are no HGOTO_ERROR
     * calls. If one is added, a done: label must also be added */

@@ -2781,7 +2791,7 @@ H5D__virtual_write_one(H5D_io_info_t *io_info, const H5D_type_info_t *type_info,
     * extent in the unlimited dimension. -NAF */
    /* Project intersection of file space and mapping virtual space onto
     * mapping source space */
-    if(H5S_select_project_intersection(source_dset->virtual_select, source_dset->clipped_source_select, file_space, &projected_src_space, TRUE) < 0)
+    if(H5S_select_project_intersection(source_dset->clipped_virtual_select, source_dset->clipped_source_select, file_space, &projected_src_space, TRUE) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTCLIP, FAIL, "can't project virtual intersection onto source space")

    /* Perform write on source dataset */
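Most of the H5Dvirtual.c hunks are a mechanical refactor rather than a behavior change: the long layout->storage.u.virt.list[i]... chains are replaced by local alias pointers, virt for the storage struct and ent for the current entry. A small sketch of the pattern with hypothetical stand-in structs (the real types are HDF5-internal and much larger):

#include <stdio.h>

/* Hypothetical stand-ins for the nested HDF5 storage structs */
struct entry   { const char *source_file_name; };
struct storage { struct entry list[4]; size_t list_nused; };
struct layout  { struct storage virt; };

static void print_names(struct layout *layout)
{
    struct storage *virt = &layout->virt;       /* alias the nested struct once */
    size_t i;

    for (i = 0; i < virt->list_nused; i++) {
        struct entry *ent = &virt->list[i];     /* alias the current entry */
        printf("%s\n", ent->source_file_name);  /* instead of layout->virt.list[i].source_file_name */
    }
}

int main(void)
{
    struct layout l = { .virt = { .list = { { "a.h5" }, { "b.h5" } }, .list_nused = 2 } };
    print_names(&l);
    return 0;
}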
24 src/H5Fint.c
@@ -1885,6 +1885,15 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
        shared = file->shared;
        lf = shared->lf;

+        /* Set the file locking flag. If the file is already open, the file
+         * requested file locking flag must match that of the open file.
+         */
+        if(shared->nrefs == 1)
+            file->shared->use_file_locking = use_file_locking;
+        else if(shared->nrefs > 1)
+            if(file->shared->use_file_locking != use_file_locking)
+                HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "file locking flag values don't match")
+
        /* Check if page buffering is enabled */
        if(H5P_get(a_plist, H5F_ACS_PAGE_BUFFER_SIZE_NAME, &page_buf_size) < 0)
            HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get page buffer size")

@@ -3695,6 +3704,12 @@ H5F__start_swmr_write(H5F_t *f)

    setup = TRUE;

+    /* Place an advisory lock on the file */
+    if(H5F_USE_FILE_LOCKING(f))
+        if(H5FD_lock(f->shared->lf, TRUE) < 0) {
+            HGOTO_ERROR(H5E_FILE, H5E_CANTLOCKFILE, FAIL, "unable to lock the file")
+        }
+
    /* Mark superblock as dirty */
    if(H5F_super_dirty(f) < 0)
        HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")

@@ -3712,10 +3727,6 @@ H5F__start_swmr_write(H5F_t *f)
        if(H5O_refresh_metadata_reopen(obj_ids[u], &obj_glocs[u], vol_connector, TRUE) < 0)
            HGOTO_ERROR(H5E_ATOM, H5E_CLOSEERROR, FAIL, "can't refresh-close object")

-    /* Unlock the file */
-    if(H5FD_unlock(f->shared->lf) < 0)
-        HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to unlock the file")
-
done:
    if(ret_value < 0 && setup) {

@@ -3744,6 +3755,11 @@ done:
            HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush superblock")
        } /* end if */

+        /* Unlock the file */
+        if(H5F_USE_FILE_LOCKING(f))
+            if(H5FD_unlock(f->shared->lf) < 0)
+                HDONE_ERROR(H5E_FILE, H5E_CANTUNLOCKFILE, FAIL, "unable to unlock the file")
+
        /* Free memory */
        if(obj_ids)
            H5MM_xfree(obj_ids);

@@ -306,6 +306,7 @@ struct H5F_shared_t {
    struct H5G_t *root_grp; /* Open root group */
    H5FO_t *open_objs; /* Open objects in file */
    H5UC_t *grp_btree_shared; /* Ref-counted group B-tree node info */
+    hbool_t use_file_locking; /* Whether or not to use file locking */
    hbool_t closing; /* File is in the process of being closed */

    /* Cached VOL connector ID & info */

@@ -338,6 +338,7 @@ typedef struct H5F_t H5F_t;
#define H5F_SET_MIN_DSET_OHDR(F, V) ((F)->shared->crt_dset_min_ohdr_flag = (V))
#define H5F_VOL_CLS(F) ((F)->shared->vol_cls)
#define H5F_VOL_OBJ(F) ((F)->vol_obj)
+#define H5F_USE_FILE_LOCKING(F) ((F)->shared->use_file_locking)
#else /* H5F_MODULE */
#define H5F_LOW_BOUND(F) (H5F_get_low_bound(F))
#define H5F_HIGH_BOUND(F) (H5F_get_high_bound(F))

@@ -400,6 +401,7 @@ typedef struct H5F_t H5F_t;
#define H5F_SET_MIN_DSET_OHDR(F, V) (H5F_set_min_dset_ohdr((F), (V)))
#define H5F_VOL_CLS(F) (H5F_get_vol_cls(F))
#define H5F_VOL_OBJ(F) (H5F_get_vol_obj(F))
+#define H5F_USE_FILE_LOCKING(F) (H5F_get_use_file_locking(F))
#endif /* H5F_MODULE */

@@ -765,6 +767,7 @@ H5_DLL hbool_t H5F_get_min_dset_ohdr(const H5F_t *f);
H5_DLL herr_t H5F_set_min_dset_ohdr(H5F_t *f, hbool_t minimize);
H5_DLL const H5VL_class_t *H5F_get_vol_cls(const H5F_t *f);
H5_DLL H5VL_object_t *H5F_get_vol_obj(const H5F_t *f);
+H5_DLL hbool_t H5F_get_file_locking(const H5F_t *f);

/* Functions than retrieve values set/cached from the superblock/FCPL */
H5_DLL haddr_t H5F_get_base_addr(const H5F_t *f);

@@ -1363,3 +1363,24 @@ done:
    FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_get_cont_info */

+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_file_locking
+ *
+ * Purpose:  Get the file locking flag for the file
+ *
+ * Return:   TRUE/FALSE
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5F_get_file_locking(const H5F_t *f)
+{
+    FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+    HDassert(f);
+    HDassert(f->shared);
+
+    FUNC_LEAVE_NOAPI(f->shared->use_file_locking)
+} /* end H5F_get_file_locking */
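The H5F__start_swmr_write hunks above take the advisory lock only when file locking is enabled for the file (H5F_USE_FILE_LOCKING) and move the unlock into the cleanup code so error paths release it as well. A rough sketch of that guard-and-cleanup shape using POSIX flock() as a stand-in for the VFD lock callback; the file name and helper function are hypothetical and this is not the HDF5 code:

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

static int do_swmr_setup(const char *path, int use_file_locking)
{
    int fd = -1, locked = 0, ret = 0;

    if ((fd = open(path, O_RDWR)) < 0)
        return -1;

    /* Lock only when locking is enabled for this file */
    if (use_file_locking) {
        if (flock(fd, LOCK_EX) < 0) { ret = -1; goto done; }
        locked = 1;
    }

    /* ... work that must not race with other writers ... */

done:
    /* Single cleanup path: unlock on success and on error */
    if (locked && flock(fd, LOCK_UN) < 0)
        ret = -1;
    close(fd);
    return ret;
}

int main(void)
{
    return do_swmr_setup("example.h5", 1) == 0 ? 0 : 1;
}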
@@ -2645,6 +2645,10 @@ H5MF_settle_raw_data_fsm(H5F_t *f, hbool_t *fsm_settled)
    HDassert(f->shared);
    HDassert(fsm_settled);

+    /* Initialize structs */
+    HDmemset(&fsinfo, 0, sizeof(fsinfo));
+    HDmemset(&fs_stat, 0, sizeof(fs_stat));
+
    /*
     * Only need to settle things if we are persisting free space and
     * the private property in f->shared->null_fsm_addr is not enabled.

@@ -11334,6 +11334,9 @@ test_bt2_hdr_fd(const char *env_h5_driver, hid_t fapl)

    TESTING("Version 2 B-tree chunk index header flush dependencies handled correctly");

+    /* Initialize struct */
+    HDmemset(&info, 0, sizeof(info));
+
    /* Skip this test if SWMR I/O is not supported for the VFD specified
     * by the environment variable.
     */
90 test/swmr.c
@@ -6055,16 +6055,13 @@ error:

} /* end test_file_lock_swmr_concur() */

#endif /* !(defined(H5_HAVE_FORK && defined(H5_HAVE_WAITPID)) */

/****************************************************************
**
-**  test_file_lock_swmr_concur(): low-level file test routine.
-**    With the implementation of file locking, this test checks file
-**    open with different combinations of flags + SWMR flags.
-**    This is for concurrent access.
+**  test_file_locking():
+**    Tests various combinations of file locking flags and
+**    and environment variables.
**
*****************************************************************/
static int

@@ -6242,6 +6239,79 @@ error:
} /* end test_file_locking() */

+/****************************************************************
+**
+**  test_different_lock_flags():
+**    Tests opening a file multiple times with different lock
+**    flags.
+**
+*****************************************************************/
+static int
+test_different_lock_flags(hid_t in_fapl)
+{
+    hid_t fid1 = H5I_INVALID_HID;       /* File ID */
+    hid_t fid2 = H5I_INVALID_HID;       /* File ID */
+    hid_t fid3 = H5I_INVALID_HID;       /* File ID */
+    hid_t fapl_id = H5I_INVALID_HID;    /* File access property list */
+    char filename[NAME_BUF_SIZE];       /* File name */
+
+    TESTING("Using different lock flags")
+
+    /* Copy the incoming fapl */
+    if((fapl_id = H5Pcopy(in_fapl)) < 0)
+        TEST_ERROR
+
+    /* Set locking in the fapl */
+    if(H5Pset_file_locking(fapl_id, TRUE, TRUE) < 0)
+        TEST_ERROR
+
+    /* Set the filename to use for this test (dependent on fapl) */
+    h5_fixname(FILENAME[1], fapl_id, filename, sizeof(filename));
+
+    /* Create the test file */
+    if((fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
+        TEST_ERROR
+
+    /* Open the test file with the same flags (should pass) */
+    if((fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl_id)) < 0)
+        TEST_ERROR
+
+    /* Unset locking in the fapl */
+    if(H5Pset_file_locking(fapl_id, FALSE, FALSE) < 0)
+        TEST_ERROR
+
+    /* Open the test file with different flags (should FAIL) */
+    H5E_BEGIN_TRY {
+        fid3 = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+    } H5E_END_TRY;
+    if(H5I_INVALID_HID != fid3)
+        FAIL_PUTS_ERROR("Should not have been able to open a file with different locking flags")
+
+    /* Close the files */
+    if(H5Fclose(fid1) < 0)
+        TEST_ERROR
+    if(H5Fclose(fid2) < 0)
+        TEST_ERROR
+
+    /* Close the copied property list */
+    if(H5Pclose(fapl_id) < 0)
+        TEST_ERROR
+
+    PASSED();
+
+    return 0;
+
+error:
+    H5E_BEGIN_TRY {
+        H5Pclose(fapl_id);
+        H5Fclose(fid1);
+        H5Fclose(fid2);
+        H5Fclose(fid3);
+    } H5E_END_TRY;
+
+    return -1;
+} /* end test_different_lock_flags() */
+
static int
test_swmr_vfd_flag(void)
{

@@ -7179,8 +7249,12 @@ main(void)
    if(NULL == driver || !HDstrcmp(driver, "") || !HDstrcmp(driver, "sec2"))
        nerrors += test_swmr_vfd_flag();

-    /* This test changes the HDF5_USE_FILE_LOCKING environment variable
-     * so it should be run last.
+    /* Test multiple opens via different locking flags */
+    if (use_file_locking && file_locking_enabled)
+        nerrors += test_different_lock_flags(fapl);
+
+    /* These tests change the HDF5_USE_FILE_LOCKING environment variable
+     * so they should be run last.
     */
    if (use_file_locking && file_locking_enabled) {
        nerrors += test_file_locking(fapl, TRUE, TRUE);
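The new test above exercises H5Pset_file_locking together with the check added in H5F_open that a second open of the same file must use the same locking flag. A short sketch of the same property-list call from an application's point of view, assuming a file named example.h5; the (use_file_locking, ignore_when_disabled) argument order matches the calls in the test:

#include <stdio.h>
#include "hdf5.h"

int main(void)
{
    hid_t fapl_id, file_id;

    fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    /* Disable file locking on this access property list, e.g. for a
     * file system where advisory locks are unsupported */
    if (H5Pset_file_locking(fapl_id, 0, 1) < 0) {
        fprintf(stderr, "could not set file locking flags\n");
        return 1;
    }

    file_id = H5Fopen("example.h5", H5F_ACC_RDWR, fapl_id);
    if (file_id >= 0)
        H5Fclose(file_id);
    H5Pclose(fapl_id);
    return 0;
}

As the H5F_open hunk shows, reopening a file that is already open with a different use_file_locking value fails with "file locking flag values don't match".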
@@ -1003,6 +1003,7 @@ test_set_configured_fapl(void)

    hid_t fapl_id = H5I_INVALID_HID;
    other_fa_t wrong_fa = {0x432, 0xf82, 0x9093};
+#ifdef H5_HAVE_ROS3_VFD
    H5FD_ros3_fapl_t ros3_anon_fa = {1, FALSE, "", "", ""};
    H5FD_ros3_fapl_t ros3_auth_fa = {
        1, /* fapl version */

@@ -1011,6 +1012,8 @@ test_set_configured_fapl(void)
        "12345677890abcdef", /* simulate access key ID */
        "oiwnerwe9u0234nJw0-aoj+dsf", /* simulate secret key */
    };
+#endif /* H5_HAVE_ROS3_VFD */
+#ifdef H5_HAVE_LIBHDFS
    H5FD_hdfs_fapl_t hdfs_fa = {
        1, /* fapl version */
        "", /* namenode name */

@@ -1019,6 +1022,7 @@ test_set_configured_fapl(void)
        "", /* user name */
        2048, /* stream buffer size */
    };
+#endif /* H5_HAVE_LIBHDFS */
    unsigned n_cases = 7; /* number of common testcases */
    testcase cases[] = {
        { "(common) should fail: no fapl id",
@@ -677,6 +677,26 @@ VERIFY_INVALIDBOUNDS()

} # end of VERIFY_INVALIDBOUNDS

+# -----------------------------------------------------------------------------
+# Expect h5diff to fail
+# Use only by VERIFY_EXTERNAL_CONSOLIDATION
+# -----------------------------------------------------------------------------
+DIFFFAIL()
+{
+    VERIFY h5diff unequal $@
+    (
+        cd $TESTDIR
+        $RUNSERIAL $H5DIFF_BIN -q "$@"
+    )
+    RET=$?
+    if [ $RET == 0 ] ; then
+        echo "*FAILED*"
+        nerrors="`expr $nerrors + 1`"
+    else
+        echo " PASSED"
+    fi
+}
+
# -----------------------------------------------------------------------------
# Catchall test for repacking with external files
# Loops over all (internally-listed) cases and applies the given arguments
@@ -1 +1 @@
-h5mkgrp: Version @HDF5_PACKAGE_VERSION_STRING@
+h5mkgrp: Version @HDF5_RELEASE_VERSION_STRING@