/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Purpose: Test Shared Object Header Messages
*/
#include "testhdf5.h"
/*
* This file needs to access private information from the H5F package.
* This file also needs to access the file testing code.
*/
#define H5F_FRIEND /* suppress error about including H5Fpkg */
#define H5F_TESTING
#include "H5Fpkg.h" /* File access */
/* Default SOHM values */
#define DEF_NUM_INDEXES 0
const unsigned def_type_flags[H5O_SHMESG_MAX_NINDEXES] = {0, 0, 0, 0, 0, 0};
const unsigned def_minsizes[H5O_SHMESG_MAX_NINDEXES] = {250, 250, 250, 250, 250, 250};
#define DEF_L2B 50
#define DEF_B2L 40
/* Non-default SOHM values for testing */
#define TEST_NUM_INDEXES 4
const unsigned test_type_flags[H5O_SHMESG_MAX_NINDEXES] = {H5O_SHMESG_FILL_FLAG,
H5O_SHMESG_DTYPE_FLAG | H5O_SHMESG_ATTR_FLAG,
H5O_SHMESG_SDSPACE_FLAG,
H5O_SHMESG_PLINE_FLAG,
0,
0};
const unsigned test_minsizes[H5O_SHMESG_MAX_NINDEXES] = {0, 2, 40, 100, 3, 1000};
#define TEST_L2B 65
#define TEST_B2L 64
#define FILENAME "tsohm.h5"
#define FILENAME_SRC "tsohm_src.h5"
#define FILENAME_DST "tsohm_dst.h5"
#define NAME_BUF_SIZE 512
/* How much overhead counts as "not much" when converting B-trees, etc. */
#define OVERHEAD_ALLOWED 1.15F
#define NUM_DATASETS 10
#define NUM_ATTRIBUTES 100
/* Compound type with many members, used to build datatype messages large
 * enough to be worth sharing (see make_dtype_1 / size1_helper).
 */
typedef struct dtype1_struct {
    int   i1;
    char  str[10]; /* fixed-length string member */
    int   i2;
    int   i3;
    int   i4;
    int   i5;
    int   i6;
    int   i7;
    int   i8;
    float f1;
} dtype1_struct;
#define DTYPE2_SIZE 1024
const char *DSETNAME[] = {"dataset0", "dataset1", "dataset2", "dataset3", "dataset4",
"dataset5", "dataset6", "dataset7", "dataset8", "dataset9",
"dataset10", "dataset11", NULL};
const char *EXTRA_DSETNAME[] = {"ex_dataset0", "ex_dataset1", "ex_dataset2",
"ex_dataset3", "ex_dataset4", "ex_dataset5",
"ex_dataset6", "ex_dataset7", "ex_dataset8",
"ex_dataset9", "ex_dataset10", "ex_dataset11",
"ex_dataset12", "ex_dataset13", "ex_dataset14",
"ex_dataset15", "ex_dataset16", "ex_dataset17",
"ex_dataset18", "ex_dataset19", NULL};
#define SOHM_HELPER_NUM_EX_DSETS 20
/* Simple two-member compound (real/imaginary pair) used in datatype tests */
typedef struct complex_t {
    double re;
    double im;
} complex_t;
#define ENUM_NUM_MEMBS 20
const char *ENUM_NAME[] = {"enum_member0", "enum_member1", "enum_member2",
"enum_member3", "enum_member4", "enum_member5",
"enum_member6", "enum_member7", "enum_member8",
"enum_member9", "enum_member10", "enum_member11",
"enum_member12", "enum_member13", "enum_member14",
"enum_member15", "enum_member16", "enum_member17",
"enum_member18", "enum_member19", NULL};
const int ENUM_VAL[] = {0, 13, -500, 63, 64, -64, 65, 2048, 1, 2, -1,
7, 130, -5000, 630, 640, -640, 650, 20480, 10, -1001, -10};
#define SIZE2_RANK1 6
#define SIZE2_RANK2 10
#define SIZE2_DIMS \
{ \
1, 2, 3, 4, 5, 6, 7, 8, 9, 10 \
}
#define LONG_STRING \
"00 index. A long string used for testing. To create new strings, set the first two characters to be " \
"some ASCII number other than 00, such as 01."
/* Struct returned from size2_helper function */
typedef struct size2_helper_struct {
    /* File-size snapshots; field names indicate the stage of test activity
     * after which each size is presumably recorded (see size2_helper,
     * defined elsewhere in this file).
     */
    h5_stat_size_t empty_size;
    h5_stat_size_t first_dset;
    h5_stat_size_t second_dset;
    h5_stat_size_t dsets1;
    h5_stat_size_t dsets2;
    h5_stat_size_t interleaved;
    h5_stat_size_t attrs1;
    h5_stat_size_t attrs2;
} size2_helper_struct;
/* Number of distinct messages for the sohm_delete test */
#define DELETE_NUM_MESGS 7
#define HALF_DELETE_NUM_MESGS 3
#define DELETE_DIMS \
{ \
1, 1, 1, 1, 1, 1, 1 \
}
#define DELETE_MIN_MESG_SIZE 10
#define DELETE_MAX_MESG_SIZE 60
/* Number of dimensions in extend_dset test */
#define EXTEND_NDIMS 2
/* Dimensions for external_dtype test */
#define NX 10
#define NY 10
/* Helper function prototypes */
static hid_t make_dtype_1(void);
static hid_t make_dtype_2(void);
static hid_t close_reopen_file(hid_t file, const char *filename, hid_t fapl_id);
static void test_sohm_attrs(void);
#ifdef NOT_NOW
static void size2_dump_struct(const char *name, size2_helper_struct *sizes);
#endif /* NOT_NOW */
static void size2_verify(void);
static void test_sohm_delete(void);
static void test_sohm_delete_revert(void);
static void test_sohm_extlink(void);
/****************************************************************
**
** verify_fcpl_values(): Verifies that FCPL is set as expected.
**
****************************************************************/
static void
2020-09-30 22:27:10 +08:00
verify_fcpl_values(hid_t fcpl_id, const unsigned nindexes_expected, const unsigned *flags_expected,
const unsigned *minsizes_expected, unsigned l2b, unsigned b2l)
{
2020-09-30 22:27:10 +08:00
unsigned nindexes_actual;
unsigned list_size;
unsigned btree_size;
unsigned x;
herr_t ret;
/* Number of indexes */
ret = H5Pget_shared_mesg_nindexes(fcpl_id, &nindexes_actual);
CHECK_I(ret, "H5Pget_shared_mesg_nindexes");
VERIFY(nindexes_actual, nindexes_expected, "H5Pget_shared_mesg_nindexes");
/* Index flags and minsizes */
2020-09-30 22:27:10 +08:00
for (x = 0; x < nindexes_actual; ++x) {
unsigned flags_i;
unsigned min_mesg_size;
ret = H5Pget_shared_mesg_index(fcpl_id, x, &flags_i, &min_mesg_size);
CHECK_I(ret, "H5Pget_shared_mesg_index");
VERIFY(flags_i, flags_expected[x], "H5Pget_shared_mesg_index");
VERIFY(min_mesg_size, minsizes_expected[x], "H5Pget_shared_mesg_index");
}
/* List-to-btree and btree-to-list values */
ret = H5Pget_shared_mesg_phase_change(fcpl_id, &list_size, &btree_size);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
VERIFY(list_size, l2b, "H5Pset_shared_mesg_phase_change");
VERIFY(btree_size, b2l, "H5Pset_shared_mesg_phase_change");
} /* verify_fcpl_values */
/****************************************************************
**
** test_sohm_fcpl(): Test File Creation Property Lists.
**
****************************************************************/
static void
test_sohm_fcpl(void)
{
    hid_t    fid      = -1;
    hid_t    fcpl_id  = -1;
    hid_t    fcpl2_id = -1;
    unsigned x;
    herr_t   ret;

    MESSAGE(5, ("Testing File Creation Properties for Shared Messages\n"));

    fcpl_id = H5Pcreate(H5P_FILE_CREATE);
    CHECK_I(fcpl_id, "H5Pcreate");

    verify_fcpl_values(fcpl_id, DEF_NUM_INDEXES, def_type_flags, def_minsizes, DEF_L2B, DEF_B2L);

    /* Create a file with this fcpl and make sure that all the values can be
     * retrieved.
     */
    fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(fid, "H5Fcreate");

    fcpl2_id = H5Fget_create_plist(fid);
    CHECK_I(fcpl2_id, "H5Fget_create_plist");

    verify_fcpl_values(fcpl2_id, DEF_NUM_INDEXES, def_type_flags, def_minsizes, DEF_L2B, DEF_B2L);

    ret = H5Pclose(fcpl2_id);
    CHECK_I(ret, "H5Pclose");

    /* Close and re-open the file. Make sure that fcpl values are still
     * correct.
     */
    ret = H5Fclose(fid);
    CHECK_I(ret, "H5Fclose");
    fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
    CHECK_I(fid, "H5Fopen");

    fcpl2_id = H5Fget_create_plist(fid);
    /* Fixed: previously checked the stale 'ret' value here instead of the
     * property-list ID returned by H5Fget_create_plist.
     */
    CHECK_I(fcpl2_id, "H5Fget_create_plist");

    verify_fcpl_values(fcpl2_id, DEF_NUM_INDEXES, def_type_flags, def_minsizes, DEF_L2B, DEF_B2L);

    /* Clean up */
    ret = H5Pclose(fcpl2_id);
    CHECK_I(ret, "H5Pclose");
    ret = H5Pclose(fcpl_id);
    CHECK_I(ret, "H5Pclose");
    ret = H5Fclose(fid);
    CHECK_I(ret, "H5Fclose");

    /* Start over with a non-default fcpl */
    fcpl_id = H5Pcreate(H5P_FILE_CREATE);
    CHECK_I(fcpl_id, "H5Pcreate");

    /* Set up index values */
    ret = H5Pset_shared_mesg_nindexes(fcpl_id, TEST_NUM_INDEXES);
    CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
    for (x = 0; x < TEST_NUM_INDEXES; ++x) {
        ret = H5Pset_shared_mesg_index(fcpl_id, x, test_type_flags[x], test_minsizes[x]);
        CHECK_I(ret, "H5Pset_shared_mesg_index");
    } /* end for */

    ret = H5Pset_shared_mesg_phase_change(fcpl_id, TEST_L2B, TEST_B2L);
    CHECK_I(ret, "H5Pset_shared_mesg_phase_change");

    verify_fcpl_values(fcpl_id, TEST_NUM_INDEXES, test_type_flags, test_minsizes, TEST_L2B, TEST_B2L);

    /* Use the fcpl to create a file and get it back again */
    fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(fid, "H5Fcreate");
    fcpl2_id = H5Fget_create_plist(fid);
    CHECK_I(fcpl2_id, "H5Fget_create_plist");

    verify_fcpl_values(fcpl2_id, TEST_NUM_INDEXES, test_type_flags, test_minsizes, TEST_L2B, TEST_B2L);

    ret = H5Pclose(fcpl2_id);
    CHECK_I(ret, "H5Pclose");

    /* Close and re-open the file. Make sure that fcpl values are still
     * correct.
     */
    ret = H5Fclose(fid);
    CHECK_I(ret, "H5Fclose");
    fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
    CHECK_I(fid, "H5Fopen");

    fcpl2_id = H5Fget_create_plist(fid);
    /* Fixed: previously checked 'ret' instead of 'fcpl2_id' */
    CHECK_I(fcpl2_id, "H5Fget_create_plist");

    verify_fcpl_values(fcpl2_id, TEST_NUM_INDEXES, test_type_flags, test_minsizes, TEST_L2B, TEST_B2L);

    /* Clean up */
    ret = H5Pclose(fcpl2_id);
    CHECK_I(ret, "H5Pclose");
    ret = H5Fclose(fid);
    CHECK_I(ret, "H5Fclose");

    /* Actually, the list max can be exactly 1 greater than the
     * btree min, but no more.
     * Reset the second index.
     */
    ret = H5Pset_shared_mesg_index(fcpl_id, 1, test_type_flags[1], 15);
    CHECK_I(ret, "H5Pset_shared_mesg_index");
    ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, 11);
    CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
    fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(fid, "H5Fcreate");
    ret = H5Fclose(fid);
    CHECK_I(ret, "H5Fclose");

    /* Test edge cases:
     * H5O_SHMESG_MAX_NINDEXES and H5O_SHMESG_MAX_LIST_SIZE should be valid
     * values.
     * Creating a file with uninitialized indexes should work. (TODO: not implemented?)
     */
    ret = H5Pset_shared_mesg_nindexes(fcpl_id, H5O_SHMESG_MAX_NINDEXES);
    CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
    ret = H5Pset_shared_mesg_phase_change(fcpl_id, H5O_SHMESG_MAX_LIST_SIZE, H5O_SHMESG_MAX_LIST_SIZE);
    CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
    fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(fid, "H5Fcreate");

    /* Clean up */
    ret = H5Pclose(fcpl_id);
    CHECK_I(ret, "H5Pclose");
    ret = H5Fclose(fid);
    CHECK_I(ret, "H5Fclose");
} /* test_sohm_fcpl */
/****************************************************************
**
** test_sohm_fcpl_errors(): Test bogus FCPL settings for SOHMs
**
****************************************************************/
static void
test_sohm_fcpl_errors(void)
{
hid_t fcpl_id = -1;
hid_t fid = -1;
unsigned x;
herr_t ret;
MESSAGE(5, ("Testing bogus file creation properties for shared messages\n"));
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
/* Set up index values */
ret = H5Pset_shared_mesg_nindexes(fcpl_id, TEST_NUM_INDEXES);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
2020-09-30 22:27:10 +08:00
for (x = 0; x < TEST_NUM_INDEXES; ++x) {
ret = H5Pset_shared_mesg_index(fcpl_id, x, test_type_flags[x], test_minsizes[x]);
CHECK_I(ret, "H5Pset_shared_mesg_index");
}
ret = H5Pset_shared_mesg_phase_change(fcpl_id, TEST_L2B, TEST_B2L);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
verify_fcpl_values(fcpl_id, TEST_NUM_INDEXES, test_type_flags, test_minsizes, TEST_L2B, TEST_B2L);
2020-09-30 22:27:10 +08:00
H5E_BEGIN_TRY
{
/* Trying to create too many indexes should fail */
ret = H5Pset_shared_mesg_nindexes(fcpl_id, H5O_SHMESG_MAX_NINDEXES + 1);
VERIFY(ret, -1, "H5Pset_shared_mesg_nindexes");
/* Trying to set index to an index higher than the current number
* of indexes should fail.
*/
ret = H5Pset_shared_mesg_index(fcpl_id, H5O_SHMESG_MAX_NINDEXES, 0, 15);
VERIFY(ret, -1, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, TEST_NUM_INDEXES, 0, 15);
VERIFY(ret, -1, "H5Pset_shared_mesg_index");
/* Setting an unknown flag (all flags + 1) should fail */
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_ALL_FLAG + 1, 15);
VERIFY(ret, -1, "H5Pset_shared_mesg_index");
/* Try setting two different indexes to hold fill messages. They
* should hold even very small messages for testing, even though we
* wouldn't really want to share such tiny messages in the real world.
*/
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_FILL_FLAG, 15);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_FILL_FLAG, 15);
CHECK_I(ret, "H5Pset_shared_mesg_index");
fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
VERIFY(fid, -1, "H5Fcreate");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_DTYPE_FLAG | H5O_SHMESG_FILL_FLAG, 15);
CHECK_I(ret, "H5Pset_shared_mesg_index");
fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
VERIFY(fid, -1, "H5Fcreate");
/* Test list/btree cutoffs. We can set these to any positive value,
* but if the list max is less than the btree min we'll get an error
* when the file is created.
*/
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, 12);
VERIFY(ret, -1, "H5Pset_shared_mesg_phase_change");
/* Setting them to extremely large values should also fail */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, H5O_SHMESG_MAX_LIST_SIZE + 1, 0);
VERIFY(ret, -1, "H5Pset_shared_mesg_phase_change");
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, H5O_SHMESG_MAX_LIST_SIZE + 10);
VERIFY(ret, -1, "H5Pset_shared_mesg_phase_change");
2020-09-30 22:27:10 +08:00
ret =
H5Pset_shared_mesg_phase_change(fcpl_id, H5O_SHMESG_MAX_LIST_SIZE, H5O_SHMESG_MAX_LIST_SIZE + 1);
VERIFY(ret, -1, "H5Pset_shared_mesg_phase_change");
2020-09-30 22:27:10 +08:00
}
H5E_END_TRY
} /* test_sohm_fcpl_errors */
/*-------------------------------------------------------------------------
* Function: make_dtype_1
*
* Purpose: Creates a complicated datatype for use in testing
* shared object header messages. The important thing is that
* the datatypes must take a lot of space to store on disk.
*
* Return: Success: datatype ID (should be closed by calling function)
* Failure: negative
*
* Programmer: James Laird
* Saturday, August 26, 2006
*
*-------------------------------------------------------------------------
*/
static hid_t
make_dtype_1(void)
{
hid_t dtype1_id = -1;
2020-09-30 22:27:10 +08:00
hid_t str_id = -1;
/* Create compound datatype. */
2020-09-30 22:27:10 +08:00
if ((dtype1_id = H5Tcreate(H5T_COMPOUND, sizeof(struct dtype1_struct))) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i1", HOFFSET(dtype1_struct, i1), H5T_NATIVE_INT) < 0)
TEST_ERROR;
str_id = H5Tcopy(H5T_C_S1);
2020-09-30 22:27:10 +08:00
if (H5Tset_size(str_id, (size_t)10) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "string", HOFFSET(dtype1_struct, str), str_id) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i2", HOFFSET(dtype1_struct, i2), H5T_NATIVE_INT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i3", HOFFSET(dtype1_struct, i3), H5T_NATIVE_INT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i4", HOFFSET(dtype1_struct, i4), H5T_NATIVE_INT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i5", HOFFSET(dtype1_struct, i5), H5T_NATIVE_INT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i6", HOFFSET(dtype1_struct, i6), H5T_NATIVE_INT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i7", HOFFSET(dtype1_struct, i7), H5T_NATIVE_INT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "i8", HOFFSET(dtype1_struct, i8), H5T_NATIVE_INT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tinsert(dtype1_id, "f1", HOFFSET(dtype1_struct, f1), H5T_NATIVE_FLOAT) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tclose(str_id) < 0)
TEST_ERROR;
return dtype1_id;
error:
2020-09-30 22:27:10 +08:00
H5E_BEGIN_TRY
{
H5Tclose(str_id);
H5Tclose(dtype1_id);
}
H5E_END_TRY
return -1;
} /* make_dtype1 */
/*-------------------------------------------------------------------------
* Function: make_dtype_2
*
* Purpose: Creates complicated datatypes for use in testing
* shared object header messages. The important thing is that
* the datatypes must take a lot of space to store on disk.
*
* Return: Success: datatype ID (should be closed by calling function)
* Failure: negative
*
* Programmer: James Laird
* Saturday, August 26, 2006
*
*-------------------------------------------------------------------------
*/
static hid_t
make_dtype_2(void)
{
2020-09-30 22:27:10 +08:00
hid_t dtype2_id = -1;
hid_t enum_id = -1;
hid_t int_id = -1;
int x;
hsize_t dims[] = {2, 1, 2, 4};
2020-09-30 22:27:10 +08:00
size_t size;
/* Create an int with a strange precision */
2020-09-30 22:27:10 +08:00
if ((int_id = H5Tcopy(H5T_NATIVE_INT)) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tset_precision(int_id, (size_t)24) < 0)
TEST_ERROR;
/* Create an enumeration using that int */
2020-09-30 22:27:10 +08:00
if ((enum_id = H5Tenum_create(int_id)) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
for (x = 0; x < ENUM_NUM_MEMBS; x++)
if (H5Tenum_insert(enum_id, ENUM_NAME[x], &ENUM_VAL[x]) < 0)
TEST_ERROR;
/* Create arrays of arrays of arrays of enums */
2020-09-30 22:27:10 +08:00
if ((dtype2_id = H5Tarray_create2(enum_id, 3, dims)) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if ((dtype2_id = H5Tarray_create2(dtype2_id, 4, dims)) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if ((dtype2_id = H5Tarray_create2(dtype2_id, 2, dims)) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if ((dtype2_id = H5Tarray_create2(dtype2_id, 1, dims)) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tclose(enum_id) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Tclose(int_id) < 0)
TEST_ERROR;
/* Check the datatype size. If this is different than the #defined
* size then the fills values will have the wrong size.
*/
size = H5Tget_size(dtype2_id);
2020-09-30 22:27:10 +08:00
if (size != DTYPE2_SIZE)
TEST_ERROR;
return dtype2_id;
error:
2020-09-30 22:27:10 +08:00
H5E_BEGIN_TRY
{
H5Tclose(dtype2_id);
H5Tclose(enum_id);
H5Tclose(int_id);
}
H5E_END_TRY
return -1;
} /* make_dtype2 */
/*-------------------------------------------------------------------------
* Function: close_reopen_file
*
* Purpose: Closes a file and then reopens it. Used to ensure that
* SOHMs are written to and read from disk
*
* Return: Success: new hid_t for the file
* Failure: Negative
*
* Programmer: James Laird
* Wednesday, October 4, 2006
*
*-------------------------------------------------------------------------
*/
static hid_t
2020-09-30 22:27:10 +08:00
close_reopen_file(hid_t file, const char *filename, hid_t fapl_id)
{
if (H5Fclose(file) < 0)
FAIL_STACK_ERROR;
file = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
if (file < 0)
FAIL_STACK_ERROR;
2020-09-30 22:27:10 +08:00
return (file);
error:
return -1;
} /* close_reopen_file */
/*-------------------------------------------------------------------------
* Function: size1_helper
*
* Purpose: Creates object headers that use a large datatype message.
*
* Set test_file_closing to TRUE to add file closing and reopening
* whenever possible (to test that SOHMs are written correctly
* on disk and not just in memory).
*
* Return: Success: file ID (may not be the same one passed in)
* Failure: H5I_INVALID_HID
*
* Programmer: James Laird
* Monday, April 10, 2006
*
*-------------------------------------------------------------------------
*/
static hid_t
size1_helper(hid_t file, const char *filename, hid_t fapl_id, hbool_t test_file_closing)
{
    dtype1_struct wdata;                       /* data written to each dataset */
    dtype1_struct rdata;                       /* read-back buffer for verification */
    hid_t         dtype1_id = H5I_INVALID_HID; /* large compound datatype (sharing candidate) */
    hid_t         space_id  = H5I_INVALID_HID;
    hid_t         dset_id   = H5I_INVALID_HID;
    hsize_t       dim1[1];
    int           x;

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * Macro: TSOHM_S1H_VERIFY_DATA
 *
 * Purpose: Encapsulate a common pattern:
 *          Reads the dataset and verifies that [a subset of] the data
 *          are as expected.
 *
 * Note: relies on the local wdata/rdata variables and jumps to this
 *       function's `error` label on failure.
 *
 * Programmer: Jacob Smith
 *             2018 November 1
 * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 */
#define TSOHM_S1H_VERIFY_DATA(dset_id, dtype_id)                                                   \
    {                                                                                              \
        HDmemset(&rdata, 0, sizeof(rdata));                                                        \
        if (0 > H5Dread((dset_id), (dtype_id), H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata)) {           \
            H5_FAILED();                                                                           \
            AT();                                                                                  \
            HDprintf("Can't read data\n");                                                         \
            goto error;                                                                            \
        }                                                                                          \
        if ((rdata.i1 != wdata.i1) || (rdata.i2 != wdata.i2) ||                                    \
            HDstrcmp(rdata.str, wdata.str) != 0) {                                                 \
            H5_FAILED();                                                                           \
            AT();                                                                                  \
            HDprintf("incorrect read data\n");                                                     \
            goto error;                                                                            \
        }                                                                                          \
    } /* TSOHM_S1H_VERIFY_DATA() definition */

    /* Closing and re-opening the file takes a long time on systems without
     * local disks. Don't close and reopen if express testing is enabled.
     */
    if (GetTestExpress() > 1)
        test_file_closing = FALSE;

    /* Initialize wdata */
    HDmemset(&wdata, 0, sizeof(wdata));
    wdata.i1 = 11;
    HDstrcpy(wdata.str, "string");
    wdata.i2 = 22;
    wdata.i3 = 33;
    wdata.i4 = 44;
    wdata.i5 = 55;
    wdata.i6 = 66;
    wdata.i7 = 77;
    wdata.i8 = 88;
    wdata.f1 = 0.0F;

    /* Initialize rdata */
    HDmemset(&rdata, 0, sizeof(rdata));

    dtype1_id = make_dtype_1();
    if (dtype1_id < 0)
        TEST_ERROR;

    /* One-element dataspace shared by every dataset created below */
    dim1[0]  = 1;
    space_id = H5Screate_simple(1, dim1, NULL);
    if (space_id < 0)
        TEST_ERROR;

    dset_id = H5Dcreate2(file, DSETNAME[0], dtype1_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    if (dset_id < 0)
        FAIL_STACK_ERROR;

    /* Test writing and reading */
    if (H5Dwrite(dset_id, dtype1_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0)
        FAIL_STACK_ERROR;
    TSOHM_S1H_VERIFY_DATA(dset_id, dtype1_id)
    if (H5Dclose(dset_id) < 0)
        FAIL_STACK_ERROR;

    /* Optionally close and re-open, so shared messages must round-trip
     * through the file rather than living only in memory
     */
    if (test_file_closing)
        if ((file = close_reopen_file(file, filename, fapl_id)) < 0)
            TEST_ERROR;

    /* Create 3 more datasets with the same datatype/dataspace */
    for (x = 1; x < 4; x++) {
        dset_id = H5Dcreate2(file, DSETNAME[x], dtype1_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        if (0 > dset_id)
            FAIL_STACK_ERROR;
        /* only the last of the three gets data written to it */
        if (x == 3)
            if (H5Dwrite(dset_id, dtype1_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata) < 0)
                TEST_ERROR;
        if (H5Dclose(dset_id) < 0)
            FAIL_STACK_ERROR;
        if (test_file_closing)
            if ((file = close_reopen_file(file, filename, fapl_id)) < 0)
                TEST_ERROR;
    }

    if (H5Tclose(dtype1_id) < 0)
        TEST_ERROR;

    /* Make sure the data has been written successfully */
    dset_id = H5Dopen2(file, DSETNAME[0], H5P_DEFAULT);
    if (dset_id < 0)
        TEST_ERROR;
    /* re-acquire the datatype from the dataset itself */
    dtype1_id = H5Dget_type(dset_id);
    if (dtype1_id < 0)
        TEST_ERROR;
    TSOHM_S1H_VERIFY_DATA(dset_id, dtype1_id)
    if (H5Dclose(dset_id) < 0)
        TEST_ERROR;

    /* Create several copies of the dataset
     * this increases the amount of space saved by sharing the datatype message
     */
    for (x = 0; x < SOHM_HELPER_NUM_EX_DSETS; x++) {
        dset_id =
            H5Dcreate2(file, EXTRA_DSETNAME[x], dtype1_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        if (dset_id < 0)
            TEST_ERROR;
        if (H5Dclose(dset_id) < 0)
            TEST_ERROR;
        if (test_file_closing)
            if ((file = close_reopen_file(file, filename, fapl_id)) < 0)
                TEST_ERROR;
    }

    if (H5Tclose(dtype1_id) < 0)
        TEST_ERROR;
    if (H5Sclose(space_id) < 0)
        TEST_ERROR;

    /* Ensure that we can still read data back from dataset 3 */
    dset_id = H5Dopen2(file, DSETNAME[3], H5P_DEFAULT);
    if (dset_id < 0)
        TEST_ERROR;
    dtype1_id = H5Dget_type(dset_id);
    if (dtype1_id < 0)
        TEST_ERROR;
    TSOHM_S1H_VERIFY_DATA(dset_id, dtype1_id)
    if (H5Dclose(dset_id) < 0)
        TEST_ERROR;
    if (H5Tclose(dtype1_id) < 0)
        TEST_ERROR;

    return file;

error:
    H5E_BEGIN_TRY
    {
        H5Sclose(space_id);
        H5Tclose(dtype1_id);
        H5Dclose(dset_id);
        H5Fclose(file);
    }
    H5E_END_TRY
    return H5I_INVALID_HID;
#undef TSOHM_S1H_VERIFY_DATA /* macro is exclusive to this function */
} /* size1_helper */
/*----------------------------------------------------------------------------
* Function: getsize_testsize1
*
* Purpose: Creates a test file, populates it, and returns its file size.
* Object header information from the "first" dataset in the file
* is stored in pointer `oinfo`.
*
* Programmer: Jacob Smith
* 2018 November 1
*----------------------------------------------------------------------------
*/
static h5_stat_size_t
2020-09-30 22:27:10 +08:00
getsize_testsize1(const char *filename, hid_t fcpl_id, hid_t fapl_id, hbool_t test_file_closing,
H5O_native_info_t *ninfo)
{
2020-09-30 22:27:10 +08:00
hid_t fid = H5I_INVALID_HID;
herr_t ret;
fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id);
CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
/* If test_file_closing is TRUE, you will get back a different ID,
* which will need to be closed. The helper will close your passed-in
* ID.
*/
fid = size1_helper(fid, filename, fapl_id, test_file_closing);
CHECK(fid, H5I_INVALID_HID, "size1_helper");
ret = H5Oget_native_info_by_name(fid, DSETNAME[0], ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
CHECK(ret, FAIL, "H5Oget_native_info_by_name");
ret = H5Fclose(fid);
CHECK(ret, FAIL, "H5Fclose");
return h5_get_file_size(filename, fapl_id);
} /* getsize_testsize1() */
/*-------------------------------------------------------------------------
* Function: test_sohm_size1
*
* Purpose: Tests shared object header messages with a large datatype
*
* Programmer: James Laird
* Monday, April 10, 2006
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_size1(void)
{
    hid_t file    = -1;
    hid_t fcpl_id = -1;
    hid_t fapl_id = -1;

    unsigned       use_shared = 0;
    unsigned       use_btree  = 0;
    h5_stat_size_t file_sizes[9]; /* 3 sizes per loop case, 3 cases run */
    unsigned       size_index = 0;
    hsize_t        oh_sizes[3]; /* one object-header size per case */
    unsigned       oh_size_index = 0;
#if 0 /* TBD: lying comment or bug. See Jira HDFFV-10646 */
    hsize_t norm_oh_size;
#endif /* Jira HDFFV-10646 */
    hsize_t        sohm_oh_size;
    hsize_t        sohm_btree_oh_size;
    h5_stat_size_t norm_empty_filesize;
    h5_stat_size_t sohm_empty_filesize;
    h5_stat_size_t sohm_btree_empty_filesize;
    h5_stat_size_t norm_final_filesize;
    h5_stat_size_t sohm_final_filesize;
    h5_stat_size_t sohm_btree_final_filesize;
    h5_stat_size_t norm_final_filesize2;
    h5_stat_size_t sohm_final_filesize2;
    h5_stat_size_t sohm_btree_final_filesize2;

    H5O_native_info_t ninfo;
    unsigned          num_indexes   = 1;
    unsigned          index_flags   = H5O_SHMESG_DTYPE_FLAG;
    unsigned          min_mesg_size = 50;
    unsigned          list_max      = 11;
    unsigned          btree_min     = 10;
    herr_t            ret;

    MESSAGE(5, ("Testing that shared datatypes save space\n"));

    /* Create a FAPL with "semi" close degree, to detect dangling IDs */
    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    CHECK_I(fapl_id, "H5Pcreate");
    ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI);
    CHECK_I(ret, "H5Pset_fclose_degree");

    /* ----------------------------------------
     * Run operations, accumulating file sizes to compare later.
     */
    for (use_shared = 0; use_shared < 2; use_shared++) {
        for (use_btree = 0; use_btree < 2; use_btree++) {
            hbool_t test_open_close;

            /* cannot use btree indexing without shared messages; skip case */
            if (use_btree && !use_shared)
                continue;

            fcpl_id = H5Pcreate(H5P_FILE_CREATE);
            CHECK_I(fcpl_id, "H5Pcreate");

            if (use_shared) {
                /* Tests one index holding only datatype messages */
                ret = H5Pset_shared_mesg_nindexes(fcpl_id, num_indexes);
                CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
                ret = H5Pset_shared_mesg_index(fcpl_id, 0, index_flags, min_mesg_size);
                CHECK_I(ret, "H5Pset_shared_mesg_index");
                if (use_btree) {
                    /* list max of zero -> index uses the B-tree form */
                    ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
                }
                else {
                    ret = H5Pset_shared_mesg_phase_change(fcpl_id, list_max, btree_min);
                }
                CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
            }
            else {
                /* "normal" case: no shared message indexes at all */
                ret = H5Pset_shared_mesg_nindexes(fcpl_id, 0);
                CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
            }

            file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, fapl_id);
            CHECK_I(file, "H5Fcreate");
            ret = H5Fclose(file);
            CHECK_I(ret, "H5Fclose");

            /* size of empty file */
            file_sizes[size_index++] = h5_get_file_size(FILENAME, fapl_id);

            /* size of populated file, with different populating behaviors */
            test_open_close          = TRUE;
            file_sizes[size_index++] = getsize_testsize1(FILENAME, fcpl_id, fapl_id, test_open_close, &ninfo);
            test_open_close          = FALSE;
            file_sizes[size_index++] = getsize_testsize1(FILENAME, fcpl_id, fapl_id, test_open_close, &ninfo);

            /* object header size of DSETNAME[0], from the last run */
            oh_sizes[oh_size_index++] = ninfo.hdr.space.total;

            ret = H5Pclose(fcpl_id);
            CHECK_I(ret, "H5Pclose");
        } /* for btree/listed messages */
    }     /* for normal/shared messages */

    ret = H5Pclose(fapl_id);
    CHECK_I(ret, "H5Pclose");

    /* sanity-check state of arrays */
    VERIFY(9, size_index, "size_index");
    VERIFY(3, oh_size_index, "oh_size_index");

    /* ----------------------------------------
     * Check that all sizes make sense.
     */
    /* Put result sizes into human-readable symbolic names.
     * Order dependent on loop execution above.
     */
    norm_empty_filesize  = file_sizes[0];
    norm_final_filesize  = file_sizes[1];
    norm_final_filesize2 = file_sizes[2];
#if 0 /* TBD: lying comment or bug. See Jira HDFFV-10646 */
    norm_oh_size = oh_sizes[0];
#endif /* Jira HDFFV-10646 */

    sohm_empty_filesize  = file_sizes[3];
    sohm_final_filesize  = file_sizes[4];
    sohm_final_filesize2 = file_sizes[5];
    sohm_oh_size         = oh_sizes[1];

    sohm_btree_empty_filesize  = file_sizes[6];
    sohm_btree_final_filesize  = file_sizes[7];
    sohm_btree_final_filesize2 = file_sizes[8];
    sohm_btree_oh_size         = oh_sizes[2];

    /* How the SOHM messages are stored shouldn't affect the
     * size of the object header.
     */
    VERIFY(sohm_btree_oh_size, sohm_oh_size, "H5Oget_info_by_name");

#if 0 /* TBD: lying comment or bug. See Jira HDFFV-10646 */
    /* Object headers in SOHM files should be smaller than normal object
     * headers.
     */
    if (sohm_oh_size >= norm_oh_size)
        VERIFY(norm_oh_size, 1, "H5Oget_info_by_name");
#endif /* Jira HDFFV-10646 */

    /* Both sohm files should be bigger than a normal file when empty.
     * It's hard to say whether a B-tree with no nodes allocated should be
     * smaller than a list with SOHM_HELPER_NUM_DTYPES elements.
     * The sizes here shouldn't really be 1; it's just used to ensure that the
     * error code triggers.
     */
    if (sohm_empty_filesize <= norm_empty_filesize)
        VERIFY(sohm_empty_filesize, 1, "h5_get_file_size");
    if (sohm_btree_empty_filesize <= norm_empty_filesize)
        VERIFY(sohm_btree_empty_filesize, 1, "h5_get_file_size");

    /* When full, the sohm btree file should be smaller than the normal file.
     * The sohm list file should be at least as small, since it doesn't need
     * the overhead of a B-tree.
     */
    if (sohm_btree_final_filesize >= norm_final_filesize)
        VERIFY(sohm_btree_final_filesize, 1, "h5_get_file_size");
    if (sohm_final_filesize > sohm_btree_final_filesize)
        VERIFY(sohm_final_filesize, 1, "h5_get_file_size");

    /* Comparative sizes shouldn't change even if we open and close the file
     */
    if (sohm_btree_final_filesize2 >= norm_final_filesize2)
        VERIFY(sohm_btree_final_filesize2, 1, "h5_get_file_size");
    if (sohm_final_filesize2 > sohm_btree_final_filesize2)
        VERIFY(sohm_final_filesize2, 1, "h5_get_file_size");
} /* test_sohm_size1 */
/*---------------------------------------------------------------------------
* Function: test_sohm_size_consistency_open_create
*
* Purpose: Tests that header size is different depending on file open
* procedure?
* Uses "size1_helper" for file setup directed to a specific
* file handle.
*
* Programmer: Jacob Smith
* 2018 November 1
*
*---------------------------------------------------------------------------
*/
#if 0 /* TODO: REVEALS BUG TO BE FIXED - SEE JIRA HDFFV-10645 */
static void
test_sohm_size_consistency_open_create(void)
{
hid_t file = -1;
hid_t fcpl_id = -1;
hid_t fapl_id = -1;
unsigned use_btree;
hsize_t oh_size_open;
hsize_t oh_size_create;
H5O_native_info_t oinfo;
unsigned num_indexes = 1;
unsigned index_flags = H5O_SHMESG_DTYPE_FLAG;
unsigned min_mesg_size = 50;
unsigned list_max = 11;
unsigned btree_min = 10;
herr_t ret;
MESSAGE(5, \
("Testing that header size is consistent between H5Fopen and H5Fcreate\n"));
/* Create a FAPL with "semi" close degree, to detect dangling IDs */
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
CHECK_I(fapl_id, "H5Pcreate");
ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI);
CHECK_I(ret, "H5Pset_fclose_degree");
for (use_btree = 0; use_btree < 2; use_btree++) {
/* Create FCPL with SOHMs enabled
*/
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, num_indexes);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, index_flags, min_mesg_size);
CHECK_I(ret, "H5Pset_shared_mesg_index");
if (use_btree) {
MESSAGE(5, ("----testing with btree index----\n"));
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
} else {
MESSAGE(5, ("----testing with normal index----\n"));
ret = H5Pset_shared_mesg_phase_change(fcpl_id, list_max, btree_min);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
}
/* Create empty file */
file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, fapl_id);
CHECK_I(file, "H5Fcreate");
ret = H5Fclose(file);
CHECK_I(ret, "H5Fclose");
/* Test Open/Write
* Add messages to previously-created file */
file = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl_id);
CHECK_I(file, "H5Fopen");
file = size1_helper(file, FILENAME, fapl_id, FALSE);
CHECK_I(file, "size1_helper");
/* Get the size of a dataset object header */
ret = H5Oget_native_info_by_name(file, DSETNAME[0], &oinfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
CHECK_I(ret, "H5Oget_native_info_by_name");
oh_size_open = oinfo.hdr.space.total;
ret = H5Fclose(file);
CHECK_I(ret, "H5Fclose");
/* Test Create/Write
* Add messages to a newly-created file */
file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, fapl_id);
CHECK_I(file, "H5Fcreate");
file = size1_helper(file, FILENAME, fapl_id, FALSE);
CHECK_I(file, "size1_helper");
/* Get the size of a dataset object header */
ret = H5Oget_native_info_by_name(file, DSETNAME[0], &oinfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
CHECK_I(ret, "H5Oget_native_info_by_name");
oh_size_create = oinfo.hdr.space.total;
ret = H5Fclose(file);
CHECK_I(ret, "H5Fclose");
VERIFY(oh_size_create, oh_size_open, "H5Oget_info_by_name2");
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
} /* for normal/btree indexing */
ret = H5Pclose(fapl_id);
CHECK_I(ret, "H5Pclose");
} /* test_sohm_size_consistency_open_create */
#endif /* Jira HDFFV-10645 */
/*-------------------------------------------------------------------------
* Function: sohm_attr_helper
*
* Purpose: Given an fcpl, tests creating attributes with and without
* committed datatypes.
* Verify that an attribute can be written and read back.
* Tests attribute on a Group.
* Tests committed and non-committed datatypes.
* Tests attribute access through `H5Aopen()`.
*
* Programmer: James Laird
* Thursday, November 30, 2006
*
*-------------------------------------------------------------------------
*/
/* Exercise attribute creation and read-back under the shared-message
 * configuration carried by fcpl_id.  Three passes run against a fresh file:
 *   pass 0 - plain attribute on a group
 *   pass 1 - attribute whose datatype has been committed
 *   pass 2 - committed-datatype case, read back through a second
 *            attribute handle obtained with H5Aopen()
 */
static void
sohm_attr_helper(hid_t fcpl_id)
{
    hid_t    file_id;
    hid_t    space_id;
    hsize_t  dims     = 2;
    int      wdata[2] = {7, 42};
    int      rdata[2];
    herr_t   ret;
    size_t   u;
    unsigned step;
#define TSOHM_SAH_OP_COUNT 3
    const char *groupnames[TSOHM_SAH_OP_COUNT] = {
        "group_for_nothing_special",
        "group_for_commited_dtype",
        "group_for_commited_dtype_and_other_ID_access",
    };

    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");
    space_id = H5Screate_simple(1, &dims, &dims);
    CHECK_I(space_id, "H5Screate_simple");

    for (step = 0; step < TSOHM_SAH_OP_COUNT; step++) {
        hid_t type_id  = -1;
        hid_t group_id = -1;
        hid_t attr_id  = -1;
        hid_t attr_id2 = -1;
        hid_t read_id;

        /* Each pass works in its own uniquely-named group */
        group_id = H5Gcreate2(file_id, groupnames[step], H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        CHECK_I(group_id, "H5Gcreate2");

        type_id = H5Tcopy(H5T_NATIVE_INT);
        CHECK_I(type_id, "H5Tcopy");

        /* Commit the datatype for the latter passes -- but only once */
        if (step == 1) {
            ret = H5Tcommit2(file_id, "datatype", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
            CHECK_I(ret, "H5Tcommit2");
        }

        attr_id = H5Acreate2(group_id, "attribute", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT);
        CHECK_I(attr_id, "H5Acreate2");

        /* Final pass additionally opens a second handle on the attribute */
        if (step == 2) {
            attr_id2 = H5Aopen(group_id, "attribute", H5P_DEFAULT);
            CHECK_I(attr_id2, "H5Aopen");
        }

        ret = H5Awrite(attr_id, H5T_NATIVE_INT, wdata);
        CHECK_I(ret, "H5Awrite");
        ret = H5Gclose(group_id);
        CHECK_I(ret, "H5Gclose");
        ret = H5Tclose(type_id);
        CHECK_I(ret, "H5Tclose");

        /* Push the data to the file before reading it back */
        ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
        CHECK_I(ret, "H5Fflush");

        /* Verify what was written, reading through the second handle on
         * the final pass */
        read_id = (step == 2) ? attr_id2 : attr_id;
        HDmemset(rdata, 0, sizeof(rdata));
        ret = H5Aread(read_id, H5T_NATIVE_INT, rdata);
        CHECK_I(ret, "H5Aread");
        for (u = 0; u < (size_t)dims; ++u)
            VERIFY(rdata[u], wdata[u], "H5Aread");

        ret = H5Aclose(attr_id);
        CHECK_I(ret, "H5Aclose");
        if (attr_id2 > -1) {
            ret = H5Aclose(attr_id2);
            CHECK_I(ret, "H5Aclose");
        }
    } /* for each attribute operation */

    ret = H5Sclose(space_id);
    CHECK_I(ret, "H5Sclose");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
#undef TSOHM_SAH_OP_COUNT
} /* sohm_attr_helper */
/*-------------------------------------------------------------------------
* Function: test_sohm_attrs
*
* Purpose: Attributes can be shared and can also contain shared
* datatype and dataspace messages. Committed datatypes
* shouldn't be shared.
*
* Test permutations of this.
*
* Programmer: James Laird
* Thursday, November 30, 2006
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_attrs(void)
{
2020-09-30 22:27:10 +08:00
hid_t bad_fid = H5I_INVALID_HID;
hid_t fcpl_id = H5I_INVALID_HID;
unsigned i = 0;
#define TSOHM_TSA_NFLAGS_1 7
unsigned flags1[TSOHM_TSA_NFLAGS_1] = {
H5O_SHMESG_ATTR_FLAG,
H5O_SHMESG_SDSPACE_FLAG,
H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_SDSPACE_FLAG,
H5O_SHMESG_SDSPACE_FLAG | H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_SDSPACE_FLAG | H5O_SHMESG_DTYPE_FLAG,
};
#define TSOHM_TSA_NFLAGS_2 6
unsigned flags2[TSOHM_TSA_NFLAGS_2][2] = {
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_ATTR_FLAG,
H5O_SHMESG_SDSPACE_FLAG | H5O_SHMESG_DTYPE_FLAG,
},
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_SDSPACE_FLAG,
H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_DTYPE_FLAG,
},
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_SDSPACE_FLAG,
},
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_SDSPACE_FLAG | H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG,
},
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_SDSPACE_FLAG,
},
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_SDSPACE_FLAG,
H5O_SHMESG_DTYPE_FLAG,
},
};
#define TSOHM_TSA_NFLAGS_3 5
unsigned flags3[TSOHM_TSA_NFLAGS_3][3] = {
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_ATTR_FLAG,
H5O_SHMESG_SDSPACE_FLAG,
H5O_SHMESG_DTYPE_FLAG,
},
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG,
H5O_SHMESG_SDSPACE_FLAG,
},
2020-09-30 22:27:10 +08:00
{
H5O_SHMESG_SDSPACE_FLAG,
H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG,
},
2020-09-30 22:27:10 +08:00
{
0, /* first index does not hold a shared message type? */
H5O_SHMESG_SDSPACE_FLAG | H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG,
},
2020-09-30 22:27:10 +08:00
{
0, /* missing SDSPACE flag */
H5O_SHMESG_DTYPE_FLAG,
H5O_SHMESG_ATTR_FLAG,
},
};
herr_t ret;
MESSAGE(5, ("Testing that shared messages work with attributes\n"));
/* No shared messages
*/
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 0);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
sohm_attr_helper(fcpl_id);
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
/* One shared message index
*/
2020-09-30 22:27:10 +08:00
for (i = 0; i < TSOHM_TSA_NFLAGS_1; i++) {
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, flags1[i], 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
sohm_attr_helper(fcpl_id);
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
}
/* two shared message indices
*/
2020-09-30 22:27:10 +08:00
for (i = 0; i < TSOHM_TSA_NFLAGS_2; i++) {
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, flags2[i][0], 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, flags2[i][1], 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
sohm_attr_helper(fcpl_id);
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
}
/* duplicate flags in separate indices causes problems
*/
2020-09-30 22:27:10 +08:00
H5E_BEGIN_TRY
{
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ATTR_FLAG, 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_ATTR_FLAG, 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
bad_fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
VERIFY(bad_fid, H5I_INVALID_HID, "H5Fcreate");
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
2020-09-30 22:27:10 +08:00
}
H5E_END_TRY;
/* three shared message indices
*/
2020-09-30 22:27:10 +08:00
for (i = 0; i < TSOHM_TSA_NFLAGS_3; i++) {
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 3);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, flags3[i][0], 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, flags3[i][1], 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 2, flags3[i][2], 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
sohm_attr_helper(fcpl_id);
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
}
#undef TSOHM_TSA_NFLAGS_1
#undef TSOHM_TSA_NFLAGS_2
#undef TSOHM_TSA_NFLAGS_3
} /* test_sohm_attrs */
/*-------------------------------------------------------------------------
* Function: size2_verify_plist1
*
* Purpose: Verify that the property list passed in is in fact the
* same property list used as dcpl1_id in the size2 helper
* function. This ensures that the filters can be read.
*
* Programmer: James Laird
* Wednesday, November 22, 2006
*
*-------------------------------------------------------------------------
*/
/* Verify that PLIST matches dcpl1_id as constructed in size2_helper:
 * the filter pipeline shuffle -> deflate(1) -> shuffle -> fletcher32,
 * and a fill value consisting entirely of '1' bytes.
 */
static void
size2_verify_plist1(hid_t plist)
{
    /* Expected pipeline, hardcoded to match size2_helper's dcpl1_id */
    const H5Z_filter_t expected_filter[4] = {H5Z_FILTER_SHUFFLE, H5Z_FILTER_DEFLATE, H5Z_FILTER_SHUFFLE,
                                             H5Z_FILTER_FLETCHER32};
    size_t             n_cd_values;
    unsigned int       cd_value;
    char               filter_name[NAME_BUF_SIZE];
    H5Z_filter_t       filter;
    hid_t              dtype1_id;
    dtype1_struct      fill_read;
    dtype1_struct      fill_expect;
    unsigned           u;
    herr_t             ret;

    /* Check each filter in the pipeline; the deflate step must also carry
     * an aggression level of 1 */
    for (u = 0; u < 4; u++) {
        n_cd_values = 1;
        filter = H5Pget_filter2(plist, u, NULL, &n_cd_values, &cd_value, (size_t)NAME_BUF_SIZE, filter_name,
                                NULL);
        CHECK_I(filter, "H5Pget_filter2");
        VERIFY(filter, expected_filter[u], "H5Pget_filter2");
        if (expected_filter[u] == H5Z_FILTER_DEFLATE)
            VERIFY(cd_value, 1, "H5Pget_filter2");
    }

    /* Check fill value: every byte must read back as '1' */
    dtype1_id = make_dtype_1();
    CHECK_I(dtype1_id, "make_dtype_1");
    HDmemset(&fill_expect, '1', sizeof(fill_expect));

    ret = H5Pget_fill_value(plist, dtype1_id, &fill_read);
    CHECK_I(ret, "H5Pget_fill_value");

    ret = HDmemcmp(&fill_read, &fill_expect, sizeof(fill_expect));
    VERIFY(ret, 0, "memcmp");

    ret = H5Tclose(dtype1_id);
    CHECK_I(ret, "H5Tclose");
} /* size2_verify_plist1 */
/*-------------------------------------------------------------------------
* Function: size2_verify_plist2
*
* Purpose: Verify that the property list passed in is in fact the
* same property list used as dcpl2_id in the size2 helper
* function. This ensures that the filters can be read.
*
* Programmer: James Laird
* Wednesday, November 22, 2006
*
*-------------------------------------------------------------------------
*/
/* Verify that PLIST matches dcpl2_id as constructed in size2_helper:
 * five deflate filters with aggression levels 1, 2, 2, 1, 5, and a fill
 * value consisting entirely of '2' bytes.
 */
static void
size2_verify_plist2(hid_t plist)
{
    /* Expected deflate levels, hardcoded to match size2_helper's dcpl2_id */
    const unsigned expected_level[5] = {1, 2, 2, 1, 5};
    size_t         n_cd_values;
    unsigned int   cd_value;
    char           filter_name[NAME_BUF_SIZE];
    H5Z_filter_t   filter;
    hid_t          dtype2_id;
    char           fill_read[DTYPE2_SIZE];
    char           fill_expect[DTYPE2_SIZE];
    unsigned       u;
    herr_t         ret;

    /* Every pipeline entry must be a deflate filter with the expected level */
    for (u = 0; u < 5; u++) {
        n_cd_values = 1;
        filter = H5Pget_filter2(plist, u, NULL, &n_cd_values, &cd_value, (size_t)NAME_BUF_SIZE, filter_name,
                                NULL);
        CHECK_I(filter, "H5Pget_filter2");
        VERIFY(filter, H5Z_FILTER_DEFLATE, "H5Pget_filter2");
        VERIFY(cd_value, expected_level[u], "H5Pget_filter2");
    }

    /* Check fill value: every byte must read back as '2' */
    dtype2_id = make_dtype_2();
    CHECK_I(dtype2_id, "make_dtype_2");
    HDmemset(fill_expect, '2', (size_t)DTYPE2_SIZE);

    ret = H5Pget_fill_value(plist, dtype2_id, fill_read);
    CHECK_I(ret, "H5Pget_fill_value");

    ret = HDmemcmp(fill_read, fill_expect, (size_t)DTYPE2_SIZE);
    VERIFY(ret, 0, "memcmp");

    ret = H5Tclose(dtype2_id);
    CHECK_I(ret, "H5Tclose");
} /* size2_verify_plist2 */
#ifdef NOT_NOW
2020-09-30 22:27:10 +08:00
/*-------------------------------------------------------------------------
* Function: size2_dump_struct
*
* Purpose: A debugging function to print the contents of a
* size2_helper_struct (which holds the various sizes for a
* given file during the size2_helper function).
*
* Programmer: James Laird
* Friday, January 26, 2007
*
*-------------------------------------------------------------------------
*/
static void
size2_dump_struct(const char *name, size2_helper_struct *sizes)
{
    /* Print NAME as a heading, then each measured file size together with
     * its delta from the previous measurement stage.
     * NOTE(review): the deltas are computed in unsigned arithmetic, so this
     * assumes each stage's size is >= the previous stage's -- confirm
     * before re-enabling this debug helper.
     */
    HDputs(name);
    HDprintf(" empty size: %llu\n", (unsigned long long)sizes->empty_size);
    HDprintf(" first dataset: %llu \tdelta: %llu\n", (unsigned long long)sizes->first_dset,
             (unsigned long long)(sizes->first_dset - sizes->empty_size));
    HDprintf("second dataset: %llu \tdelta: %llu\n", (unsigned long long)sizes->second_dset,
             (unsigned long long)(sizes->second_dset - sizes->first_dset));
    HDprintf(" dsets 1: %llu \tdelta: %llu\n", (unsigned long long)sizes->dsets1,
             (unsigned long long)(sizes->dsets1 - sizes->second_dset));
    HDprintf(" dsets 2: %llu \tdelta: %llu\n", (unsigned long long)sizes->dsets2,
             (unsigned long long)(sizes->dsets2 - sizes->dsets1));
    HDprintf(" interleaved: %llu \tdelta: %llu\n", (unsigned long long)sizes->interleaved,
             (unsigned long long)(sizes->interleaved - sizes->dsets2));
    HDprintf(" attributes: %llu \tdelta: %llu\n", (unsigned long long)sizes->attrs1,
             (unsigned long long)(sizes->attrs1 - sizes->interleaved));
    HDprintf(" attributes 2: %llu \tdelta: %llu\n", (unsigned long long)sizes->attrs2,
             (unsigned long long)(sizes->attrs2 - sizes->attrs1));
} /* size2_dump_struct */
#endif /* NOT_NOW */
/*-------------------------------------------------------------------------
* Function: size2_helper
*
* Purpose: A helper function for test_sohm_size2.
*
* Creates a file using the given fcpl, then creates lots
* of different kinds of messages within the file and
* returns the size of the file for comparison.
*
* If test_file_closing is not zero, closes and re-opens
* the file after every write.
*
* Doesn't close the property list. Prints an error message
* if there's a failure, but doesn't alter its return value.
*
* Programmer: James Laird
* Friday, November 17, 2006
*
*-------------------------------------------------------------------------
*/
/*-------------------------------------------------------------------------
 * Function:    size2_helper_make_dset
 *
 * Purpose:     Create one dataset named DSET_NAME under LOC_ID with the
 *              given datatype, dataspace, and DCPL, then attach a single
 *              string attribute named "attr_name" holding ATTR_DATA.
 *
 *              Consolidates the dataset+attribute creation pattern that
 *              size2_helper repeats for every dataset batch.
 *-------------------------------------------------------------------------
 */
static void
size2_helper_make_dset(hid_t loc_id, const char *dset_name, hid_t dtype_id, hid_t dspace_id, hid_t dcpl_id,
                       hid_t attr_type_id, hid_t attr_space_id, const char *attr_data)
{
    hid_t  dset_id = -1;
    hid_t  attr_id = -1;
    herr_t ret;

    dset_id = H5Dcreate2(loc_id, dset_name, dtype_id, dspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dcreate2");
    attr_id = H5Acreate2(dset_id, "attr_name", attr_type_id, attr_space_id, H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(attr_id, "H5Acreate2");
    ret = H5Awrite(attr_id, attr_type_id, attr_data);
    CHECK_I(ret, "H5Awrite");
    ret = H5Aclose(attr_id);
    CHECK_I(ret, "H5Aclose");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
} /* size2_helper_make_dset */

/*-------------------------------------------------------------------------
 * Function:    size2_helper_write_attrs
 *
 * Purpose:     Open FILENAME and the group GROUP_NAME within it, then
 *              create NUM_ATTRIBUTES string attributes on that group.
 *              Each attribute gets a unique two-digit name ("00 index",
 *              "01 index", ...) and a matching value: the digits are
 *              written into the first two characters of ATTR_STRING,
 *              which must be a writable buffer of NAME_BUF_SIZE bytes.
 *
 *              If TEST_FILE_CLOSING is non-zero, the file is closed and
 *              re-opened after every attribute.  The group and file are
 *              closed before returning so the caller can measure the
 *              on-disk file size.
 *
 *              Consolidates the attribute loop that size2_helper
 *              previously duplicated verbatim for two groups.
 *-------------------------------------------------------------------------
 */
static void
size2_helper_write_attrs(const char *group_name, hid_t attr_type_id, hid_t attr_space_id, char *attr_string,
                         int test_file_closing)
{
    hid_t  file_id  = -1;
    hid_t  group_id = -1;
    hid_t  attr_id  = -1;
    char   attr_name[NAME_BUF_SIZE];
    int    x;
    herr_t ret;

    file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fopen");
    group_id = H5Gopen2(file_id, group_name, H5P_DEFAULT);
    CHECK_I(group_id, "H5Gopen2");

    HDstrcpy(attr_name, "00 index");

    for (x = 0; x < NUM_ATTRIBUTES; ++x) {
        /* Create a unique name and value for each attribute */
        attr_string[0] = attr_name[0] = (char)((x / 10) + '0');
        attr_string[1] = attr_name[1] = (char)((x % 10) + '0');

        /* Create an attribute on the group */
        attr_id = H5Acreate2(group_id, attr_name, attr_type_id, attr_space_id, H5P_DEFAULT, H5P_DEFAULT);
        CHECK_I(attr_id, "H5Acreate2");
        ret = H5Awrite(attr_id, attr_type_id, attr_string);
        CHECK_I(ret, "H5Awrite");
        ret = H5Aclose(attr_id);
        CHECK_I(ret, "H5Aclose");

        /* Close everything & reopen file if requested */
        if (test_file_closing) {
            ret = H5Gclose(group_id);
            CHECK_I(ret, "H5Gclose");
            file_id = close_reopen_file(file_id, FILENAME, H5P_DEFAULT);
            CHECK_I(file_id, "H5Fopen");
            group_id = H5Gopen2(file_id, group_name, H5P_DEFAULT);
            CHECK_I(group_id, "H5Gopen2");
        }
    }

    /* Close the group and file so the caller can measure the file size */
    ret = H5Gclose(group_id);
    CHECK_I(ret, "H5Gclose");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
} /* size2_helper_write_attrs */

/* A helper function for test_sohm_size2.
 *
 * Creates a file using the given fcpl, then creates lots of different
 * kinds of messages within the file and records the file size after each
 * stage in RET_SIZES for comparison.
 *
 * If test_file_closing is not zero, closes and re-opens the file after
 * every write.
 *
 * Doesn't close the property list.  Prints an error message if there's a
 * failure, but doesn't alter its return value (always 0).
 */
static int
size2_helper(hid_t fcpl_id, int test_file_closing, size2_helper_struct *ret_sizes)
{
    hid_t  file_id       = -1;
    hid_t  dtype1_id     = -1;
    hid_t  dtype2_id     = -1;
    hid_t  dspace1_id    = -1;
    hid_t  dspace2_id    = -1;
    hid_t  dcpl1_id      = -1;
    hid_t  dcpl2_id      = -1;
    hid_t  attr_type_id  = -1;
    hid_t  attr_space_id = -1;
    hid_t  group_id      = -1;
    char   attr_string1[NAME_BUF_SIZE];
    char   attr_string2[NAME_BUF_SIZE];
    int    x;
    herr_t ret;
    /* Constants used in this function */
    const int     rank1             = SIZE2_RANK1;
    const int     rank2             = SIZE2_RANK2;
    const hsize_t dims[SIZE2_RANK2] = SIZE2_DIMS;
    dtype1_struct fill1;
    char          fill2[DTYPE2_SIZE];

    /* Closing and re-opening the file takes a long time on systems without
     * local disks. Don't close and reopen if express testing is enabled.
     */
    if (GetTestExpress() > 1)
        test_file_closing = 0;

    /* Create a file and get its size */
    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");

    /* Get the file size */
    ret_sizes->empty_size = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Re-open the file and set up messages to write */
    file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fopen");

    /* Create two large datatype messages */
    dtype1_id = make_dtype_1();
    CHECK_I(dtype1_id, "make_dtype_1");
    dtype2_id = make_dtype_2();
    CHECK_I(dtype2_id, "make_dtype_2");

    /* Create some large dataspaces */
    dspace1_id = H5Screate_simple(rank1, dims, dims);
    CHECK_I(dspace1_id, "H5Screate_simple");
    dspace2_id = H5Screate_simple(rank2, dims, dims);
    CHECK_I(dspace2_id, "H5Screate_simple");

    /* fill1 and fill2 are fill values for the two datatypes.
     * We'll set them in the DCPL.  (The original code ignored the
     * H5Pset_fill_value return values; check them.)
     */
    HDmemset(&fill1, '1', sizeof(dtype1_struct));
    HDmemset(fill2, '2', (size_t)DTYPE2_SIZE);

    dcpl1_id = H5Pcreate(H5P_DATASET_CREATE);
    CHECK_I(dcpl1_id, "H5Pcreate");
    ret = H5Pset_fill_value(dcpl1_id, dtype1_id, &fill1);
    CHECK_I(ret, "H5Pset_fill_value");

    dcpl2_id = H5Pcreate(H5P_DATASET_CREATE);
    CHECK_I(dcpl2_id, "H5Pcreate");
    ret = H5Pset_fill_value(dcpl2_id, dtype2_id, fill2);
    CHECK_I(ret, "H5Pset_fill_value");

    /* Filter messages we'll create by setting them in a DCPL. These
     * values don't need to make sense, they just need to take up space.
     */
    ret = H5Pset_chunk(dcpl1_id, rank1, dims);
    CHECK_I(ret, "H5Pset_chunk");
    ret = H5Pset_shuffle(dcpl1_id);
    CHECK_I(ret, "H5Pset_shuffle");
    ret = H5Pset_deflate(dcpl1_id, 1);
    CHECK_I(ret, "H5Pset_deflate");
    ret = H5Pset_shuffle(dcpl1_id);
    CHECK_I(ret, "H5Pset_shuffle");
    ret = H5Pset_fletcher32(dcpl1_id);
    CHECK_I(ret, "H5Pset_fletcher32");

    /* Make sure that this property list is what it should be */
    size2_verify_plist1(dcpl1_id);

    /* Second dcpl */
    ret = H5Pset_chunk(dcpl2_id, rank2, dims);
    CHECK_I(ret, "H5Pset_chunk");
    ret = H5Pset_deflate(dcpl2_id, 1);
    CHECK_I(ret, "H5Pset_deflate");
    ret = H5Pset_deflate(dcpl2_id, 2);
    CHECK_I(ret, "H5Pset_deflate");
    ret = H5Pset_deflate(dcpl2_id, 2);
    CHECK_I(ret, "H5Pset_deflate");
    ret = H5Pset_deflate(dcpl2_id, 1);
    CHECK_I(ret, "H5Pset_deflate");
    ret = H5Pset_deflate(dcpl2_id, 5);
    CHECK_I(ret, "H5Pset_deflate");

    /* Make sure that this property list is what it should be */
    size2_verify_plist2(dcpl2_id);

    /* Set up attribute data */
    HDmemset(attr_string1, 0, (size_t)NAME_BUF_SIZE);
    HDmemset(attr_string2, 0, (size_t)NAME_BUF_SIZE);
    HDstrcpy(attr_string1, LONG_STRING);
    HDstrcpy(attr_string2, LONG_STRING);
    attr_string2[1] = '1'; /* The second string starts "01 index..." */

    /* Set up attribute metadata */
    attr_type_id = H5Tcopy(H5T_C_S1);
    CHECK_I(attr_type_id, "H5Tcopy");
    ret = H5Tset_size(attr_type_id, (size_t)NAME_BUF_SIZE);
    CHECK_I(ret, "H5Tset_size");
    attr_space_id = H5Screate_simple(1, dims, dims);
    CHECK_I(attr_space_id, "H5Screate_simple");

    /* Create datasets with a big datatype, dataspace, fill value,
     * and filter pipeline.
     */
    for (x = 0; x < NUM_DATASETS; ++x) {
        size2_helper_make_dset(file_id, DSETNAME[x], dtype1_id, dspace1_id, dcpl1_id, attr_type_id,
                               attr_space_id, attr_string1);

        /* Gather extra statistics on first two datasets in file */
        if (x < 2) {
            ret = H5Fclose(file_id);
            CHECK_I(ret, "H5Fclose");

            /* Get the file's size now */
            if (x == 0)
                ret_sizes->first_dset = h5_get_file_size(FILENAME, H5P_DEFAULT);
            else
                ret_sizes->second_dset = h5_get_file_size(FILENAME, H5P_DEFAULT);

            file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
            CHECK_I(file_id, "H5Fopen");
        } /* end if */
        /* Close & reopen file if requested */
        else if (test_file_closing) {
            file_id = close_reopen_file(file_id, FILENAME, H5P_DEFAULT);
            CHECK_I(file_id, "H5Fopen");
        } /* end if */
    }     /* end for */

    /* Close file and get its size now */
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    ret_sizes->dsets1 = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Create new group filled with datasets that use all different messages */
    file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fopen");
    group_id = H5Gcreate2(file_id, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(group_id, "H5Gcreate2");

    /* Create NUM_DATASETS datasets in the new group */
    for (x = 0; x < NUM_DATASETS; ++x) {
        size2_helper_make_dset(group_id, DSETNAME[x], dtype2_id, dspace2_id, dcpl2_id, attr_type_id,
                               attr_space_id, attr_string2);

        /* Close everything & reopen file if requested */
        if (test_file_closing) {
            ret = H5Gclose(group_id);
            CHECK_I(ret, "H5Gclose");
            file_id = close_reopen_file(file_id, FILENAME, H5P_DEFAULT);
            CHECK_I(file_id, "H5Fopen");
            group_id = H5Gopen2(file_id, "group", H5P_DEFAULT);
            CHECK_I(group_id, "H5Gopen2");
        }
    }

    /* Close file and get its size now */
    ret = H5Gclose(group_id);
    CHECK_I(ret, "H5Gclose");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    ret_sizes->dsets2 = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Create a new group and interleave writes of datasets types 1 and 2. */
    file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fopen");
    group_id = H5Gcreate2(file_id, "interleaved group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(group_id, "H5Gcreate2");

    /* Create NUM_DATASETS datasets in the new group */
    for (x = 0; x < NUM_DATASETS; x += 2) {
        size2_helper_make_dset(group_id, DSETNAME[x], dtype1_id, dspace1_id, dcpl1_id, attr_type_id,
                               attr_space_id, attr_string1);
        size2_helper_make_dset(group_id, DSETNAME[x + 1], dtype2_id, dspace2_id, dcpl2_id, attr_type_id,
                               attr_space_id, attr_string2);

        /* Close everything & reopen file if requested */
        if (test_file_closing) {
            ret = H5Gclose(group_id);
            CHECK_I(ret, "H5Gclose");
            file_id = close_reopen_file(file_id, FILENAME, H5P_DEFAULT);
            CHECK_I(file_id, "H5Fopen");
            group_id = H5Gopen2(file_id, "interleaved group", H5P_DEFAULT);
            CHECK_I(group_id, "H5Gopen2");
        }
    }

    /* Close file and get its size now */
    ret = H5Gclose(group_id);
    CHECK_I(ret, "H5Gclose");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    ret_sizes->interleaved = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Create lots of new attribute messages on the group
     * (using different strings for the attribute)
     */
    size2_helper_write_attrs("group", attr_type_id, attr_space_id, attr_string1, test_file_closing);
    ret_sizes->attrs1 = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Create all of the attributes again on the other group */
    size2_helper_write_attrs("interleaved group", attr_type_id, attr_space_id, attr_string1,
                             test_file_closing);
    ret_sizes->attrs2 = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Close everything */
    ret = H5Sclose(attr_space_id);
    CHECK_I(ret, "H5Sclose");
    ret = H5Tclose(attr_type_id);
    CHECK_I(ret, "H5Tclose"); /* was mislabeled "H5Sclose" */
    ret = H5Tclose(dtype1_id);
    CHECK_I(ret, "H5Tclose");
    ret = H5Tclose(dtype2_id);
    CHECK_I(ret, "H5Tclose");
    ret = H5Sclose(dspace1_id);
    CHECK_I(ret, "H5Sclose");
    ret = H5Sclose(dspace2_id);
    CHECK_I(ret, "H5Sclose");
    ret = H5Pclose(dcpl1_id);
    CHECK_I(ret, "H5Pclose");
    ret = H5Pclose(dcpl2_id);
    CHECK_I(ret, "H5Pclose");

    return 0;
} /* size2_helper */
/*-------------------------------------------------------------------------
* Function: size2_verify
*
* Purpose: A helper function to verify the file created by size2_helper.
*
* Runs various tests (not exhaustive) to ensure that the
* file FILENAME actually has the structure that size2_helper
* should have created.
*
* Programmer: James Laird
* Friday, November 17, 2006
*
*-------------------------------------------------------------------------
*/
static void
size2_verify(void)
{
2020-09-30 22:27:10 +08:00
hid_t file_id = -1;
hid_t dset_id = -1;
hid_t plist_id = -1;
hid_t space_id = -1;
hid_t group1_id, group2_id;
hid_t attr1_id, attr2_id;
hid_t attr_type_id;
int x, y;
herr_t ret;
char attr_string[NAME_BUF_SIZE];
char attr_correct_string[NAME_BUF_SIZE];
char attr_name[NAME_BUF_SIZE];
int ndims;
hsize_t dims[SIZE2_RANK2];
hsize_t correct_dims[SIZE2_RANK2] = SIZE2_DIMS;
file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
CHECK_I(file_id, "H5Fopen");
/* Verify property lists and dataspaces */
/* Get property lists from first batch of datasets */
2020-09-30 22:27:10 +08:00
for (x = 0; x < NUM_DATASETS; ++x) {
dset_id = H5Dopen2(file_id, DSETNAME[x], H5P_DEFAULT);
CHECK_I(dset_id, "H5Dopen2");
plist_id = H5Dget_create_plist(dset_id);
CHECK_I(plist_id, "H5Dget_create_plist");
size2_verify_plist1(plist_id);
ret = H5Pclose(plist_id);
CHECK_I(ret, "H5Pclose");
space_id = H5Dget_space(dset_id);
CHECK_I(space_id, "H5Dget_space");
ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
CHECK_I(ndims, "H5Sget_simple_extent_dims");
VERIFY(ndims, SIZE2_RANK1, "H5Sget_simple_extent_dims");
2020-09-30 22:27:10 +08:00
for (y = 0; y < ndims; ++y)
VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
ret = H5Sclose(space_id);
CHECK_I(ret, "H5Sclose");
ret = H5Dclose(dset_id);
CHECK_I(ret, "H5Dclose");
}
/* Get property lists from second batch of datasets */
group1_id = H5Gopen2(file_id, "group", H5P_DEFAULT);
CHECK_I(group1_id, "H5Gopen2");
2020-09-30 22:27:10 +08:00
for (x = 0; x < NUM_DATASETS; ++x) {
dset_id = H5Dopen2(group1_id, DSETNAME[x], H5P_DEFAULT);
CHECK_I(dset_id, "H5Dopen2");
plist_id = H5Dget_create_plist(dset_id);
CHECK_I(plist_id, "H5Dget_create_plist");
size2_verify_plist2(plist_id);
ret = H5Pclose(plist_id);
CHECK_I(ret, "H5Pclose");
space_id = H5Dget_space(dset_id);
CHECK_I(space_id, "H5Dget_space");
ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
CHECK_I(ndims, "H5Sget_simple_extent_dims");
VERIFY(ndims, SIZE2_RANK2, "H5Sget_simple_extent_dims");
2020-09-30 22:27:10 +08:00
for (y = 0; y < ndims; ++y)
VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
ret = H5Sclose(space_id);
CHECK_I(ret, "H5Sclose");
ret = H5Dclose(dset_id);
CHECK_I(ret, "H5Dclose");
} /* end for */
ret = H5Gclose(group1_id);
CHECK_I(ret, "H5Gclose");
/* Get property lists from interleaved group of datasets */
group1_id = H5Gopen2(file_id, "interleaved group", H5P_DEFAULT);
CHECK_I(group1_id, "H5Gopen2");
2020-09-30 22:27:10 +08:00
for (x = 0; x < NUM_DATASETS; x += 2) {
/* First "type 1" dataset */
dset_id = H5Dopen2(group1_id, DSETNAME[x], H5P_DEFAULT);
CHECK_I(dset_id, "H5Dopen2");
plist_id = H5Dget_create_plist(dset_id);
CHECK_I(plist_id, "H5Dget_create_plist");
size2_verify_plist1(plist_id);
ret = H5Pclose(plist_id);
CHECK_I(ret, "H5Pclose");
space_id = H5Dget_space(dset_id);
CHECK_I(space_id, "H5Dget_space");
ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
CHECK_I(ndims, "H5Sget_simple_extent_dims");
VERIFY(ndims, SIZE2_RANK1, "H5Sget_simple_extent_dims");
2020-09-30 22:27:10 +08:00
for (y = 0; y < ndims; ++y)
VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
ret = H5Sclose(space_id);
CHECK_I(ret, "H5Sclose");
ret = H5Dclose(dset_id);
CHECK_I(ret, "H5Dclose");
/* Second "type 2" dataset */
dset_id = H5Dopen2(group1_id, DSETNAME[x + 1], H5P_DEFAULT);
CHECK_I(dset_id, "H5Dopen2");
plist_id = H5Dget_create_plist(dset_id);
CHECK_I(plist_id, "H5Dget_create_plist");
size2_verify_plist2(plist_id);
ret = H5Pclose(plist_id);
CHECK_I(ret, "H5Pclose");
space_id = H5Dget_space(dset_id);
CHECK_I(space_id, "H5Dget_space");
ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
CHECK_I(ndims, "H5Sget_simple_extent_dims");
VERIFY(ndims, SIZE2_RANK2, "H5Sget_simple_extent_dims");
2020-09-30 22:27:10 +08:00
for (y = 0; y < ndims; ++y)
VERIFY(dims[y], correct_dims[y], "H5Sget_simple_extent_dims");
ret = H5Sclose(space_id);
CHECK_I(ret, "H5Sclose");
ret = H5Dclose(dset_id);
CHECK_I(ret, "H5Dclose");
} /* end for */
ret = H5Gclose(group1_id);
CHECK_I(ret, "H5Gclose");
/* Verify attributes */
/* Create attribute data type */
attr_type_id = H5Tcopy(H5T_C_S1);
CHECK_I(attr_type_id, "H5Tcopy");
ret = H5Tset_size(attr_type_id, (size_t)NAME_BUF_SIZE);
CHECK_I(ret, "H5Tset_size");
/* Read attributes on both groups and verify that they are correct */
group1_id = H5Gopen2(file_id, "group", H5P_DEFAULT);
CHECK_I(group1_id, "H5Gopen2");
group2_id = H5Gopen2(file_id, "interleaved group", H5P_DEFAULT);
CHECK_I(group2_id, "H5Gopen2");
HDmemset(attr_string, 0, (size_t)NAME_BUF_SIZE);
HDmemset(attr_correct_string, 0, (size_t)NAME_BUF_SIZE);
HDstrcpy(attr_correct_string, LONG_STRING);
HDstrcpy(attr_name, "00 index");
2020-09-30 22:27:10 +08:00
for (x = 0; x < NUM_ATTRIBUTES; ++x) {
/* Create the name and correct value for each attribute */
attr_correct_string[0] = attr_name[0] = (char)((x / 10) + '0');
attr_correct_string[1] = attr_name[1] = (char)((x % 10) + '0');
attr1_id = H5Aopen(group1_id, attr_name, H5P_DEFAULT);
CHECK_I(attr1_id, "H5Aopen");
attr2_id = H5Aopen(group2_id, attr_name, H5P_DEFAULT);
CHECK_I(attr2_id, "H5Aopen");
ret = H5Aread(attr1_id, attr_type_id, attr_string);
CHECK_I(ret, "H5Aread");
VERIFY_STR(attr_string, attr_correct_string, "H5Aread");
ret = H5Aread(attr2_id, attr_type_id, attr_string);
CHECK_I(ret, "H5Aread");
VERIFY_STR(attr_string, attr_correct_string, "H5Aread");
ret = H5Aclose(attr1_id);
CHECK_I(attr1_id, "H5Aclose");
ret = H5Aclose(attr2_id);
CHECK_I(attr2_id, "H5Aclose");
}
/* Close everything */
ret = H5Tclose(attr_type_id);
CHECK_I(ret, "H5Tclose");
ret = H5Gclose(group1_id);
CHECK_I(ret, "H5Gclose");
ret = H5Gclose(group2_id);
CHECK_I(ret, "H5Gclose");
ret = H5Fclose(file_id);
CHECK_I(ret, "H5Fclose");
} /* size2_verify */
/*-------------------------------------------------------------------------
* Function: test_sohm_size2
*
* Purpose: Tests shared object header messages using size2_helper to
* create different kinds of big messages.
*
* If close_reopen is set, closes and reopens the HDF5 file
* repeatedly while writing.
*
* This test works by first creating FCPLs with various
* parameters, then creating a standard file that includes
* every kind of message that can be shared using the helper
* function size2_helper. The test measures the size of the
* file at various points. Once all of the files have been
* generated, the test compares the measured sizes of the files.
*
*
* Programmer: James Laird
* Friday, November 17, 2006
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_size2(int close_reopen)
{
2020-09-30 22:27:10 +08:00
hid_t fcpl_id = -1;
/* Sizes for file with no shared messages at all */
size2_helper_struct norm_sizes;
/* Sizes for files with all messages in one index */
size2_helper_struct list_index_med, list_index_big;
size2_helper_struct btree_index, list_index_small;
/* Sizes for files with messages in three different indexes */
size2_helper_struct mult_index_med, mult_index_btree;
/* Sizes for files that don't share all kinds of messages */
size2_helper_struct share_some_med, share_some_btree;
/* Sizes for files that share different sizes of messages */
size2_helper_struct share_some_toobig_index, share_tiny_index, type_space_index;
2020-09-30 22:27:10 +08:00
herr_t ret;
2020-09-30 22:27:10 +08:00
if (close_reopen == 0)
MESSAGE(5, ("Testing that shared object header messages save space\n"))
else
MESSAGE(5, ("Testing that shared messages save space when file is closed and reopened\n"))
/* Create an fcpl with SOHMs disabled */
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 0);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &norm_sizes);
size2_verify();
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
/* Create an fcpl with one big index */
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ALL_FLAG, 20);
CHECK_I(ret, "H5Pset_shared_mesg_index");
/* Set the indexes to use a medium-sized list */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &list_index_med);
size2_verify();
/* Try making the list really big */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1000, 900);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &list_index_big);
size2_verify();
/* Use a B-tree instead of a list */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &btree_index);
size2_verify();
/* Use such a small list that it'll become a B-tree */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &list_index_small);
size2_verify();
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
/* Create a new property list that puts messages in different indexes. */
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 3);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_SDSPACE_FLAG | H5O_SHMESG_DTYPE_FLAG, 20);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_FILL_FLAG | H5O_SHMESG_PLINE_FLAG, 20);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_SHMESG_ATTR_FLAG, 20);
CHECK_I(ret, "H5Pset_shared_mesg_index");
/* Use lists that are the same size as the "medium" list on the previous
* run.
*/
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &mult_index_med);
size2_verify();
/* Use all B-trees */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &mult_index_btree);
size2_verify();
/* Edit the same property list (this should work) and don't share all messages.
*/
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_PLINE_FLAG, 20);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_DTYPE_FLAG | H5O_SHMESG_FILL_FLAG, 100000);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 2, H5O_SHMESG_ATTR_FLAG | H5O_SHMESG_SDSPACE_FLAG, 20);
CHECK_I(ret, "H5Pset_shared_mesg_index");
/* Use "normal-sized" lists. */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 30, 25);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &share_some_med);
size2_verify();
/* Use btrees. */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &share_some_btree);
size2_verify();
/* Change the second index to hold only gigantic messages. Result should
* be the same as the previous file.
*/
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_DTYPE_FLAG | H5O_SHMESG_FILL_FLAG, 100000);
CHECK_I(ret, "H5Pset_shared_mesg_index");
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &share_some_toobig_index);
size2_verify();
/* Share even tiny dataspace and datatype messages. This should result in
* attribute datatypes being shared. Make this one use "really big" lists.
* It turns out that attribute dataspaces are just big enough that it saves
* some space to share them, while sharing datatypes creates as much overhead
* as one gains from sharing them.
*/
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_DTYPE_FLAG | H5O_SHMESG_SDSPACE_FLAG, 1);
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1000, 900);
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &share_tiny_index);
size2_verify();
/* Create the same file but don't share the really tiny messages */
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_DTYPE_FLAG | H5O_SHMESG_SDSPACE_FLAG, 100);
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1000, 900);
/* Get the file size & verify its contents */
size2_helper(fcpl_id, close_reopen, &type_space_index);
size2_verify();
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
/* Check that all sizes make sense. There is lots of room for inexact
* results here since so many different factors contribute to file size.
*/
/* Check sizes of all files created using a single index first */
/* The empty size of each file with shared messages enabled should be the
* same and should be bigger than a normal file.
*/
2020-09-30 22:27:10 +08:00
if (norm_sizes.empty_size > list_index_med.empty_size)
VERIFY(norm_sizes.empty_size, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.empty_size != list_index_big.empty_size)
VERIFY(list_index_med.empty_size, list_index_big.empty_size, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.empty_size != btree_index.empty_size)
VERIFY(list_index_med.empty_size, btree_index.empty_size, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.empty_size != list_index_small.empty_size)
VERIFY(list_index_med.empty_size, list_index_small.empty_size, "h5_get_file_size");
/* The files with indexes shouldn't be that much bigger than an
* empty file.
*/
2020-09-30 22:27:10 +08:00
if (list_index_med.empty_size > (h5_stat_size_t)((float)norm_sizes.empty_size * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
/* Once one dataset has been created (with one of every kind of message),
* the normal file should still be smallest. The very small list
* btree_convert should be smaller than the B-tree since it has no
* extra overhead. The small list should also be smaller than the B-tree.
* The very large list should be much larger than anything else.
*/
2020-09-30 22:27:10 +08:00
if (norm_sizes.first_dset >= list_index_small.first_dset)
VERIFY(norm_sizes.first_dset, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_small.first_dset >= btree_index.first_dset)
VERIFY(list_index_small.first_dset, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.first_dset >= btree_index.first_dset)
VERIFY(btree_index.first_dset, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (btree_index.first_dset >= list_index_big.first_dset)
VERIFY(list_index_med.first_dset, 1, "h5_get_file_size");
/* Once a few copies of the same dataset have been created, the
* very small list shouldn't have become a B-tree yet, so it should
* be the smallest file. A larger list should be next, followed
* by a B-tree, followed by a normal file, followed by a
* list that is too large.
*/
2020-09-30 22:27:10 +08:00
if (list_index_small.dsets1 >= list_index_med.dsets1)
VERIFY(btree_index.dsets1, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.dsets1 >= btree_index.dsets1)
VERIFY(list_index_med.dsets1, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (btree_index.dsets1 >= norm_sizes.dsets1)
VERIFY(btree_index.dsets1, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (norm_sizes.dsets1 >= list_index_big.dsets1)
VERIFY(list_index_big.dsets1, 1, "h5_get_file_size");
/* The size gain should have been the same for each of the lists;
* their overhead is fixed. The B-tree should have gained at least
* as much, and the normal file more than that.
*/
2020-09-30 22:27:10 +08:00
if ((list_index_small.dsets1 - list_index_small.first_dset) !=
(list_index_med.dsets1 - list_index_med.first_dset))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_med.dsets1 - list_index_med.first_dset) !=
(list_index_big.dsets1 - list_index_big.first_dset))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_big.dsets1 - list_index_big.first_dset) > (btree_index.dsets1 - btree_index.first_dset))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((btree_index.dsets1 - btree_index.first_dset) >= (norm_sizes.dsets1 - norm_sizes.first_dset))
VERIFY(0, 1, "h5_get_file_size");
/* Once another kind of each message has been written, the very small list
* should convert into a B-tree. Now the list should be smallest, then
* the B-trees (although the converted B-tree file may be a little bigger),
* then the normal file. The largest list may or may not be bigger than
* the normal file.
*/
2020-09-30 22:27:10 +08:00
if (list_index_med.dsets2 >= btree_index.dsets2)
VERIFY(list_index_med.dsets2, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (btree_index.dsets2 > (h5_stat_size_t)((float)list_index_small.dsets2 * OVERHEAD_ALLOWED))
VERIFY(btree_index.dsets2, list_index_small.dsets2, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_small.dsets2 >= norm_sizes.dsets2)
VERIFY(btree_index.dsets2, 1, "h5_get_file_size");
/* If the small list (now a B-tree) is bigger than the existing B-tree,
* it shouldn't be much bigger.
* It seems that the small lists tends to be pretty big anyway. Allow
* for it to have twice as much overhead.
*/
2020-09-30 22:27:10 +08:00
if (list_index_small.dsets2 >
(h5_stat_size_t)((float)btree_index.dsets2 * OVERHEAD_ALLOWED * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
/* The lists should have grown the least since they share messages and
* have no extra overhead. The normal file should have grown more than
* either the lists or the B-tree. The B-tree may not have grown more
* than the lists, depending on whether it needed to split nodes or not.
*/
2020-09-30 22:27:10 +08:00
if ((list_index_med.dsets2 - list_index_med.dsets1) != (list_index_big.dsets2 - list_index_big.dsets1))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_big.dsets2 - list_index_big.dsets1) > (btree_index.dsets2 - btree_index.dsets1))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((btree_index.dsets2 - btree_index.dsets1) >= (norm_sizes.dsets2 - norm_sizes.dsets1))
VERIFY(0, 1, "h5_get_file_size");
/* Interleaving the writes should have no effect on how the messages are
* shared. No new messages should be written to the indexes, so the
* sohm files will only get a little bit bigger.
*/
2020-09-30 22:27:10 +08:00
if (list_index_med.interleaved >= btree_index.interleaved)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (btree_index.interleaved > (h5_stat_size_t)((float)list_index_small.interleaved * OVERHEAD_ALLOWED))
VERIFY(btree_index.interleaved, list_index_small.interleaved, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_small.interleaved >= norm_sizes.interleaved)
VERIFY(0, 1, "h5_get_file_size");
/* The lists should still have grown the same amount. The converted
* B-tree shouldn't have grown more than the index that was originally
* a B-tree (although it might have grown less if there was extra free
* space within the file).
*/
2020-09-30 22:27:10 +08:00
if ((list_index_med.interleaved - list_index_med.dsets2) !=
(list_index_big.interleaved - list_index_big.dsets2))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_big.interleaved - list_index_big.dsets2) > (btree_index.interleaved - btree_index.dsets2))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_small.interleaved - list_index_small.dsets2) >
(btree_index.interleaved - btree_index.dsets2))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((btree_index.interleaved - btree_index.dsets2) >= (norm_sizes.interleaved - norm_sizes.dsets2))
VERIFY(0, 1, "h5_get_file_size");
/* After many attributes have been written, both the small and medium lists
* should have become B-trees and be about the same size as the index
* that started as a B-tree.
* Add in OVERHEAD_ALLOWED as a fudge factor here, since the allocation
* of file space can be hard to predict.
*/
2020-09-30 22:27:10 +08:00
if (btree_index.attrs1 > (h5_stat_size_t)((float)list_index_small.attrs1 * OVERHEAD_ALLOWED))
VERIFY(btree_index.attrs1, list_index_small.attrs1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (btree_index.attrs1 > (h5_stat_size_t)((float)list_index_med.attrs1 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.attrs1 > (h5_stat_size_t)((float)btree_index.attrs1 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_small.attrs1 > (h5_stat_size_t)((float)btree_index.attrs1 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
/* Neither of the converted lists should be too much bigger than
* the index that was originally a B-tree.
*/
2020-09-30 22:27:10 +08:00
if (list_index_small.attrs1 > (h5_stat_size_t)((float)btree_index.attrs1 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.attrs1 > (h5_stat_size_t)((float)btree_index.attrs1 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
/* The "normal" file should have had less overhead, so should gain less
* size than any of the other indexes since none of these attribute
* messages could be shared. The large list should have gained
* less overhead than the B-tree indexes.
*/
2020-09-30 22:27:10 +08:00
if ((norm_sizes.attrs1 - norm_sizes.interleaved) >= (list_index_big.attrs1 - list_index_big.interleaved))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_big.attrs1 - list_index_big.interleaved) >=
(list_index_small.attrs1 - list_index_small.interleaved))
VERIFY(0, 1, "h5_get_file_size");
/* Give it some overhead (for checkin to move messages into continuation message) */
2020-09-30 22:27:10 +08:00
if ((list_index_small.attrs1 - list_index_small.interleaved) >
(h5_stat_size_t)((float)(btree_index.attrs1 - btree_index.interleaved) * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
/* Writing another copy of each attribute shouldn't change the ordering
* of sizes. The big list index is still too big to be smaller than a
* normal file. The B-tree indexes should all be about the same size.
*/
2020-09-30 22:27:10 +08:00
if (btree_index.attrs2 > (h5_stat_size_t)((float)list_index_small.attrs2 * OVERHEAD_ALLOWED))
VERIFY(btree_index.attrs2, list_index_small.attrs2, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_small.attrs2 > (h5_stat_size_t)((float)btree_index.attrs2 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (btree_index.attrs2 > (h5_stat_size_t)((float)list_index_med.attrs2 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.attrs2 > (h5_stat_size_t)((float)btree_index.attrs2 * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_med.attrs2 >= norm_sizes.attrs2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (list_index_big.attrs2 >= norm_sizes.attrs2)
VERIFY(0, 1, "h5_get_file_size");
/* All of the B-tree indexes should have gained about the same amount
* of space; at least as much as the list index and less than a normal
* file.
*/
2020-09-30 22:27:10 +08:00
if ((list_index_small.attrs2 - list_index_small.attrs1) > (btree_index.attrs2 - btree_index.attrs1))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_med.attrs2 - list_index_med.attrs1) > (btree_index.attrs2 - btree_index.attrs1))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_big.attrs2 - list_index_big.attrs1) > (list_index_med.attrs2 - list_index_med.attrs1))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((btree_index.attrs2 - btree_index.attrs1) >= (norm_sizes.attrs2 - norm_sizes.attrs1))
VERIFY(0, 1, "h5_get_file_size");
/* Done checking the first few files that use a single index. */
/* Start comparing other kinds of files with these "standard"
* one-index files
*/
/* Check files with multiple indexes. */
/* These files should be larger when first created than one-index
* files.
*/
2020-09-30 22:27:10 +08:00
if (mult_index_med.empty_size <= list_index_med.empty_size)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (mult_index_btree.empty_size != mult_index_med.empty_size)
VERIFY(0, 1, "h5_get_file_size");
/* When the first dataset is written, they should grow quite a bit as
* many different indexes must be created.
*/
2020-09-30 22:27:10 +08:00
if ((mult_index_med.first_dset - mult_index_med.empty_size) <=
(list_index_med.first_dset - list_index_med.empty_size))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_btree.first_dset - mult_index_btree.empty_size) <=
(btree_index.first_dset - btree_index.empty_size))
VERIFY(0, 1, "h5_get_file_size");
/* When the second dataset is written, they should grow less as
* some extra heap space is allocated, but no more indices.
*/
2020-09-30 22:27:10 +08:00
if ((mult_index_med.second_dset - mult_index_med.first_dset) >
(mult_index_med.first_dset - mult_index_med.empty_size))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((list_index_med.second_dset - list_index_med.first_dset) >
(list_index_med.first_dset - list_index_med.empty_size))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_btree.second_dset - mult_index_btree.first_dset) >
(mult_index_btree.first_dset - mult_index_btree.empty_size))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((btree_index.second_dset - btree_index.first_dset) >
(btree_index.first_dset - btree_index.empty_size))
VERIFY(0, 1, "h5_get_file_size");
/* And the size delta for the second dataset is less in files with only
* one index.
*/
2020-09-30 22:27:10 +08:00
if ((mult_index_med.second_dset - mult_index_med.first_dset) <=
(list_index_med.second_dset - list_index_med.first_dset))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_btree.first_dset - mult_index_btree.empty_size) <=
(btree_index.first_dset - btree_index.empty_size))
VERIFY(0, 1, "h5_get_file_size");
/* Once that initial overhead is out of the way and the lists/btrees
* have been created, files with more than one index should grow at
* the same rate or slightly faster than files with just one index
* and one heap.
*/
2020-09-30 22:27:10 +08:00
if ((mult_index_med.dsets1 - mult_index_med.second_dset) !=
(list_index_med.dsets1 - list_index_med.second_dset))
VERIFY((mult_index_med.dsets1 - mult_index_med.second_dset),
(list_index_med.dsets1 - list_index_med.second_dset), "h5_get_file_size");
if ((mult_index_btree.dsets1 - mult_index_btree.second_dset) !=
(btree_index.dsets1 - btree_index.second_dset))
VERIFY((mult_index_btree.dsets1 - mult_index_btree.second_dset),
(btree_index.dsets1 - btree_index.second_dset), "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_med.dsets2 - mult_index_med.dsets1) >
(h5_stat_size_t)((float)(list_index_med.dsets2 - list_index_med.dsets1) * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_btree.dsets2 - mult_index_btree.dsets1) >
(h5_stat_size_t)((float)(btree_index.dsets2 - btree_index.dsets1) * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_med.interleaved - mult_index_med.dsets2) >
(h5_stat_size_t)((float)(list_index_med.interleaved - list_index_med.dsets2) * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_btree.interleaved - mult_index_btree.dsets2) >
(h5_stat_size_t)((float)(btree_index.interleaved - btree_index.dsets2) * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
/* When all the attributes are added, only the index holding attributes
* will become a B-tree. Skip the interleaved to attrs1 interval when
* this happens because it's hard to predict exactly how much space this
* will take.
*/
2020-09-30 22:27:10 +08:00
if ((mult_index_med.attrs2 - mult_index_med.attrs1) >
(h5_stat_size_t)((float)(list_index_med.attrs2 - list_index_med.attrs1) * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((mult_index_btree.attrs2 - mult_index_btree.attrs1) >
(h5_stat_size_t)((float)(btree_index.attrs2 - btree_index.attrs1) * OVERHEAD_ALLOWED))
VERIFY(0, 1, "h5_get_file_size");
/* The final file size for both of the multiple index files should be
* smaller than a normal file but bigger than any of the one-index files.
*/
2020-09-30 22:27:10 +08:00
if (mult_index_med.attrs2 >= norm_sizes.attrs2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (mult_index_btree.attrs2 >= norm_sizes.attrs2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)mult_index_med.attrs2 * OVERHEAD_ALLOWED) < btree_index.attrs2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)mult_index_btree.attrs2 * OVERHEAD_ALLOWED) < btree_index.attrs2)
VERIFY(0, 1, "h5_get_file_size");
/* Check files that don't share all messages. */
/* These files have three indexes like the files above, so they should be
* the same size when created.
*/
2020-09-30 22:27:10 +08:00
if (share_some_med.empty_size != mult_index_med.empty_size)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_med.empty_size != share_some_btree.empty_size)
VERIFY(0, 1, "h5_get_file_size");
/* When the first dataset is created, they should be not quite as big
* as equivalent files that share all messages (since shared messages
* have a little bit of overhead).
*/
2020-09-30 22:27:10 +08:00
if (share_some_med.first_dset >= mult_index_med.first_dset)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_btree.first_dset >= mult_index_btree.first_dset)
VERIFY(0, 1, "h5_get_file_size");
/* The files that share some should have a growth rate in between
* files that share all messages and normal files
*/
2020-09-30 22:27:10 +08:00
if ((share_some_med.interleaved - share_some_med.first_dset) <=
(mult_index_med.interleaved - mult_index_med.first_dset))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((share_some_med.interleaved - share_some_med.first_dset) >=
(norm_sizes.interleaved - norm_sizes.first_dset))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((share_some_btree.interleaved - share_some_btree.first_dset) <=
(mult_index_btree.interleaved - mult_index_btree.first_dset))
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((share_some_btree.interleaved - share_some_btree.first_dset) >=
(norm_sizes.interleaved - norm_sizes.first_dset))
VERIFY(0, 1, "h5_get_file_size");
/* Check the file that only stored gigantic messages in its second
* index. Since no messages were that big, it should be identical
* to the file with an empty index.
*/
2020-09-30 22:27:10 +08:00
if (share_some_btree.empty_size != share_some_toobig_index.empty_size)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_btree.first_dset != share_some_toobig_index.first_dset)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_btree.dsets1 != share_some_toobig_index.dsets1)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_btree.dsets2 != share_some_toobig_index.dsets2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_btree.interleaved != share_some_toobig_index.interleaved)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_btree.attrs1 != share_some_toobig_index.attrs1)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_some_btree.attrs2 != share_some_toobig_index.attrs2)
VERIFY(0, 1, "h5_get_file_size");
/* Check the file that shares even very tiny messages. Once messages
* are written to it, it should gain a little space from sharing the
* messages and lose a little space to overhead so that it's just slightly
* smaller than a file that doesn't share tiny messages.
* If the overhead increases or the size of messages decreases, these
* numbers may be off.
*/
2020-09-30 22:27:10 +08:00
if (share_tiny_index.empty_size != type_space_index.empty_size)
VERIFY(share_tiny_index.empty_size, type_space_index.empty_size, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.first_dset >=
(h5_stat_size_t)((float)type_space_index.first_dset * OVERHEAD_ALLOWED))
VERIFY(share_tiny_index.first_dset, type_space_index.first_dset, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.first_dset < type_space_index.first_dset)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.second_dset >= type_space_index.second_dset)
VERIFY(share_tiny_index.second_dset, type_space_index.second_dset, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)share_tiny_index.second_dset * OVERHEAD_ALLOWED) <
type_space_index.second_dset)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.dsets1 >= type_space_index.dsets1)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)share_tiny_index.dsets1 * OVERHEAD_ALLOWED) < type_space_index.dsets1)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.dsets2 >= type_space_index.dsets2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)share_tiny_index.dsets2 * OVERHEAD_ALLOWED) < type_space_index.dsets2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.interleaved >= type_space_index.interleaved)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)share_tiny_index.interleaved * OVERHEAD_ALLOWED) <
type_space_index.interleaved)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.attrs1 >= type_space_index.attrs1)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)share_tiny_index.attrs1 * OVERHEAD_ALLOWED) < type_space_index.attrs1)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if (share_tiny_index.attrs2 >= type_space_index.attrs2)
VERIFY(0, 1, "h5_get_file_size");
2020-09-30 22:27:10 +08:00
if ((h5_stat_size_t)((float)share_tiny_index.attrs2 * OVERHEAD_ALLOWED) < type_space_index.attrs2)
VERIFY(0, 1, "h5_get_file_size");
} /* test_sohm_size2 */
/*-------------------------------------------------------------------------
 * Function:    delete_helper_write
 *
 * Purpose:     Creates a dataset and attribute in file FILE_ID using value X
 *              in the DSPACE_ID and DCPL_ID arrays.
 *
 * Programmer:  James Laird
 *              Tuesday, December 19, 2006
 *
 *-------------------------------------------------------------------------
 */
static void
delete_helper_write(hid_t file_id, hid_t *dspace_id, hid_t *dcpl_id, int x)
{
    hid_t  dset_id = -1;
    hid_t  attr_id = -1;
    char   wdata   = (char)(x + 'a'); /* One-character payload derived from x */
    herr_t ret;

    /* Create the x'th dataset with the matching dataspace and DCPL */
    dset_id =
        H5Dcreate2(file_id, DSETNAME[x], H5T_NATIVE_CHAR, dspace_id[x], H5P_DEFAULT, dcpl_id[x], H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dcreate2");

    /* Write the payload character into the dataset */
    ret = H5Dwrite(dset_id, H5T_NATIVE_CHAR, dspace_id[x], dspace_id[x], H5P_DEFAULT, &wdata);
    CHECK_I(ret, "H5Dwrite");

    /* Attach an attribute carrying the same character */
    attr_id = H5Acreate2(dset_id, "attr_name", H5T_NATIVE_CHAR, dspace_id[x], H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(attr_id, "H5Acreate2");
    ret = H5Awrite(attr_id, H5T_NATIVE_CHAR, &wdata);
    CHECK_I(ret, "H5Awrite");

    /* Release the attribute and dataset handles */
    ret = H5Aclose(attr_id);
    CHECK_I(ret, "H5Aclose");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
} /* delete_helper_write */
/*-------------------------------------------------------------------------
 * Function:    delete_helper_read
 *
 * Purpose:     Checks the value of the dataset and attribute created by
 *              delete_helper_write.
 *
 * Programmer:  James Laird
 *              Tuesday, December 19, 2006
 *
 *-------------------------------------------------------------------------
 */
static void
delete_helper_read(hid_t file_id, hid_t *dspace_id, int x)
{
    hid_t  dset_id = -1;
    hid_t  attr_id = -1;
    char   rdata;
    herr_t ret;

    /* Open the x'th dataset and verify the character that
     * delete_helper_write stored in it.
     */
    dset_id = H5Dopen2(file_id, DSETNAME[x], H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dopen2");
    rdata = '\0';

    ret = H5Dread(dset_id, H5T_NATIVE_CHAR, dspace_id[x], dspace_id[x], H5P_DEFAULT, &rdata);
    CHECK_I(ret, "H5Dread");
    VERIFY(rdata, (x + 'a'), "H5Dread");

    /* Open the attribute and verify it holds the same character */
    attr_id = H5Aopen(dset_id, "attr_name", H5P_DEFAULT);
    CHECK_I(attr_id, "H5Aopen");
    rdata = '\0';

    /* Bug fix: failure labels previously said "H5Dread" for these
     * attribute calls, which would misattribute any test failure.
     */
    ret = H5Aread(attr_id, H5T_NATIVE_CHAR, &rdata);
    CHECK_I(ret, "H5Aread");
    VERIFY(rdata, (x + 'a'), "H5Aread");

    /* Cleanup */
    ret = H5Aclose(attr_id);
    CHECK_I(ret, "H5Aclose");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
} /* delete_helper_read */
/*-------------------------------------------------------------------------
 * Function:    delete_helper
 *
 * Purpose:     Creates some shared messages, deletes them, and creates some
 *              more messages.  The second batch of messages should use the
 *              space freed by the first batch, so should be about the same
 *              size as a file that never had the first batch of messages
 *              created.
 *
 *              FCPL_ID is the file creation property list to use.
 *              DSPACE_ID and DCPL_ID are arrays of different dataspaces
 *              and property lists with filter pipelines used to create the
 *              messages.
 *
 * Programmer:  James Laird
 *              Tuesday, December 19, 2006
 *
 *-------------------------------------------------------------------------
 */
static void
delete_helper(hid_t fcpl_id, hid_t *dspace_id, hid_t *dcpl_id)
{
    hid_t          file_id = -1;
    int            x;
    h5_stat_size_t norm_filesize;
    h5_stat_size_t deleted_filesize;
    herr_t         ret;

    /* Baseline: a file that only ever contained the second batch of
     * messages (indices HALF_DELETE_NUM_MESGS .. DELETE_NUM_MESGS-1).
     */
    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");

    for (x = HALF_DELETE_NUM_MESGS; x < DELETE_NUM_MESGS; ++x)
        delete_helper_write(file_id, dspace_id, dcpl_id, x);

    /* Sanity-check that the messages read back correctly */
    for (x = HALF_DELETE_NUM_MESGS; x < DELETE_NUM_MESGS; ++x)
        delete_helper_read(file_id, dspace_id, x);

    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    norm_filesize = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Now build a file that first holds messages 0 .. HALF_DELETE_NUM_MESGS-1 */
    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");

    for (x = 0; x < HALF_DELETE_NUM_MESGS; ++x)
        delete_helper_write(file_id, dspace_id, dcpl_id, x);

    /* Verify each dataset, then delete it (which should delete
     * its shared messages as well)
     */
    for (x = 0; x < HALF_DELETE_NUM_MESGS; ++x) {
        delete_helper_read(file_id, dspace_id, x);
        ret = H5Ldelete(file_id, DSETNAME[x], H5P_DEFAULT);
        CHECK_I(ret, "H5Ldelete");
    }

    /* The file is now empty; write and verify the second batch of
     * messages, which should reuse the freed space.
     */
    for (x = HALF_DELETE_NUM_MESGS; x < DELETE_NUM_MESGS; ++x)
        delete_helper_write(file_id, dspace_id, dcpl_id, x);

    for (x = HALF_DELETE_NUM_MESGS; x < DELETE_NUM_MESGS; ++x)
        delete_helper_read(file_id, dspace_id, x);

    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    deleted_filesize = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* The two file sizes should agree to within the allowed overhead */
    if (norm_filesize > (h5_stat_size_t)((float)deleted_filesize * OVERHEAD_ALLOWED))
        VERIFY(norm_filesize, deleted_filesize, "h5_get_file_size");
    if (deleted_filesize > (h5_stat_size_t)((float)norm_filesize * OVERHEAD_ALLOWED))
        VERIFY(deleted_filesize, norm_filesize, "h5_get_file_size");
} /* delete_helper */
/*-------------------------------------------------------------------------
* Function: test_sohm_delete
*
* Purpose: Tests shared object header message deletion.
*
* Creates lots of shared messages, then ensures that they
* can be deleted without corrupting the remaining messages.
* Also checks that indexes convert from B-trees back into
* lists.
*
* Programmer: James Laird
* Tuesday, December 19, 2006
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_delete(void)
{
hid_t fcpl_id;
/* We'll use dataspaces and filter pipelines for this test.
* Create a number of distinct messages of each type.
*/
2020-09-30 22:27:10 +08:00
hid_t dspace_id[DELETE_NUM_MESGS] = {0};
hid_t dcpl_id[DELETE_NUM_MESGS] = {0};
unsigned u;
2020-09-30 22:27:10 +08:00
int x;
hsize_t dims[] = DELETE_DIMS;
herr_t ret;
MESSAGE(5, ("Testing deletion of SOHMs\n"));
/* Create a number of different dataspaces.
* For simplicity, each dataspace has only one element.
*/
2020-09-30 22:27:10 +08:00
for (u = 0; u < DELETE_NUM_MESGS; ++u) {
dspace_id[u] = H5Screate_simple((int)(u + 1), dims, dims);
CHECK_I(dspace_id[u], "H5Screate_simple");
} /* end for */
/* Create a number of different filter pipelines. */
dcpl_id[0] = H5Pcreate(H5P_DATASET_CREATE);
CHECK_I(dcpl_id[0], "H5Pcreate");
ret = H5Pset_chunk(dcpl_id[0], 1, dims);
CHECK_I(ret, "H5Pset_chunk");
ret = H5Pset_shuffle(dcpl_id[0]);
CHECK_I(ret, "H5Pset_shuffle");
2020-09-30 22:27:10 +08:00
for (u = 1; u < DELETE_NUM_MESGS; u += 2) {
dcpl_id[u] = H5Pcopy(dcpl_id[u - 1]);
CHECK_I(dcpl_id[u], "H5Pcopy");
ret = H5Pset_chunk(dcpl_id[u], (int)(u + 1), dims);
CHECK_I(ret, "H5Pset_chunk");
ret = H5Pset_deflate(dcpl_id[u], 1);
CHECK_I(ret, "H5Pset_deflate");
dcpl_id[u + 1] = H5Pcopy(dcpl_id[u]);
CHECK_I(dcpl_id[u + 1], "H5Pcopy");
ret = H5Pset_chunk(dcpl_id[u + 1], (int)(u + 2), dims);
CHECK_I(ret, "H5Pset_chunk");
ret = H5Pset_shuffle(dcpl_id[u + 1]);
CHECK_I(ret, "H5Pset_shuffle");
} /* end for */
/* Create an fcpl where all messages are shared in the same index */
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ALL_FLAG, 16);
CHECK_I(ret, "H5Pset_shared_mesg_index");
/* Use big list indexes */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 4 * DELETE_NUM_MESGS, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Test that messages can be created and deleted properly */
delete_helper(fcpl_id, dspace_id, dcpl_id);
/* Use B-tree indexes */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
delete_helper(fcpl_id, dspace_id, dcpl_id);
/* Use small list indexes that will convert from lists to B-trees and back */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, HALF_DELETE_NUM_MESGS, HALF_DELETE_NUM_MESGS - 1);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
delete_helper(fcpl_id, dspace_id, dcpl_id);
/* Use two indexes */
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_SDSPACE_FLAG | H5O_SHMESG_ATTR_FLAG, 16);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_DTYPE_FLAG, 16);
CHECK_I(ret, "H5Pset_shared_mesg_index");
/* Use big list indexes */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 5000, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
/* Use B-tree indexes */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
delete_helper(fcpl_id, dspace_id, dcpl_id);
/* Set phase change values so that one index converts to a B-tree and one doesn't */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, HALF_DELETE_NUM_MESGS + 1, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
delete_helper(fcpl_id, dspace_id, dcpl_id);
/* Test with varying message sizes (ideally, so some messages are too
* small to be written but some are big enough that they are still written
*/
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
2020-09-30 22:27:10 +08:00
for (u = DELETE_MIN_MESG_SIZE; u <= DELETE_MAX_MESG_SIZE; u += 10) {
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ALL_FLAG, u);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
delete_helper(fcpl_id, dspace_id, dcpl_id);
} /* end for */
/* Cleanup */
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
2020-09-30 22:27:10 +08:00
for (x = DELETE_NUM_MESGS - 1; x >= 0; --x) {
ret = H5Sclose(dspace_id[x]);
CHECK_I(ret, "H5Sclose");
ret = H5Pclose(dcpl_id[x]);
CHECK_I(ret, "H5Pclose");
} /* end for */
} /* test_sohm_delete */
/*-------------------------------------------------------------------------
 * Function:    verify_dset_create_and_delete_does_not_grow_file
 *
 * Purpose:     Tests that shared object header message deletion returns
 *              the file to its previous state using the supplied FCPL.
 *
 *              Creates a file according to the supplied FCPL,
 *              then creates datasets and deletes them.
 *              Done in two passes: once with one dataset, once with two.
 *
 * Return:      0 if this helper added no new test errors, -1 otherwise.
 *
 * Programmer:  James Laird
 *              Wednesday, January 3, 2007
 *
 *-------------------------------------------------------------------------
 */
static int
verify_dset_create_and_delete_does_not_grow_file(hid_t fcpl_id)
{
    hid_t          file_id;
    hid_t          dspace_id;
    hid_t          dset_id;
    hsize_t        dims[1] = {1};
    h5_stat_size_t initial_filesize, deleted_filesize;
    int            old_nerrs; /* Error count when entering this routine */
    herr_t         ret;

    /* Remember the error count so we can detect new failures on exit */
    old_nerrs = GetTestNumErrs();

    /* Scalar-ish dataspace reused by every dataset below */
    dspace_id = H5Screate_simple(1, dims, dims);
    CHECK_I(dspace_id, "H5Screate_simple");

    /* Measure the size of a pristine file created with this FCPL */
    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    initial_filesize = h5_get_file_size(FILENAME, H5P_DEFAULT);

    /* Pass 1: create a single dataset, delete it, and check that the
     * file returns to its initial size.
     */
    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");
    dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_SHORT, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dcreate2");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Ldelete(file_id, "dset", H5P_DEFAULT);
    CHECK_I(ret, "H5Ldelete");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    deleted_filesize = h5_get_file_size(FILENAME, H5P_DEFAULT);
    VERIFY(deleted_filesize, initial_filesize, "h5_get_file_size");

    /* Pass 2: same check with two datasets whose messages should be shared */
    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");

    dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_SHORT, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dcreate2");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");

    dset_id =
        H5Dcreate2(file_id, "dset2", H5T_NATIVE_SHORT, dspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dcreate2");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");

    /* Delete both datasets */
    ret = H5Ldelete(file_id, "dset", H5P_DEFAULT);
    CHECK_I(ret, "H5Ldelete");
    ret = H5Ldelete(file_id, "dset2", H5P_DEFAULT);
    CHECK_I(ret, "H5Ldelete");

    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");
    deleted_filesize = h5_get_file_size(FILENAME, H5P_DEFAULT);
    VERIFY(deleted_filesize, initial_filesize, "h5_get_file_size");

    /* Cleanup */
    ret = H5Sclose(dspace_id);
    CHECK_I(ret, "H5Sclose");

    /* Report whether any new errors were logged while we ran */
    if (old_nerrs == GetTestNumErrs())
        return (0);
    else
        return (-1);
} /* verify_dset_create_and_delete_does_not_grow_file */
/*-------------------------------------------------------------------------
* Function: test_sohm_delete_revert
*
* Purpose: Verifies that creation and deletion of datasets with shared
* message headers will not increase file size.
*
* Programmer: James Laird
* Wednesday, January 3, 2007
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_delete_revert(void)
{
2020-09-30 22:27:10 +08:00
hid_t fcpl_id;
herr_t ret;
MESSAGE(5, ("Testing that file reverts to original size on SOHM deletion\n"));
/* Create an fcpl with messages in two indexes */
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 2);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_DTYPE_FLAG, 10);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_index(fcpl_id, 1, H5O_SHMESG_SDSPACE_FLAG, 10);
CHECK_I(ret, "H5Pset_shared_mesg_index");
/* Call the helper function to test this FCPL. */
ret = verify_dset_create_and_delete_does_not_grow_file(fcpl_id);
CHECK_I(ret, "verify_dset_create_and_delete_does_not_grow_file");
/* Try using B-trees */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dset_create_and_delete_does_not_grow_file(fcpl_id);
CHECK_I(ret, "verify_dset_create_and_delete_does_not_grow_file");
/* Try sharing all messages */
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ALL_FLAG, 10);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 10, 5);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dset_create_and_delete_does_not_grow_file(fcpl_id);
CHECK_I(ret, "verify_dset_create_and_delete_does_not_grow_file");
/* Try using B-trees */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dset_create_and_delete_does_not_grow_file(fcpl_id);
CHECK_I(ret, "verify_dset_create_and_delete_does_not_grow_file");
/* There should be at least two messages in the test (datatype and
* dataspace). Use an index that will transition from a list to
* a B-tree and back.
*/
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1, 2);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dset_create_and_delete_does_not_grow_file(fcpl_id);
CHECK_I(ret, "verify_dset_create_and_delete_does_not_grow_file");
/* Try with shared messages enabled, but when messages are too big
* to be shared.
*/
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ALL_FLAG, 35);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dset_create_and_delete_does_not_grow_file(fcpl_id);
CHECK_I(ret, "verify_dset_create_and_delete_does_not_grow_file");
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
} /* test_sohm_delete_revert */
/*-------------------------------------------------------------------------
 * Function:    verify_dset_create_and_open_through_extlink_with_sohm
 *
 * Purpose:     Tests that a dataset created through an external link can
 *              be opened (that shared messages were created or not and
 *              were shared in the right file).
 *
 * Programmer:  James Laird
 *              Friday, December 22, 2006
 *
 *-------------------------------------------------------------------------
 */
static void
verify_dset_create_and_open_through_extlink_with_sohm(hid_t src_fcpl_id, hid_t dst_fcpl_id)
{
    hid_t   src_file_id = -1;
    hid_t   dst_file_id = -1;
    hid_t   space_id    = -1;
    hid_t   dset_id     = -1;
    hsize_t dims[]      = {1, 1};
    herr_t  ret;

    /* Create the source and destination files with their respective FCPLs */
    src_file_id = H5Fcreate(FILENAME_SRC, H5F_ACC_TRUNC, src_fcpl_id, H5P_DEFAULT);
    CHECK_I(src_file_id, "H5Fcreate");
    dst_file_id = H5Fcreate(FILENAME_DST, H5F_ACC_TRUNC, dst_fcpl_id, H5P_DEFAULT);
    CHECK_I(dst_file_id, "H5Fcreate");

    /* Link from the source file's root to the destination file's root */
    ret = H5Lcreate_external(FILENAME_DST, "/", src_file_id, "ext_link", H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(ret, "H5Lcreate_external");

    /* Create a dataset in the destination file by traversing the link */
    space_id = H5Screate_simple(2, dims, dims);
    CHECK_I(space_id, "H5Screate_simple");
    dset_id = H5Dcreate2(src_file_id, "ext_link/dataset", H5T_NATIVE_FLOAT, space_id, H5P_DEFAULT,
                         H5P_DEFAULT, H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dcreate2");

    /* Close the dataset and both files so everything is flushed to disk */
    ret = H5Sclose(space_id);
    CHECK_I(ret, "H5Sclose");
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Fclose(src_file_id);
    CHECK_I(ret, "H5Fclose");
    ret = H5Fclose(dst_file_id);
    CHECK_I(ret, "H5Fclose");

    /* Re-open the destination file directly.  If the shared messages were
     * written into the wrong file, reading the dataset's object header
     * here will fail.
     */
    dst_file_id = H5Fopen(FILENAME_DST, H5F_ACC_RDONLY, H5P_DEFAULT);
    CHECK_I(dst_file_id, "H5Fopen");
    dset_id = H5Dopen2(dst_file_id, "dataset", H5P_DEFAULT);
    CHECK_I(dset_id, "H5Dopen2");

    /* Cleanup */
    ret = H5Dclose(dset_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Fclose(dst_file_id);
    CHECK_I(ret, "H5Fclose");
} /* verify_dset_create_and_open_through_extlink_with_sohm */
/*-------------------------------------------------------------------------
* Function: test_sohm_extlink
*
* Purpose: Test creating SOHMs through external links (to make sure that
* they're created in the correct file).
*
* Programmer: James Laird
* Friday, December 22, 2006
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_extlink(void)
{
hid_t fcpl_id = -1;
hbool_t driver_is_default_compatible;
herr_t ret;
MESSAGE(5, ("Testing SOHM creation through external links\n"));
message MPI tag calculations * Committing clang-format changes * Avoid performing fstat calls on all MPI ranks * Add MPI_Barrier before finalizing IOC threads * Use try_lock in I/O queue dispatch to minimize contention from worker threads * Use simple Waitall for nonblocking I/O waits * Add configurable IOC main thread delay and try_lock option to I/O queue dispatch * Fix bug that could cause serialization of non-overlapping I/O requests * Temporarily treat collective subfiling vector I/O calls as independent * Removed unused mercury bits * Add stubs for subfiling and IOC file delete callback * Update VFD testing for Subfiling VFD * Work around HDF5 metadata cache bug for Subfiling VFD when MPI Comm size = 1 * Committing clang-format changes Co-authored-by: mainzer <mainzer#hdfgroup.org> Co-authored-by: Neil Fortner <nfortne2@hdfgroup.org> Co-authored-by: Scot Breitenfeld <brtnfld@hdfgroup.org> Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: jrmainzer <72230804+jrmainzer@users.noreply.github.com> Co-authored-by: Richard Warren <Richard.Warren@hdfgroup.org> Co-authored-by: Richard.Warren <richard.warren@jelly.ad.hdfgroup.org>
2022-07-23 04:03:12 +08:00
ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
CHECK_I(ret, "h5_driver_is_default_vfd_compatible");
if (!driver_is_default_compatible) {
HDprintf("-- SKIPPED --\n");
return;
}
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ALL_FLAG, 16);
CHECK_I(ret, "H5Pset_shared_mesg_index");
verify_dset_create_and_open_through_extlink_with_sohm(fcpl_id, H5P_DEFAULT);
verify_dset_create_and_open_through_extlink_with_sohm(H5P_DEFAULT, fcpl_id);
verify_dset_create_and_open_through_extlink_with_sohm(fcpl_id, fcpl_id);
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
} /* test_sohm_extlink */
/*-------------------------------------------------------------------------
 * Function:    verify_dataset_extension
 *
 * Purpose:     Tests extending a dataset's dataspace when sharing is
 *              enabled.
 *
 *              If close_reopen is TRUE, closes and reopens the file to
 *              ensure that data is correctly written to disk.
 *
 * Return:      0 if no new test errors were recorded, -1 otherwise
 *
 * Programmer:  James Laird
 *              Wednesday, January 10, 2007
 *
 *-------------------------------------------------------------------------
 */
static int
verify_dataset_extension(hid_t fcpl_id, hbool_t close_reopen)
{
    hid_t    file_id       = H5I_INVALID_HID;
    hid_t    orig_space_id = H5I_INVALID_HID;
    hid_t    space1_id, space2_id, space3_id;
    hid_t    dcpl_id = H5I_INVALID_HID;
    hid_t    dset1_id, dset2_id = H5I_INVALID_HID, dset3_id = H5I_INVALID_HID;
    hsize_t  dims1[]    = {1, 2};             /* Initial dataset dimensions */
    hsize_t  max_dims[] = {H5S_UNLIMITED, 2}; /* First dimension is extendible */
    hsize_t  dims2[]    = {5, 2};             /* Dimensions after extension */
    hsize_t  out_dims[2];
    hsize_t  out_maxdims[2];
    int      x;
    int      old_nerrs; /* Number of errors when entering this routine */
    herr_t   ret;
    hsize_t *space_dims[3]; /* Expected current dims for datasets 1-3 */

    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     * Macro: TSOHM_VDE_VERIFY_SPACES
     *
     * Purpose: Encapsulate a common pattern
     *          Open, read-verify, and close the dataspaces for datasets 1-3
     *
     * Programmer: Jacob Smith
     *             2018 November 5
     * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     */
#define TSOHM_VDE_VERIFY_SPACES(dims)                                                                        \
    {                                                                                                        \
        /* Open dataspaces                                                                                   \
         */                                                                                                  \
        space1_id = H5Dget_space(dset1_id);                                                                  \
        CHECK_I(space1_id, "H5Dget_space");                                                                  \
        space2_id = H5Dget_space(dset2_id);                                                                  \
        CHECK_I(space2_id, "H5Dget_space");                                                                  \
        space3_id = H5Dget_space(dset3_id);                                                                  \
        CHECK_I(space3_id, "H5Dget_space");                                                                  \
        /* Verify dataspaces                                                                                 \
         */                                                                                                  \
        ret = H5Sget_simple_extent_dims(space1_id, out_dims, out_maxdims);                                   \
        CHECK_I(ret, "H5Sget_simple_extent_dims");                                                           \
        for (x = 0; x < EXTEND_NDIMS; ++x) {                                                                 \
            VERIFY(out_dims[x], (dims)[0][x], "H5Sget_simple_extent_dims");                                  \
            VERIFY(out_maxdims[x], max_dims[x], "H5Sget_simple_extent_dims");                                \
        }                                                                                                    \
        ret = H5Sget_simple_extent_dims(space2_id, out_dims, out_maxdims);                                   \
        CHECK_I(ret, "H5Sget_simple_extent_dims");                                                           \
        for (x = 0; x < EXTEND_NDIMS; ++x) {                                                                 \
            VERIFY(out_dims[x], (dims)[1][x], "H5Sget_simple_extent_dims");                                  \
            VERIFY(out_maxdims[x], max_dims[x], "H5Sget_simple_extent_dims");                                \
        }                                                                                                    \
        ret = H5Sget_simple_extent_dims(space3_id, out_dims, out_maxdims);                                   \
        CHECK_I(ret, "H5Sget_simple_extent_dims");                                                           \
        for (x = 0; x < EXTEND_NDIMS; ++x) {                                                                 \
            VERIFY(out_dims[x], (dims)[2][x], "H5Sget_simple_extent_dims");                                  \
            VERIFY(out_maxdims[x], max_dims[x], "H5Sget_simple_extent_dims");                                \
        }                                                                                                    \
        /* Close dataspaces                                                                                  \
         */                                                                                                  \
        CHECK_I(H5Sclose(space1_id), "H5Sclose");                                                            \
        CHECK_I(H5Sclose(space2_id), "H5Sclose");                                                            \
        CHECK_I(H5Sclose(space3_id), "H5Sclose");                                                            \
    } /* define TSOHM_VDE_VERIFY_SPACES */

    /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     * Macro: TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS()
     *
     * Purpose: Encapsulate a common pattern
     *          Wrapper to close and reopen file and datasets:
     *              + "dataset" (dset_id)
     *              + if n > 1 then include "dataset2" (dset_id2)
     *              + if n > 2 then include "dataset3" (dset_id3)
     *              + file (file_id)
     *
     * Programmer: Jacob Smith
     *             2018 November 5
     * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     */
#define TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(n)                                                             \
    {                                                                                                        \
        CHECK_I(H5Dclose(dset1_id), "H5Dclose");                                                             \
        if ((n) > 1)                                                                                         \
            CHECK_I(H5Dclose(dset2_id), "H5Dclose");                                                         \
        if ((n) > 2)                                                                                         \
            CHECK_I(H5Dclose(dset3_id), "H5Dclose");                                                         \
        CHECK_I(H5Fclose(file_id), "H5Fclose");                                                              \
                                                                                                             \
        file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);                                              \
        CHECK_I(file_id, "H5Fopen");                                                                         \
        dset1_id = H5Dopen2(file_id, "dataset", H5P_DEFAULT);                                                \
        CHECK_I(dset1_id, "H5Dopen2");                                                                       \
        if ((n) > 1) {                                                                                       \
            dset2_id = H5Dopen2(file_id, "dataset2", H5P_DEFAULT);                                           \
            CHECK_I(dset2_id, "H5Dopen2");                                                                   \
        }                                                                                                    \
        if ((n) > 2) {                                                                                       \
            dset3_id = H5Dopen2(file_id, "dataset3", H5P_DEFAULT);                                           \
            CHECK_I(dset3_id, "H5Dopen2");                                                                   \
        }                                                                                                    \
    } /* define TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS */

    /* Remember the current # of reported errors */
    old_nerrs = GetTestNumErrs();

    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");

    /* Create property list with chunking */
    dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
    CHECK_I(dcpl_id, "H5Pcreate");
    ret = H5Pset_chunk(dcpl_id, 2, dims1);
    CHECK_I(ret, "H5Pset_chunk");

    /* Create a dataspace and a dataset*/
    orig_space_id = H5Screate_simple(EXTEND_NDIMS, dims1, max_dims);
    CHECK_I(orig_space_id, "H5Screate_simple");
    dset1_id =
        H5Dcreate2(file_id, "dataset", H5T_NATIVE_LONG, orig_space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    CHECK_I(dset1_id, "H5Dcreate2");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(1);

    /* Create another dataset starting with the same dataspace */
    dset2_id =
        H5Dcreate2(file_id, "dataset2", H5T_NATIVE_LONG, orig_space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    CHECK_I(dset2_id, "H5Dcreate2");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(2);

    /* Create a third dataset with the same dataspace */
    dset3_id =
        H5Dcreate2(file_id, "dataset3", H5T_NATIVE_LONG, orig_space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    CHECK_I(dset3_id, "H5Dcreate2");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(3);

    /* Extend the first dataset */
    ret = H5Dset_extent(dset1_id, dims2);
    CHECK_I(ret, "H5Dset_extent");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(3);
    space_dims[0] = dims2;
    space_dims[1] = dims1;
    space_dims[2] = dims1;
    TSOHM_VDE_VERIFY_SPACES(space_dims);

    /* Extend the second dataset */
    ret = H5Dset_extent(dset2_id, dims2);
    CHECK_I(ret, "H5Dset_extent");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(3);
    space_dims[1] = dims2;
    TSOHM_VDE_VERIFY_SPACES(space_dims);

    /* Extend the third dataset */
    ret = H5Dset_extent(dset3_id, dims2);
    CHECK_I(ret, "H5Dset_extent");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(3);
    space_dims[2] = dims2;
    TSOHM_VDE_VERIFY_SPACES(space_dims);

    /* Close the datasets and file */
    ret = H5Dclose(dset1_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Dclose(dset2_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Dclose(dset3_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");

    /* Change the order in which datasets are extended to ensure that there
     * are no problems if a dataspace goes from being shared to not being
     * shared or vice versa.
     */
    file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
    CHECK_I(file_id, "H5Fcreate");
    dset1_id =
        H5Dcreate2(file_id, "dataset", H5T_NATIVE_LONG, orig_space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    CHECK_I(dset1_id, "H5Dcreate2");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(1);

    /* Extend the first dataset */
    ret = H5Dset_extent(dset1_id, dims2);
    CHECK_I(ret, "H5Dset_extent");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(1);

    /* Create the second dataset.  Its dataspace will be unshared and then
     * become shared when extended.
     */
    dset2_id =
        H5Dcreate2(file_id, "dataset2", H5T_NATIVE_LONG, orig_space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    CHECK_I(dset2_id, "H5Dcreate2");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(2);

    /* Extend the second dataset */
    ret = H5Dset_extent(dset2_id, dims2);
    CHECK_I(ret, "H5Dset_extent");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(2);

    /* Create the third dataset.  Its dataspace will be unshared and then
     * become shared when extended.
     */
    dset3_id =
        H5Dcreate2(file_id, "dataset3", H5T_NATIVE_LONG, orig_space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    CHECK_I(dset3_id, "H5Dcreate2");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(3);

    /* Extend the third dataset */
    ret = H5Dset_extent(dset3_id, dims2);
    CHECK_I(ret, "H5Dset_extent");
    if (close_reopen)
        TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS(3);
    /* All three dataspaces should now have the extended dimensions */
    TSOHM_VDE_VERIFY_SPACES(space_dims);

    /* Close the datasets and file */
    ret = H5Dclose(dset1_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Dclose(dset2_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Dclose(dset3_id);
    CHECK_I(ret, "H5Dclose");
    ret = H5Fclose(file_id);
    CHECK_I(ret, "H5Fclose");

    /* Cleanup */
    ret = H5Sclose(orig_space_id);
    CHECK_I(ret, "H5Sclose");
    ret = H5Pclose(dcpl_id);
    CHECK_I(ret, "H5Pclose");

    /* Complain if this test generated errors */
    if (old_nerrs == GetTestNumErrs())
        return (0);
    else
        return (-1);

    /* macros are exclusive to this function */
#undef TSOHM_VDE_CLOSE_REOPEN_FILE_AND_DSETS
#undef TSOHM_VDE_VERIFY_SPACES
} /* verify_dataset_extension */
/*-------------------------------------------------------------------------
* Function: test_sohm_extend_dset
*
* Purpose: Test extended shared dataspaces. An extended dataset's
* dataspace will change, possibly confusing the shared message
* code.
*
* Programmer: James Laird
* Wednesday, January 10, 2007
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_extend_dset(void)
{
2020-09-30 22:27:10 +08:00
hid_t fcpl_id = -1;
herr_t ret;
MESSAGE(5, ("Testing extending shared dataspaces\n"));
fcpl_id = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl_id, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl_id, 1);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
/* No shared messages */
ret = verify_dataset_extension(fcpl_id, FALSE);
CHECK_I(ret, "verify_dataset_extension");
ret = verify_dataset_extension(fcpl_id, TRUE);
CHECK_I(ret, "verify_dataset_extension");
/* Only dataspaces */
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_SDSPACE_FLAG, 16);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = verify_dataset_extension(fcpl_id, FALSE);
CHECK_I(ret, "verify_dataset_extension");
ret = verify_dataset_extension(fcpl_id, TRUE);
CHECK_I(ret, "verify_dataset_extension");
/* All messages */
ret = H5Pset_shared_mesg_index(fcpl_id, 0, H5O_SHMESG_ALL_FLAG, 16);
CHECK_I(ret, "H5Pset_shared_mesg_index");
ret = verify_dataset_extension(fcpl_id, FALSE);
CHECK_I(ret, "verify_dataset_extension");
ret = verify_dataset_extension(fcpl_id, TRUE);
CHECK_I(ret, "verify_dataset_extension");
/* All messages in lists */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 100, 50);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dataset_extension(fcpl_id, FALSE);
CHECK_I(ret, "verify_dataset_extension");
ret = verify_dataset_extension(fcpl_id, TRUE);
CHECK_I(ret, "verify_dataset_extension");
/* All messages in lists converted to B-trees */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 1, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dataset_extension(fcpl_id, FALSE);
CHECK_I(ret, "verify_dataset_extension");
ret = verify_dataset_extension(fcpl_id, TRUE);
CHECK_I(ret, "verify_dataset_extension");
/* All messages in B-trees */
ret = H5Pset_shared_mesg_phase_change(fcpl_id, 0, 0);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
ret = verify_dataset_extension(fcpl_id, FALSE);
CHECK_I(ret, "verify_dataset_extension");
ret = verify_dataset_extension(fcpl_id, TRUE);
CHECK_I(ret, "verify_dataset_extension");
ret = H5Pclose(fcpl_id);
CHECK_I(ret, "H5Pclose");
} /* test_sohm_extend_dset */
/*-------------------------------------------------------------------------
* Function: test_sohm_external_dtype
*
* Purpose: When a datatype is a SOHM type in one file, test that the
* second file using the same datatype actually save it in
* the file, too.
*
* Programmer: Raymond Lu
* 13 October, 2008
*
*-------------------------------------------------------------------------
*/
static void
test_sohm_external_dtype(void)
{
typedef struct s1_t {
int a;
int b;
} s1_t;
s1_t *s_ptr, *orig;
2020-09-30 22:27:10 +08:00
hid_t fcpl, file1, file2;
hid_t dataset1, dataset2;
hid_t s1_tid, dset1_tid, dset2_tid, space;
hsize_t dims[2] = {NX, NY};
H5T_class_t dtype_class;
2020-09-30 22:27:10 +08:00
size_t dmsg_count;
unsigned x, i;
herr_t ret;
MESSAGE(5, ("Testing shared external datatype\n"));
fcpl = H5Pcreate(H5P_FILE_CREATE);
CHECK_I(fcpl, "H5Pcreate");
ret = H5Pset_shared_mesg_nindexes(fcpl, TEST_NUM_INDEXES);
CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
2020-09-30 22:27:10 +08:00
for (x = 0; x < TEST_NUM_INDEXES; ++x) {
ret = H5Pset_shared_mesg_index(fcpl, x, test_type_flags[x], test_minsizes[x]);
CHECK_I(ret, "H5Pset_shared_mesg_index");
}
ret = H5Pset_shared_mesg_phase_change(fcpl, TEST_L2B, TEST_B2L);
CHECK_I(ret, "H5Pset_shared_mesg_phase_change");
space = H5Screate_simple(2, dims, NULL);
CHECK_I(space, "H5Screate_simple");
s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
CHECK_I(s1_tid, "H5Tcreate");
2020-09-30 22:27:10 +08:00
ret = H5Tinsert(s1_tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
CHECK_I(ret, "H5Tinsert");
2020-09-30 22:27:10 +08:00
ret = H5Tinsert(s1_tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
CHECK_I(ret, "H5Tinsert");
2019-08-25 03:07:19 +08:00
/* Set up dataset in first file */
file1 = H5Fcreate(FILENAME_SRC, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
CHECK_I(file1, "H5Fcreate");
/* Check on datatype storage status. It should be zero now. */
ret = H5F__get_sohm_mesg_count_test(file1, H5O_DTYPE_ID, &dmsg_count);
CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
VERIFY(dmsg_count, 0, "H5F__get_sohm_mesg_count_test");
2020-09-30 22:27:10 +08:00
dataset1 = H5Dcreate2(file1, "dataset_1", s1_tid, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK_I(dataset1, "H5Dcreate2");
/* Check on datatype storage status. It should be 1 now. */
ret = H5F__get_sohm_mesg_count_test(file1, H5O_DTYPE_ID, &dmsg_count);
CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
VERIFY(dmsg_count, 1, "H5F__get_sohm_mesg_count_test");
dset1_tid = H5Dget_type(dataset1);
CHECK_I(dset1_tid, "H5Dget_type");
/* Allocate space and initialize data */
2020-09-30 22:27:10 +08:00
orig = (s1_t *)HDmalloc(NX * NY * sizeof(s1_t));
if (orig == NULL)
CHECK_I(-1, "HDmalloc");
2020-09-30 22:27:10 +08:00
for (i = 0; i < NX * NY; i++) {
s_ptr = (s1_t *)orig + i;
s_ptr->a = (int)(i * 3 + 1);
s_ptr->b = (int)(i * 3 + 2);
}
ret = H5Dwrite(dataset1, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, orig);
CHECK_I(ret, "H5Dwrite");
ret = H5Dclose(dataset1);
CHECK_I(ret, "H5Dclose");
2019-08-25 03:07:19 +08:00
/* Create dataset in second file using datatype from dataset in the first file */
file2 = H5Fcreate(FILENAME_DST, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
CHECK_I(file2, "H5Fcreate");
/* Check on datatype storage status. It should be zero now. */
ret = H5F__get_sohm_mesg_count_test(file2, H5O_DTYPE_ID, &dmsg_count);
CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
VERIFY(dmsg_count, 0, "H5F__get_sohm_mesg_count_test");
2020-09-30 22:27:10 +08:00
dataset2 = H5Dcreate2(file2, "dataset_2", dset1_tid, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK_I(dataset2, "H5Dcreate2");
/* Check on datatype storage status. It should be 1 now. */
ret = H5F__get_sohm_mesg_count_test(file2, H5O_DTYPE_ID, &dmsg_count);
CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
VERIFY(dmsg_count, 1, "H5F__get_sohm_mesg_count_test");
ret = H5Dwrite(dataset2, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, orig);
CHECK_I(ret, "H5Dwrite");
/* Close references to the first file */
ret = H5Dclose(dataset2);
CHECK_I(ret, "H5Dclose");
ret = H5Tclose(dset1_tid);
CHECK_I(ret, "H5Tclose");
ret = H5Fclose(file1);
CHECK_I(ret, "H5Fclose");
2019-08-25 03:07:19 +08:00
/* Verify that datatype details are still accessible by second file */
dataset2 = H5Dopen2(file2, "dataset_2", H5P_DEFAULT);
CHECK_I(dataset2, "H5Dopen2");
dset2_tid = H5Dget_type(dataset2);
CHECK_I(dset2_tid, "H5Dget_type");
dtype_class = H5Tget_class(dset2_tid);
VERIFY(dtype_class, H5T_COMPOUND, "H5Tget_class");
2019-08-25 03:07:19 +08:00
/* Cleanup */
ret = H5Tclose(dset2_tid);
CHECK_I(ret, "H5Tclose");
ret = H5Dclose(dataset2);
CHECK_I(ret, "H5Dclose");
ret = H5Sclose(space);
CHECK_I(ret, "H5Sclose");
ret = H5Tclose(s1_tid);
CHECK_I(ret, "H5Tclose");
ret = H5Pclose(fcpl);
CHECK_I(ret, "H5Pclose");
ret = H5Fclose(file2);
CHECK_I(ret, "H5Fclose");
HDfree(orig);
} /* test_sohm_external_dtype */
/****************************************************************
**
** test_sohm(): Main Shared Object Header Message testing routine.
**
****************************************************************/
void
test_sohm(void)
{
    const char *env_h5_drvr;    /* VFD name from the environment, or "nomatch" */
    hbool_t     default_driver; /* TRUE iff the default (sec2) driver is in use */

    MESSAGE(5, ("Testing Shared Object Header Messages\n"));

    /* Get the VFD to use; several size-sensitive sub-tests only make
     * sense with the default (sec2) driver.
     */
    env_h5_drvr = HDgetenv(HDF5_DRIVER);
    if (env_h5_drvr == NULL)
        env_h5_drvr = "nomatch";

    default_driver = h5_using_default_driver(env_h5_drvr);

    test_sohm_fcpl();        /* Test SOHMs and file creation plists */
    test_sohm_fcpl_errors(); /* Bogus H5P* calls for SOHMs */

    /* Only run this test with sec2/default driver */
    if (default_driver)
        test_sohm_size1(); /* Tests the sizes of files with one SOHM */

#if 0 /* TODO: REVEALS BUG TO BE FIXED - SEE JIRA HDFFV-10645 */
    test_sohm_size_consistency_open_create();
#endif /* Jira HDFFV-10645 */

    test_sohm_attrs(); /* Tests shared messages in attributes */

    /* Only run these tests with sec2/default driver */
    if (default_driver) {
        test_sohm_size2(0); /* Tests the sizes of files with multiple SOHMs */
        test_sohm_size2(1); /* Tests the sizes of files with multiple
                             * SOHMs, closing and reopening file after
                             * each write. */
    }

    test_sohm_delete();         /* Test deleting shared messages */
    test_sohm_delete_revert();  /* Test that a file with SOHMs becomes an
                                 * empty file again when they are deleted. */
    test_sohm_extlink();        /* Test SOHMs when external links are used */
    test_sohm_extend_dset();    /* Test extending shared datasets */
    test_sohm_external_dtype(); /* Test using datatype in another file */
} /* test_sohm */
/*-------------------------------------------------------------------------
* Function: cleanup_sohm
*
* Purpose: Cleanup temporary test files
*
* Return: none
*
* Programmer: James Laird
* October 9, 2006
*
*-------------------------------------------------------------------------
*/
void
cleanup_sohm(void)
{
2019-01-11 09:51:42 +08:00
HDremove(FILENAME);
HDremove(FILENAME_SRC);
HDremove(FILENAME_DST);
} /* cleanup_sohm */