/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Purpose: This is the first half of a two-part test that makes sure
* that a file can be read after a parallel application crashes
* as long as the file was flushed first. We simulate a crash by
* calling _exit() since this doesn't flush HDF5 caches but
* still exits with success.
*/
#include "h5test.h"
static const char *FILENAME[] = {"flush", "noflush", NULL};
static int *data_g = NULL;
#define N_GROUPS 100
/*-------------------------------------------------------------------------
 * Function:    create_test_file
 *
 * Purpose:     Creates the file used in part 1 of the test: a 100x100
 *              chunked integer dataset written collectively, plus a group
 *              containing N_GROUPS empty subgroups.
 *
 *              NOTE: `name` is used first as the file name, then reused as
 *              a scratch buffer for the group names, so its original
 *              contents are clobbered on return.
 *
 *              Preconditions: data_g must already be allocated (100*100
 *              ints) by the caller; fapl_id must be an MPI-IO file access
 *              property list.
 *
 *              The dataset/dataspace/property-list IDs are deliberately
 *              left open on success: this test simulates a crash and never
 *              closes the library.
 *
 * Parameters:  name        - in/out: file name; reused as scratch space
 *              name_length - size of the `name` buffer in bytes
 *              fapl_id     - file access property list (MPI-IO)
 *
 * Return:      Success:    A valid file ID
 *              Failure:    H5I_INVALID_HID
 *-------------------------------------------------------------------------
 */
static hid_t
create_test_file(char *name, size_t name_length, hid_t fapl_id)
{
    hid_t   fid           = H5I_INVALID_HID;
    hid_t   dcpl_id       = H5I_INVALID_HID;
    hid_t   sid           = H5I_INVALID_HID;
    hid_t   did           = H5I_INVALID_HID;
    hid_t   top_level_gid = H5I_INVALID_HID;
    hid_t   gid           = H5I_INVALID_HID;
    hid_t   dxpl_id       = H5I_INVALID_HID;
    hsize_t dims[2]       = {100, 100};
    hsize_t chunk_dims[2] = {5, 5};
    hsize_t i, j;

    if ((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
        goto error;

    /* Create a chunked dataset */
    if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        goto error;

    if (H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0)
        goto error;

    if ((sid = H5Screate_simple(2, dims, NULL)) < 0)
        goto error;

    /* BUG FIX: pass dcpl_id as the dataset creation property list (was
     * H5P_DEFAULT), otherwise the chunked layout configured above is
     * silently ignored and the dataset is created with contiguous layout.
     */
    if ((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
        goto error;

    /* Set up collective I/O for the write */
    if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
        goto error;

    if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
        goto error;

    /* Write some data (every rank writes the full, identical buffer) */
    for (i = 0; i < dims[0]; i++)
        for (j = 0; j < dims[1]; j++)
            data_g[(i * 100) + j] = (int)(i + (i * j) + j);

    if (H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
        goto error;

    /* Create some groups (reuses `name` as a scratch buffer) */
    if ((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        goto error;

    for (i = 0; i < N_GROUPS; i++) {
        snprintf(name, name_length, "grp%02u", (unsigned)i);

        if ((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
            goto error;

        if (H5Gclose(gid) < 0)
            goto error;
    }

    return fid;

error:
    /* No cleanup: the caller _exit()s on failure, so leaked IDs are moot */
    return H5I_INVALID_HID;
} /* end create_test_file() */
/*-------------------------------------------------------------------------
2018-02-15 00:08:09 +08:00
* Function: main
*
* Purpose: Part 1 of a two-part parallel H5Fflush() test.
*
* Return: EXIT_FAILURE (always)
*-------------------------------------------------------------------------
*/
int
2020-09-30 22:27:10 +08:00
main(int argc, char *argv[])
{
2020-09-30 22:27:10 +08:00
hid_t fid1 = H5I_INVALID_HID;
hid_t fid2 = H5I_INVALID_HID;
hid_t fapl_id = H5I_INVALID_HID;
MPI_File *mpifh_p = NULL;
char name[1024];
const char *driver_name;
int mpi_size;
int mpi_rank;
2020-09-30 22:27:10 +08:00
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
2020-09-30 22:27:10 +08:00
if (mpi_rank == 0)
TESTING("H5Fflush (part1)");
/* Don't run using the split VFD */
driver_name = h5_get_test_driver_name();
if (!strcmp(driver_name, "split")) {
2020-09-30 22:27:10 +08:00
if (mpi_rank == 0) {
SKIPPED();
puts(" Test not compatible with current Virtual File Driver");
}
MPI_Finalize();
exit(EXIT_FAILURE);
}
if (NULL == (data_g = malloc(100 * 100 * sizeof(*data_g))))
goto error;
2020-09-30 22:27:10 +08:00
if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
goto error;
2020-09-30 22:27:10 +08:00
if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0)
goto error;
2018-02-15 00:08:09 +08:00
/* Create the file */
h5_fixname(FILENAME[0], fapl_id, name, sizeof(name));
if ((fid1 = create_test_file(name, sizeof(name), fapl_id)) < 0)
goto error;
2018-02-15 00:08:09 +08:00
/* Flush and exit without closing the library */
2020-09-30 22:27:10 +08:00
if (H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
goto error;
2018-02-15 00:08:09 +08:00
/* Create the other file which will not be flushed */
h5_fixname(FILENAME[1], fapl_id, name, sizeof(name));
if ((fid2 = create_test_file(name, sizeof(name), fapl_id)) < 0)
goto error;
2020-09-30 22:27:10 +08:00
if (mpi_rank == 0)
2018-02-15 00:08:09 +08:00
PASSED();
fflush(stdout);
fflush(stderr);
/* Some systems like AIX do not like files not being closed when MPI_Finalize
* is called. So, we need to get the MPI file handles, close them by hand.
* Then the _exit is still needed to stop at_exit from happening in some systems.
* Note that MPIO VFD returns the address of the file-handle in the VFD struct
* because MPI_File_close wants to modify the file-handle variable.
*/
/* Close file 1 */
2020-09-30 22:27:10 +08:00
if (H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0)
goto error;
2020-09-30 22:27:10 +08:00
if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
goto error;
/* Close file 2 */
2020-09-30 22:27:10 +08:00
if (H5Fget_vfd_handle(fid2, fapl_id, (void **)&mpifh_p) < 0)
goto error;
2020-09-30 22:27:10 +08:00
if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
goto error;
fflush(stdout);
fflush(stderr);
if (data_g) {
free(data_g);
data_g = NULL;
}
/* Always exit with a failure code!
*
* In accordance with the standard, not having all processes
* call MPI_Finalize() can be considered an error, so mpiexec
* et al. may indicate failure on return. It's much easier to
* always ignore the failure condition than to handle some
* platforms returning success and others failure.
*/
_exit(EXIT_FAILURE);
error:
fflush(stdout);
fflush(stderr);
printf("*** ERROR ***\n");
printf("THERE WAS A REAL ERROR IN t_pflush1.\n");
fflush(stdout);
if (data_g)
free(data_g);
_exit(EXIT_FAILURE);
} /* end main() */