mirror of https://github.com/HDFGroup/hdf5.git
Edits to the file open optimization and associated test code to bring them closer to the HDF5 library's unwritten coding standards. Also a bug fix to repair a hang in testphdf5. Tested parallel/debug on Charis and Jelly, and parallel/production on Jelly.
This commit is contained in: parent da0f8223df, commit ceab5a5176
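The optimization at the heart of this change has MPI rank 0 locate the superblock signature and broadcast the resulting address, so only one process pays the cost of scanning the file. Below is a minimal standalone sketch of that pattern; locate_signature() is a placeholder for the library's H5FD_locate_signature(), ADDR_UNDEF stands in for HADDR_UNDEF, and the plain return codes stand in for the HGOTO_ERROR/HMPI_GOTO_ERROR macros used in the real code.

#include <mpi.h>
#include <stdint.h>

#define ADDR_UNDEF UINT64_MAX   /* stand-in for HADDR_UNDEF */

/* Placeholder for the real signature search (H5FD_locate_signature). */
extern int locate_signature(uint64_t *addr_out);

/* Rank 0 scans the file for the superblock signature; all other ranks
 * receive the address via broadcast.  Returns 0 on success, -1 on failure.
 */
static int
locate_signature_collectively(MPI_Comm comm, uint64_t *super_addr)
{
    int mpi_rank;

    if (MPI_Comm_rank(comm, &mpi_rank) != MPI_SUCCESS)
        return -1;

    *super_addr = ADDR_UNDEF;

    /* Only rank 0 touches the file -- one scan instead of mpi_size scans. */
    if (mpi_rank == 0 && locate_signature(super_addr) < 0)
        *super_addr = ADDR_UNDEF;

    /* Everyone gets rank 0's answer; the real code likewise broadcasts
     * sizeof(super_addr) bytes from rank 0.
     */
    if (MPI_Bcast(super_addr, sizeof(*super_addr), MPI_BYTE, 0, comm) != MPI_SUCCESS)
        return -1;

    return (*super_addr == ADDR_UNDEF) ? -1 : 0;
}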
@@ -29,6 +29,7 @@
#endif /* H5_HAVE_PARALLEL */

#ifdef H5_HAVE_PARALLEL
#if 0 /* delete this eventually */
#define H5FD_GET_MPI_RANK_AND_SIZE(rank,size, f) {     \
    (rank) = 0; (size) = 1;                            \
    if (H5F_HAS_FEATURE((f), H5FD_FEAT_HAS_MPI)) {     \
@@ -49,6 +50,7 @@
        (comm) = H5F_mpi_get_comm((f));                \
    else (comm) = MPI_COMM_WORLD;                      \
}
#endif /* delete this eventually */

/* Turn on H5FDmpio_debug if H5F_DEBUG is on */
#ifdef H5F_DEBUG
@@ -333,7 +333,10 @@ H5F__super_read(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t initial
    unsigned rw_flags;              /* Read/write permissions for file */
    hbool_t skip_eof_check = FALSE; /* Whether to skip checking the EOF value */
    herr_t ret_value = SUCCEED;     /* Return value */
#ifdef H5_HAVE_PARALLEL
    int mpi_rank = 0, mpi_size = 1;
    int mpi_result;
#endif /* H5_HAVE_PARALLEL */

    FUNC_ENTER_PACKAGE_TAG(meta_dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)

@@ -356,21 +359,43 @@ H5F__super_read(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t initial

    /* Find the superblock */
#ifdef H5_HAVE_PARALLEL
#if 0
    H5FD_GET_MPI_RANK_AND_SIZE(mpi_rank, mpi_size, f);
#else
    if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {

        if((mpi_rank = H5F_mpi_get_rank(f)) < 0)
            HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "Can't get MPI rank")

        if((mpi_size = H5F_mpi_get_size(f)) < 0)
            HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "can't retrieve MPI communicator size")
    }
#endif

    /* If we are an MPI application with at least two processes, the
     * following superblock signature location optimization is applicable.
     */
    if ( mpi_size > 1 ) {
        MPI_Comm this_comm = MPI_COMM_NULL;

        if ( mpi_rank == 0 ) {
            if(H5FD_locate_signature(&fdio_info, &super_addr) < 0)
                HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable to locate file signature")
        }
#if 0
        H5FD_GET_MPI_COMM(this_comm, f);
        if (( this_comm == MPI_COMM_NULL ) ||
            ( MPI_Bcast(&super_addr, sizeof(super_addr), MPI_BYTE, 0, this_comm) != MPI_SUCCESS))
            HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable to locate file signature")
#else
        HDassert(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI));

        if ( MPI_COMM_NULL == (this_comm = H5F_mpi_get_comm(f)) )
            HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get MPI communicator")

        if ( MPI_SUCCESS !=
             (mpi_result = MPI_Bcast(&super_addr, sizeof(super_addr), MPI_BYTE, 0, this_comm)))
            HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
#endif
    }
    else {
        /* Locate the signature as per the serial library */
@@ -381,7 +406,7 @@ H5F__super_read(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t initial

#ifdef H5_HAVE_PARALLEL
    }
#endif /* H5_HAVE_PARALLEL */
    if(HADDR_UNDEF == super_addr)
        HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "file signature not found")
@@ -1,196 +1,693 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5.  The full HDF5 copyright notice, including     *
 * terms governing use, modification, and redistribution, is contained in    *
 * the COPYING file, which can be found at the root of the source code       *
 * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.  *
 * If you do not have access to either file, you may request a copy from     *
 * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Collective file open optimization tests
 *
 */

#include "h5test.h"
#include "testpar.h"

#define NFILENAME 3
const char *FILENAMES[NFILENAME + 1]={"t_pread_data_file",
                                      "reloc_t_pread_data_file",
                                      "prefix_file",
                                      NULL};
#define FILENAME_BUF_SIZE 1024

#define COUNT 1000

hbool_t pass = true;
static const char *random_hdf5_text =
"Now is the time for all first-time-users of HDF5 to read their \
manual or go thru the tutorials!\n\
While you\'re at it, now is also the time to read up on MPI-IO.";

static int generate_test_file(int mpi_rank, int mpi_size);
static int test_parallel_read(int mpi_rank);

/*-------------------------------------------------------------------------
 * Function:    generate_test_file
 *
 * Purpose:     *** Richard -- please fill this in ***
 *
 * Return:      Success: 0
 *
 *              Failure: 1
 *
 * Programmer:  Richard Warren
 *              10/1/17
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static int
generate_test_file( int mpi_rank, int mpi_size )
{
    FILE *header;
    const char *fcn_name = "generate_test_file()";
    const char *failure_mssg = NULL;
    char data_filename[FILENAME_BUF_SIZE];
    char reloc_data_filename[FILENAME_BUF_SIZE];
    char prolog_filename[FILENAME_BUF_SIZE];
    int local_failure = 0;
    int global_failures = 0;
    hsize_t count = COUNT;
    hsize_t i;
    hsize_t offset;
    hsize_t dims[1] = {0};
    hid_t file_id;
    hid_t memspace;
    hid_t filespace;
    hid_t fapl_id;
    hid_t dxpl_id;
    hid_t dset_id;
    float nextValue;
    float *data_slice = NULL;

    pass = true;
    if ( mpi_rank == 0 ) {

        HDfprintf(stdout, "Constructing test files...");
    }

    /* setup the file names */
    if ( pass ) {
        HDassert(FILENAMES[0]);

        if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, data_filename,
                        sizeof(data_filename)) == NULL ) {
            pass = FALSE;
            failure_mssg = "h5_fixname(0) failed.\n";
        }
    }
    if ( pass ) {
        HDassert(FILENAMES[1]);

        if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, reloc_data_filename,
                        sizeof(reloc_data_filename)) == NULL ) {
            pass = FALSE;
            failure_mssg = "h5_fixname(1) failed.\n";
        }
    }
    if ( pass ) {
        HDassert(FILENAMES[2]);

        if ( h5_fixname(FILENAMES[2], H5P_DEFAULT, prolog_filename,
                        sizeof(prolog_filename)) == NULL ) {
            pass = FALSE;
            failure_mssg = "h5_fixname(2) failed.\n";
        }
    }
    /* setup data to write */
    if ( pass ) {
        if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
            pass = FALSE;
            failure_mssg = "malloc of data_slice failed.\n";
        }
    }

    if ( pass ) {
        nextValue = (float)(mpi_rank * COUNT);

        for(i=0; i<COUNT; i++) {
            data_slice[i] = nextValue;
            nextValue += 1;
        }
    }
    /* setup FAPL */
    if ( pass ) {
        if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
        }
    }

    if ( pass ) {
        if ( (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Pset_fapl_mpio() failed\n";
        }
    }

    /* create the data file */
    if ( pass ) {
        if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC,
                                  H5P_DEFAULT, fapl_id)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Fcreate() failed.\n";
        }
    }

    /* create and write the dataset */
    if ( pass ) {
        if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
        }
    }

    if ( pass ) {
        if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
        }
    }

    if ( pass ) {
        dims[0] = COUNT;
        if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Screate_simple(1, dims, NULL) failed (1).\n";
        }
    }

    if ( pass ) {
        dims[0] *= (hsize_t)mpi_size;
        if ( (filespace = H5Screate_simple(1, dims, NULL)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Screate_simple(1, dims, NULL) failed (2).\n";
        }
    }

    if ( pass ) {
        offset = (hsize_t)mpi_rank * (hsize_t)COUNT;
        if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset,
                                  NULL, &count, NULL)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Sselect_hyperslab() failed.\n";
        }
    }

    if ( pass ) {
        if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT,
                                   filespace, H5P_DEFAULT, H5P_DEFAULT,
                                   H5P_DEFAULT)) < 0 ) {
            pass = false;
            failure_mssg = "H5Dcreate2() failed.\n";
        }
    }

    if ( pass ) {
        if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace,
                       filespace, dxpl_id, data_slice)) < 0 ) {
            pass = false;
            failure_mssg = "H5Dwrite() failed.\n";
        }
    }

    /* close file, etc. */
    if ( pass ) {
        if ( H5Dclose(dset_id) < 0 ) {
            pass = false;
            failure_mssg = "H5Dclose(dset_id) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Sclose(memspace) < 0 ) {
            pass = false;
            failure_mssg = "H5Sclose(memspace) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Sclose(filespace) < 0 ) {
            pass = false;
            failure_mssg = "H5Sclose(filespace) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Fclose(file_id) < 0 ) {
            pass = false;
            failure_mssg = "H5Fclose(file_id) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Pclose(dxpl_id) < 0 ) {
            pass = false;
            failure_mssg = "H5Pclose(dxpl_id) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Pclose(fapl_id) < 0 ) {
            pass = false;
            failure_mssg = "H5Pclose(fapl_id) failed.\n";
        }
    }

    /* Add a userblock to the head of the datafile.
     * We will use this for a functional test of the
     * file open optimization.
     *
     * Also delete files that are no longer needed.
     */
    if ( mpi_rank == 0 ) {

        size_t bytes_to_write;

        bytes_to_write = strlen(random_hdf5_text);

        if ( pass ) {
            if ( (header = HDfopen(prolog_filename, "w+")) == NULL ) {
                pass = FALSE;
                failure_mssg = "HDfopen(prolog_filename, \"w+\") failed.\n";
            }
        }

        if ( pass ) {
            if ( HDfwrite(random_hdf5_text, 1, bytes_to_write, header) !=
                 bytes_to_write ) {
                pass = FALSE;
                failure_mssg = "Unable to write header file.\n";
            }
        }

        if ( pass ) {
            if ( HDfclose(header) != 0 ) {
                pass = FALSE;
                failure_mssg = "HDfclose() failed.\n";
            }
        }

        if ( pass ) {
            char cmd[256];

            HDsprintf(cmd, "../tools/src/h5jam/h5jam -i %s -u %s -o %s",
                      data_filename, prolog_filename, reloc_data_filename);

            if ( system(cmd) != 0 ) {
                pass = FALSE;
                failure_mssg = "invocation of h5jam failed.\n";
            }
        }

        if ( pass ) {
            HDremove(prolog_filename);
            HDremove(data_filename);
        }
    }

    /* collect results from other processes.
     * Only overwrite the failure message if no previous error
     * has been detected.
     */
    local_failure = ( pass ? 0 : 1 );

    if ( MPI_Allreduce(&local_failure, &global_failures, 1,
                       MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
        if ( pass ) {
            pass = FALSE;
            failure_mssg = "MPI_Allreduce() failed.\n";
        }
    } else if ( ( pass ) && ( global_failures > 0 ) ) {
        pass = FALSE;
        failure_mssg = "One or more processes report failure.\n";
    }

    /* report results */
    if ( mpi_rank == 0 ) {
        if ( pass ) {
            HDfprintf(stdout, "Done.\n");
        } else {
            HDfprintf(stdout, "FAILED.\n");
            HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
                      fcn_name, failure_mssg);
        }
    }

    /* free data_slice if it has been allocated */
    if ( data_slice != NULL ) {
        HDfree(data_slice);
        data_slice = NULL;
    }

    return( ! pass );

} /* generate_test_file() */
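For reference, the h5jam step above is what gives the test its teeth: prepending a user block moves the HDF5 superblock signature away from byte offset 0, so the subsequent H5Fopen() must search for it (the library probes offset 0, then 512, and successive doublings thereafter), which is exactly the code path the rank-0-plus-broadcast optimization in H5F__super_read() is meant to accelerate.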

/*-------------------------------------------------------------------------
 * Function:    test_parallel_read
 *
 * Purpose:     *** Richard -- please fill this in ***
 *
 * Return:      Success: 0
 *
 *              Failure: 1
 *
 * Programmer:  Richard Warren
 *              10/1/17
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
static int
test_parallel_read(int mpi_rank)
{
    const char *failure_mssg;
    const char *fcn_name = "test_parallel_read()";
    char reloc_data_filename[FILENAME_BUF_SIZE];
    int local_failure = 0;
    int global_failures = 0;
    hid_t fapl_id;
    hid_t file_id;
    hid_t dset_id;
    hid_t memspace = -1;
    hid_t filespace = -1;
    hsize_t i;
    hsize_t offset;
    hsize_t count = COUNT;
    hsize_t dims[1] = {0};
    float nextValue;
    float *data_slice = NULL;

    pass = TRUE;

    /* allocate space for the data_slice array */
    if ( pass ) {
        if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
            pass = FALSE;
            failure_mssg = "malloc of data_slice failed.\n";
        }
    }
    /* construct the file name */
    if ( pass ) {
        HDassert(FILENAMES[1]);

        if ( h5_fixname(FILENAMES[1], H5P_DEFAULT, reloc_data_filename,
                        sizeof(reloc_data_filename)) == NULL ) {
            pass = FALSE;
            failure_mssg = "h5_fixname(1) failed.\n";
        }
    }

    /* setup FAPL */
    if ( pass ) {
        if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
        }
    }

    if ( pass ) {
        if ( (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Pset_fapl_mpio() failed\n";
        }
    }

    /* open the file -- should have user block, exercising the optimization */
    if ( pass ) {
        if ( (file_id = H5Fopen(reloc_data_filename,
                                H5F_ACC_RDONLY, fapl_id)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Fopen() failed\n";
        }
    }

    /* open the data set */
    if ( pass ) {
        if ( (dset_id = H5Dopen2(file_id, "dataset0", H5P_DEFAULT)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Dopen2() failed\n";
        }
    }

    /* setup memspace */
    if ( pass ) {
        dims[0] = count;
        if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
        }
    }

    /* setup filespace */
    if ( pass ) {
        if ( (filespace = H5Dget_space(dset_id)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Dget_space(dataset) failed\n";
        }
    }

    if ( pass ) {
        offset = (hsize_t)mpi_rank * count;
        if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
                                  &offset, NULL, &count, NULL)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Sselect_hyperslab() failed\n";
        }
    }

    /* read this process's section of the data */
    if ( pass ) {
        if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
                      filespace, H5P_DEFAULT, data_slice)) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Dread() failed\n";
        }
    }

    /* verify the data */
    if ( pass ) {
        nextValue = (float)((hsize_t)mpi_rank * count);
        i = 0;
        while ( ( pass ) && ( i < count ) ) {
            /* what we really want is data_slice[i] != nextValue --
             * the following is a circumlocution to shut up the
             * compiler.
             */
            if ( ( data_slice[i] > nextValue ) ||
                 ( data_slice[i] < nextValue ) ) {
                pass = FALSE;
                failure_mssg = "Unexpected dset contents.\n";
            }
            nextValue += 1;
            i++;
        }
    }

    /* close file, etc. */
    if ( pass ) {
        if ( H5Dclose(dset_id) < 0 ) {
            pass = false;
            failure_mssg = "H5Dclose(dset_id) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Sclose(memspace) < 0 ) {
            pass = false;
            failure_mssg = "H5Sclose(memspace) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Sclose(filespace) < 0 ) {
            pass = false;
            failure_mssg = "H5Sclose(filespace) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Fclose(file_id) < 0 ) {
            pass = false;
            failure_mssg = "H5Fclose(file_id) failed.\n";
        }
    }

    if ( pass ) {
        if ( H5Pclose(fapl_id) < 0 ) {
            pass = false;
            failure_mssg = "H5Pclose(fapl_id) failed.\n";
        }
    }

    /* collect results from other processes.
     * Only overwrite the failure message if no previous error
     * has been detected.
     */
    local_failure = ( pass ? 0 : 1 );

    if ( MPI_Allreduce( &local_failure, &global_failures, 1,
                        MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
        if ( pass ) {
            pass = FALSE;
            failure_mssg = "MPI_Allreduce() failed.\n";
        }
    } else if ( ( pass ) && ( global_failures > 0 ) ) {
        pass = FALSE;
        failure_mssg = "One or more processes report failure.\n";
    }

    /* report results and finish cleanup */
    if ( mpi_rank == 0 ) {
        if ( pass ) {
            PASSED();
        } else {
            H5_FAILED();
            HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
                      fcn_name, failure_mssg);
        }

        HDremove(reloc_data_filename);
    }

    /* free data_slice if it has been allocated */
    if ( data_slice != NULL ) {
        HDfree(data_slice);
        data_slice = NULL;
    }

    return( ! pass );

} /* test_parallel_read() */

/*-------------------------------------------------------------------------
 * Function:    main
 *
 * Purpose:     *** Richard -- please fill this in ***
 *
 * WARNING:     This test uses fork() and execve(), and
 *              therefore will not run on Windows.
 *
 * Return:      Success: 0
 *
 *              Failure: 1
 *
 * Programmer:  Richard Warren
 *              10/1/17
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */

int
main( int argc, char **argv)
{
    int nerrs = 0;
    int mpi_rank;
    int mpi_size;

    if ( (MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
        HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
        exit(1);
    }

    if ( (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
        HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
        exit(2);
    }

    if ( (MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
        HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
        exit(2);
    }
    H5open();

    if ( mpi_rank == 0 ) {
        HDfprintf(stdout, "========================================\n");
        HDfprintf(stdout, "Collective file open optimization tests\n");
        HDfprintf(stdout, "        mpi_size     = %d\n", mpi_size);
        HDfprintf(stdout, "========================================\n");
    }

    if ( mpi_size < 2 ) {

        if ( mpi_rank == 0 ) {

            HDprintf("    Need at least 2 processes.  Exiting.\n");
        }
        goto finish;
    }

    /* create the test files & verify that the process
     * succeeded.  If not, abort the remaining tests as
     * they depend on the test files.
     */
    nerrs += generate_test_file( mpi_rank, mpi_size );

    /* abort tests if there were any errors in test file construction */
    if ( nerrs > 0 ) {
        if ( mpi_rank == 0 ) {
            HDprintf("    Test file construction failed -- skipping tests.\n");
        }
        goto finish;
    }

    /* run the tests */
    nerrs += test_parallel_read(mpi_rank);

finish:

    /* make sure all processes are finished before final report, cleanup
     * and exit.
     */
    MPI_Barrier(MPI_COMM_WORLD);

    if ( mpi_rank == 0 ) {           /* only process 0 reports */
        const char *header = "Collective file open optimization tests";

        HDfprintf(stdout, "===================================\n");
        if ( nerrs > 0 ) {

            HDfprintf(stdout, "***%s detected %d failures***\n", header, nerrs);
        }
        else {
            HDfprintf(stdout, "%s finished with no failures\n", header);
        }
        HDfprintf(stdout, "===================================\n");
    }

    /* close HDF5 library */
    H5close();

    /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
    MPI_Finalize();

    /* cannot just return (nerrs) because exit code is limited to 1 byte */
    return(nerrs > 0);

} /* main() */
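Assuming the test builds to a binary named t_pread (the name is inferred from the FILENAMES prefix above and may differ in the actual build system), a two-rank smoke test might look like `mpiexec -n 2 ./t_pread`. At least two ranks are required: as main() shows, the test exits early when mpi_size < 2, since the signature-broadcast optimization only engages when mpi_size > 1.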