/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5.  The full HDF5 copyright notice, including     *
 * terms governing use, modification, and redistribution, is contained in    *
 * the COPYING file, which can be found at the root of the source code       *
 * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.  *
 * If you do not have access to either file, you may request a copy from     *
 * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Parallel tests for file operations
 */

#include "testphdf5.h"

#include "H5CXprivate.h" /* API Contexts */
#include "H5Iprivate.h"
#include "H5PBprivate.h"

/*
 * This file needs to access private information from the H5F package.
 */
#define H5AC_FRIEND /* suppress error about including H5ACpkg */
#include "H5ACpkg.h"
#define H5C_FRIEND /* suppress error about including H5Cpkg */
#include "H5Cpkg.h"
#define H5F_FRIEND /* suppress error about including H5Fpkg */
#define H5F_TESTING
#include "H5Fpkg.h"
#define H5MF_FRIEND /* suppress error about including H5MFpkg */
#include "H5MFpkg.h"

#define NUM_DSETS 5

int mpi_size, mpi_rank;

static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
                     size_t page_buffer_size);

/*
 * Test file access by a communicator other than COMM_WORLD.
 * Split COMM_WORLD into two: one communicator (even_comm) contains the
 * original processes of even rank, the other (odd_comm) contains the
 * original processes of odd rank.  Processes in even_comm create a file,
 * then close it, using even_comm.  Processes in odd_comm just do a barrier
 * using odd_comm.  Then they all do a barrier using COMM_WORLD.
 * If the file creation and close do not perform the correct collective
 * actions according to the communicator argument, the processes will
 * freeze up sooner or later due to mismatched barriers.
 */
void
test_split_comm_access(void)
{
    MPI_Comm    comm;
    MPI_Info    info = MPI_INFO_NULL;
    int         is_old, mrc;
    int         newrank, newprocs;
    hid_t       fid;     /* file IDs */
    hid_t       acc_tpl; /* File access properties */
    herr_t      ret;     /* generic return value */
    const char *filename;

    filename = (const char *)GetTestParameters();
    if (VERBOSE_MED)
        HDprintf("Split Communicator access test on file %s\n", filename);

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    is_old = mpi_rank % 2;
    mrc    = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm);
    VRFY((mrc == MPI_SUCCESS), "");
    MPI_Comm_size(comm, &newprocs);
    MPI_Comm_rank(comm, &newrank);

    if (is_old) {
        /* odd-rank processes */
        mrc = MPI_Barrier(comm);
        VRFY((mrc == MPI_SUCCESS), "");
    }
    else {
        /* even-rank processes */
        int sub_mpi_rank; /* rank in the sub-comm */

        MPI_Comm_rank(comm, &sub_mpi_rank);

        /* setup file access template */
        acc_tpl = create_faccess_plist(comm, info, facc_type);
        VRFY((acc_tpl >= 0), "");

        /* create the file collectively */
        fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
        VRFY((fid >= 0), "H5Fcreate succeeded");

        /* Release file-access template */
        ret = H5Pclose(acc_tpl);
        VRFY((ret >= 0), "");

        /* close the file */
        ret = H5Fclose(fid);
        VRFY((ret >= 0), "");

        /* delete the test file */
        if (sub_mpi_rank == 0) {
            mrc = MPI_File_delete((char *)filename, info);
            /*VRFY((mrc==MPI_SUCCESS), ""); */
        }
    }
    mrc = MPI_Comm_free(&comm);
    VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
    mrc = MPI_Barrier(MPI_COMM_WORLD);
    VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded");
}
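
/*
 * Exercise the page buffer with parallel file access: first from a single
 * process using MPI_COMM_SELF, then (when more than one rank is available)
 * from all processes using MPI_COMM_WORLD.  Raw data and metadata blocks
 * are written and read back through the internal H5F_block_write() /
 * H5F_block_read() routines while the number of pages resident in the page
 * buffer is checked against the expected count.
 */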
void
test_page_buffer_access(void)
{
    hid_t       file_id = -1; /* File ID */
    hid_t       fcpl, fapl;
    size_t      page_count = 0;
    int         i, num_elements = 200;
    haddr_t     raw_addr, meta_addr;
    int        *data;
    H5F_t      *f = NULL;
    herr_t      ret; /* generic return value */
    const char *filename;
    hbool_t     api_ctx_pushed = FALSE; /* Whether API context pushed */

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    filename = (const char *)GetTestParameters();

    if (VERBOSE_MED)
        HDprintf("Page Buffer Usage in Parallel %s\n", filename);

    fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
    VRFY((fapl >= 0), "create_faccess_plist succeeded");
    fcpl = H5Pcreate(H5P_FILE_CREATE);
    VRFY((fcpl >= 0), "");

    ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
    VRFY((ret == 0), "");
    ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128);
    VRFY((ret == 0), "");
    ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0);
    VRFY((ret == 0), "");

    /* This should fail because collective metadata writes are not supported with page buffering */
    H5E_BEGIN_TRY {
        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
    } H5E_END_TRY;
    VRFY((file_id < 0), "H5Fcreate failed");

    /* disable collective metadata writes for page buffering to work */
    ret = H5Pset_coll_metadata_write(fapl, FALSE);
    VRFY((ret >= 0), "");
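
    /* Exercise both metadata cache write strategies: distributed and
     * process-0-only */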
    ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
    VRFY((ret == 0), "");
    ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100,
                    sizeof(int) * 100000);
    VRFY((ret == 0), "");

    ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
    VRFY((ret == 0), "");
    ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100,
                    sizeof(int) * 100000);
    VRFY((ret == 0), "");

    ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100);
    VRFY((ret == 0), "");

    data = (int *)HDmalloc(sizeof(int) * (size_t)num_elements);

    /* initialize all the elements to have a value of -1 */
    for (i = 0; i < num_elements; i++)
        data[i] = -1;

    if (MAINPROCESS) {
        hid_t fapl_self = H5I_INVALID_HID;

        fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);

        ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
        VRFY((ret == 0), "");
        /* collective metadata writes do not work with page buffering */
        ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
        VRFY((ret >= 0), "");

        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
        VRFY((file_id >= 0), "");

        /* Push API context */
        ret = H5CX_push();
        VRFY((ret == 0), "H5CX_push()");
        api_ctx_pushed = TRUE;

        /* Get a pointer to the internal file object */
        f = (H5F_t *)H5I_object(file_id);

        VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");

        /* allocate space for 200 raw elements */
        raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
        VRFY((raw_addr != HADDR_UNDEF), "");

        /* allocate space for 200 metadata elements */
        meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
        VRFY((meta_addr != HADDR_UNDEF), "");

        page_count = 0;

        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
        VRFY((ret == 0), "");
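
        /* Writes larger than a single page bypass the page buffer, so no
         * pages should be resident yet */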
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        /* update the first 50 elements */
        for (i = 0; i < 50; i++)
            data[i] = i;
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
        H5Eprint2(H5E_DEFAULT, stderr);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
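        /* Each of the two 50-element (partial-page) writes above brings one
         * page into the page buffer: one raw data page, one metadata page */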
        page_count += 2;
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        /* update the second 50 elements */
        for (i = 0; i < 50; i++)
            data[i] = i + 50;
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        /* update 100 - 200 */
        for (i = 0; i < 100; i++)
            data[i] = i + 100;
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        ret = H5PB_flush(f->shared);
        VRFY((ret == 0), "");

        /* read elements 0 - 200 */
        ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 200; i++)
            VRFY((data[i] == i), "Read different values than written");
        ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 200; i++)
            VRFY((data[i] == i), "Read different values than written");

        /* read elements 0 - 50 */
        ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 50; i++)
            VRFY((data[i] == i), "Read different values than written");
        ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 50; i++)
            VRFY((data[i] == i), "Read different values than written");

        /* close the file */
        ret = H5Fclose(file_id);
        VRFY((ret >= 0), "H5Fclose succeeded");
        ret = H5Pclose(fapl_self);
        VRFY((ret >= 0), "H5Pclose succeeded");

        /* Pop API context */
        if (api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    if (mpi_size > 1) {
        ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0);
        VRFY((ret == 0), "");
        /* collective metadata writes do not work with page buffering */
        ret = H5Pset_coll_metadata_write(fapl, FALSE);
        VRFY((ret >= 0), "");

        file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
        VRFY((file_id >= 0), "");

        /* Push API context */
        ret = H5CX_push();
        VRFY((ret == 0), "H5CX_push()");
        api_ctx_pushed = TRUE;

        /* Get a pointer to the internal file object */
        f = (H5F_t *)H5I_object(file_id);

        VRFY((f->shared->page_buf != NULL), "Page Buffer created with more than 1 process");

        /* allocate space for 200 raw elements */
        raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
        VRFY((raw_addr != HADDR_UNDEF), "");
        /* allocate space for 200 metadata elements */
        meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
        VRFY((meta_addr != HADDR_UNDEF), "");

        page_count = 0;

        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
        VRFY((ret == 0), "");
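
        /* In parallel, these multi-page writes bypass the page buffer, so
         * no pages should be resident yet */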
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        /* update the first 50 elements */
        for (i = 0; i < 50; i++)
            data[i] = i;
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        /* update the second 50 elements */
        for (i = 0; i < 50; i++)
            data[i] = i + 50;
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        /* update 100 - 200 */
        for (i = 0; i < 100; i++)
            data[i] = i + 100;
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
        VRFY((ret == 0), "");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
        VRFY((ret == 0), "");

        /* read elements 0 - 200 */
        ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 200; i++)
            VRFY((data[i] == i), "Read different values than written");
        ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 200; i++)
            VRFY((data[i] == i), "Read different values than written");

        /* read elements 0 - 50 */
        ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 50; i++)
            VRFY((data[i] == i), "Read different values than written");
        ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
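        /* Raw data I/O bypasses the page buffer in parallel; the metadata
         * read above brings one page into the page buffer */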
        page_count += 1;
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 50; i++)
            VRFY((data[i] == i), "Read different values than written");

        MPI_Barrier(MPI_COMM_WORLD);
        /* reset the first 50 elements to -1 */
        for (i = 0; i < 50; i++)
            data[i] = -1;
        ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");

        /* read elements 0 - 50 */
        ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 50; i++)
            VRFY((data[i] == -1), "Read different values than written");
        ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
        VRFY((ret == 0), "");
        VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
        for (i = 0; i < 50; i++)
            VRFY((data[i] == -1), "Read different values than written");

        /* close the file */
        ret = H5Fclose(file_id);
        VRFY((ret >= 0), "H5Fclose succeeded");
    }

    ret = H5Pclose(fapl);
    VRFY((ret >= 0), "H5Pclose succeeded");
    ret = H5Pclose(fcpl);
    VRFY((ret >= 0), "H5Pclose succeeded");

    /* Pop API context */
    if (api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }

    HDfree(data);
    data = NULL;
    MPI_Barrier(MPI_COMM_WORLD);
}
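
/*
 * Helper for test_page_buffer_access(): create a paged file with the given
 * fcpl/fapl, apply the requested metadata cache write strategy, and then
 * create, write, verify, and delete several datasets in a "GROUP" group so
 * the metadata cache and page buffer see a mix of allocations and frees.
 */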
static int
create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy)
{
    hid_t               file_id, dset_id, grp_id;
    hid_t               sid, mem_dataspace;
    hsize_t             start[RANK];
    hsize_t             count[RANK];
    hsize_t             stride[RANK];
    hsize_t             block[RANK];
    DATATYPE           *data_array = NULL;
    hsize_t             dims[RANK], i;
    hsize_t             num_elements;
    int                 k;
    char                dset_name[20];
    H5F_t              *f = NULL;
    H5C_t              *cache_ptr = NULL;
    H5AC_cache_config_t config;
    hbool_t             api_ctx_pushed = FALSE; /* Whether API context pushed */
    herr_t              ret;

    file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
    VRFY((file_id >= 0), "");

    ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
    VRFY((ret == 0), "");

    /* Push API context */
    ret = H5CX_push();
    VRFY((ret == 0), "H5CX_push()");
    api_ctx_pushed = TRUE;

    f = (H5F_t *)H5I_object(file_id);
    VRFY((f != NULL), "");

    cache_ptr = f->shared->cache;
    VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");

    cache_ptr->ignore_tags = TRUE;
    H5C_stats__reset(cache_ptr);
    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;

    ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config);
    VRFY((ret == 0), "");

    config.metadata_write_strategy = metadata_write_strategy;

    ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config);
    VRFY((ret == 0), "");

    grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    VRFY((grp_id >= 0), "");

    dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
    dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
    sid     = H5Screate_simple(RANK, dims, NULL);
    VRFY((sid >= 0), "H5Screate_simple succeeded");

    /* Each process takes a slab of rows. */
    block[0]  = dims[0] / (hsize_t)mpi_size;
    block[1]  = dims[1];
    stride[0] = block[0];
    stride[1] = block[1];
    count[0]  = 1;
    count[1]  = 1;
    start[0]  = (hsize_t)mpi_rank * block[0];
    start[1]  = 0;

    num_elements = block[0] * block[1];
    /* allocate memory for data buffer */
    data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
    VRFY((data_array != NULL), "data_array HDmalloc succeeded");
    /* put some trivial data in the data_array */
    for (i = 0; i < num_elements; i++)
        data_array[i] = mpi_rank + 1;

    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
    VRFY((mem_dataspace >= 0), "");

    for (k = 0; k < NUM_DSETS; k++) {
        HDsprintf(dset_name, "D1dset%d", k);
        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        VRFY((dset_id >= 0), "");
        ret = H5Dclose(dset_id);
        VRFY((ret == 0), "");

        HDsprintf(dset_name, "D2dset%d", k);
        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        VRFY((dset_id >= 0), "");
        ret = H5Dclose(dset_id);
        VRFY((ret == 0), "");

        HDsprintf(dset_name, "D3dset%d", k);
        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        VRFY((dset_id >= 0), "");
        ret = H5Dclose(dset_id);
        VRFY((ret == 0), "");

        HDsprintf(dset_name, "dset%d", k);
        dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        VRFY((dset_id >= 0), "");

        ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
        VRFY((ret == 0), "");

        ret = H5Dclose(dset_id);
        VRFY((ret == 0), "");

        HDmemset(data_array, 0, num_elements * sizeof(DATATYPE));
        dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
        VRFY((dset_id >= 0), "");

        ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
        VRFY((ret == 0), "");

        ret = H5Dclose(dset_id);
        VRFY((ret == 0), "");

        for (i = 0; i < num_elements; i++)
            VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");

        HDsprintf(dset_name, "D1dset%d", k);
        ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
        VRFY((ret == 0), "");
        HDsprintf(dset_name, "D2dset%d", k);
        ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
        VRFY((ret == 0), "");
        HDsprintf(dset_name, "D3dset%d", k);
        ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
        VRFY((ret == 0), "");
    }

    ret = H5Gclose(grp_id);
    VRFY((ret == 0), "");
    ret = H5Fclose(file_id);
    VRFY((ret == 0), "");
    ret = H5Sclose(sid);
    VRFY((ret == 0), "");
    ret = H5Sclose(mem_dataspace);
    VRFY((ret == 0), "");

    /* Pop API context */
    if (api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }

    MPI_Barrier(MPI_COMM_WORLD);
    HDfree(data_array);
    return 0;
} /* create_file */
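
/*
 * Helper for test_page_buffer_access(): re-open the file created by
 * create_file() with the given metadata write strategy, verify the page
 * buffer configuration and the dataset contents, expunge all clean entries
 * from the metadata cache, and then check that objects in the file can
 * still be opened.
 */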
static int
open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
          size_t page_buffer_size)
{
    hid_t               file_id, dset_id, grp_id, grp_id2;
    hid_t               sid, mem_dataspace;
    DATATYPE           *data_array = NULL;
    hsize_t             dims[RANK];
    hsize_t             start[RANK];
    hsize_t             count[RANK];
    hsize_t             stride[RANK];
    hsize_t             block[RANK];
    int                 i, k, ndims;
    hsize_t             num_elements;
    char                dset_name[20];
    H5F_t              *f = NULL;
    H5C_t              *cache_ptr = NULL;
    H5AC_cache_config_t config;
    hbool_t             api_ctx_pushed = FALSE; /* Whether API context pushed */
    herr_t              ret;

    config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
    ret = H5Pget_mdc_config(fapl, &config);
    VRFY((ret == 0), "");

    config.metadata_write_strategy = metadata_write_strategy;

    ret = H5Pset_mdc_config(fapl, &config);
    VRFY((ret == 0), "");

    file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl);
    H5Eprint2(H5E_DEFAULT, stderr);
    VRFY((file_id >= 0), "");

    /* Push API context */
    ret = H5CX_push();
    VRFY((ret == 0), "H5CX_push()");
    api_ctx_pushed = TRUE;

    ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
    VRFY((ret == 0), "");

    f = (H5F_t *)H5I_object(file_id);
    VRFY((f != NULL), "");

    cache_ptr = f->shared->cache;
    VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");

    MPI_Barrier(MPI_COMM_WORLD);

    VRFY((f->shared->page_buf != NULL), "");
    VRFY((f->shared->page_buf->page_size == page_size), "");
    VRFY((f->shared->page_buf->max_size == page_buffer_size), "");

    grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
    VRFY((grp_id >= 0), "");

    dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
    dims[1] = (hsize_t)(COL_FACTOR * mpi_size);

    /* Each process takes a slab of rows. */
    block[0]  = dims[0] / (hsize_t)mpi_size;
    block[1]  = dims[1];
    stride[0] = block[0];
    stride[1] = block[1];
    count[0]  = 1;
    count[1]  = 1;
    start[0]  = (hsize_t)mpi_rank * block[0];
    start[1]  = 0;

    num_elements = block[0] * block[1];
    /* allocate memory for data buffer */
    data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
    VRFY((data_array != NULL), "data_array HDmalloc succeeded");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
    VRFY((mem_dataspace >= 0), "");

    for (k = 0; k < NUM_DSETS; k++) {
        HDsprintf(dset_name, "dset%d", k);
        dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
        VRFY((dset_id >= 0), "");

        sid = H5Dget_space(dset_id);
        VRFY((sid >= 0), "H5Dget_space succeeded");

        ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
        VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
        VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
        VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");

        ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
        VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");

        ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
        VRFY((ret >= 0), "");

        ret = H5Dclose(dset_id);
        VRFY((ret >= 0), "");
        ret = H5Sclose(sid);
        VRFY((ret == 0), "");

        for (i = 0; i < (int)num_elements; i++)
            VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
    }

    grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    VRFY((grp_id2 >= 0), "");
    ret = H5Gclose(grp_id2);
    VRFY((ret == 0), "");

    ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
    VRFY((ret == 0), "");

    MPI_Barrier(MPI_COMM_WORLD);
    /* expunge every clean, unpinned, and unprotected entry from the
     * metadata cache by walking its hash table.
     */
    for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
        H5C_cache_entry_t *entry_ptr = NULL;

        entry_ptr = cache_ptr->index[i];

        while (entry_ptr != NULL) {
            HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
            HDassert(entry_ptr->is_dirty == FALSE);

            if (!entry_ptr->is_pinned && !entry_ptr->is_protected) {
                ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
                VRFY((ret == 0), "");
            }

            entry_ptr = entry_ptr->ht_next;
        }
    }
    MPI_Barrier(MPI_COMM_WORLD);

    grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT);
    H5Eprint2(H5E_DEFAULT, stderr);
    VRFY((grp_id2 >= 0), "");
    ret = H5Gclose(grp_id2);
    H5Eprint2(H5E_DEFAULT, stderr);
    VRFY((ret == 0), "");

    ret = H5Gclose(grp_id);
    VRFY((ret == 0), "");
    ret = H5Fclose(file_id);
    VRFY((ret == 0), "");
    ret = H5Sclose(mem_dataspace);
    VRFY((ret == 0), "");

    /* Pop API context */
    if (api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }

    HDfree(data_array);

    return nerrors;
} /* open_file */

/*
 * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
 *       incoming fapl that could conflict with the existing values in H5F_shared_t on
 *       multiple opens of the same file.
 */
void
test_file_properties(void)
{
    hid_t       fid          = H5I_INVALID_HID; /* HDF5 file ID */
    hid_t       fapl_id      = H5I_INVALID_HID; /* File access plist */
    hid_t       fapl_copy_id = H5I_INVALID_HID; /* File access plist */
    hbool_t     is_coll;
    htri_t      are_equal;
    const char *filename;
    MPI_Comm    comm     = MPI_COMM_WORLD;
    MPI_Info    info     = MPI_INFO_NULL;
    MPI_Comm    comm_out = MPI_COMM_NULL;
    MPI_Info    info_out = MPI_INFO_NULL;
    herr_t      ret;     /* Generic return value */
    int         mpi_ret; /* MPI return value */
    int         cmp;     /* Compare value */

    filename = (const char *)GetTestParameters();

    /* set up MPI parameters */
    mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded");
    mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded");
    mpi_ret = MPI_Info_create(&info);
    VRFY((mpi_ret >= 0), "MPI_Info_create succeeded");
    mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz");
    VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");

    /* setup file access plist */
    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
    ret = H5Pset_fapl_mpio(fapl_id, comm, info);
    VRFY((ret >= 0), "H5Pset_fapl_mpio");

    /* Check getting and setting MPI properties
     * (for use in VOL connectors, not the MPI-I/O VFD)
     */
    ret = H5Pset_mpi_params(fapl_id, comm, info);
    VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
    ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out);
    VRFY((ret >= 0), "H5Pget_mpi_params succeeded");

    /* Check the communicator */
    VRFY((comm != comm_out), "Communicators should not be bitwise identical");
    cmp     = MPI_UNEQUAL;
    mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp);
    VRFY((mpi_ret >= 0), "MPI_Comm_compare succeeded");
    VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare");
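    /* (Congruent rather than identical because H5Pget_mpi_params() returns
     * a duplicate of the stored communicator.) */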

    /* Check the info object */
    VRFY((info != info_out), "Info objects should not be bitwise identical");

    /* Free the obtained comm and info object */
    mpi_ret = MPI_Comm_free(&comm_out);
    VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded");
    mpi_ret = MPI_Info_free(&info_out);
    VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");

    /* Copy the fapl and ensure it's equal to the original */
    fapl_copy_id = H5Pcopy(fapl_id);
    VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy");
    are_equal = H5Pequal(fapl_id, fapl_copy_id);
    VRFY((TRUE == are_equal), "H5Pequal");

    /* Add a property to the copy and ensure it's different now */
    mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
    VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
    ret = H5Pset_mpi_params(fapl_copy_id, comm, info);
    VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
    are_equal = H5Pequal(fapl_id, fapl_copy_id);
    VRFY((FALSE == are_equal), "H5Pequal");

    /* Add a property with the same key but a different value to the original
     * and ensure they are still different.
     */
    mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk");
    VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
    ret = H5Pset_mpi_params(fapl_id, comm, info);
    VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
    are_equal = H5Pequal(fapl_id, fapl_copy_id);
    VRFY((FALSE == are_equal), "H5Pequal");

    /* Set the second property in the original to the same
     * value as the copy and ensure they are the same now.
     */
    mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
    VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
    ret = H5Pset_mpi_params(fapl_id, comm, info);
    VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
    are_equal = H5Pequal(fapl_id, fapl_copy_id);
    VRFY((TRUE == are_equal), "H5Pequal");

    /* create the file */
    fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
    VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");

    /* verify settings for file access properties */

    /* Collective metadata writes */
    ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
    VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");

    /* Collective metadata read API calling requirement */
    ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
    VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");

    ret = H5Fclose(fid);
    VRFY((ret >= 0), "H5Fclose succeeded");

    /* Open the file with the MPI-IO driver */
    ret = H5Pset_fapl_mpio(fapl_id, comm, info);
    VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
    fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
    VRFY((fid != H5I_INVALID_HID), "H5Fopen succeeded");

    /* verify settings for file access properties */

    /* Collective metadata writes */
    ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
    VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");

    /* Collective metadata read API calling requirement */
    ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
    VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");

    ret = H5Fclose(fid);
    VRFY((ret >= 0), "H5Fclose succeeded");

    /* Open the file with the MPI-IO driver w/ collective settings */
    ret = H5Pset_fapl_mpio(fapl_id, comm, info);
    VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
    /* Collective metadata writes */
    ret = H5Pset_coll_metadata_write(fapl_id, TRUE);
    VRFY((ret >= 0), "H5Pset_coll_metadata_write succeeded");
    /* Collective metadata read API calling requirement */
    ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE);
    VRFY((ret >= 0), "H5Pset_all_coll_metadata_ops succeeded");
    fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
    VRFY((fid != H5I_INVALID_HID), "H5Fopen succeeded");

    /* verify settings for file access properties */

    /* Collective metadata writes */
    ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
    VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");

    /* Collective metadata read API calling requirement */
    ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
    VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");

    /* close fapl and retrieve it from file */
    ret = H5Pclose(fapl_id);
    VRFY((ret >= 0), "H5Pclose succeeded");
    fapl_id = H5I_INVALID_HID;

    fapl_id = H5Fget_access_plist(fid);
    VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS");

    /* verify settings for file access properties */

    /* Collective metadata writes */
    ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
    VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");

    /* Collective metadata read API calling requirement */
    ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
    VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
    VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");

    /* close file */
    ret = H5Fclose(fid);
    VRFY((ret >= 0), "H5Fclose succeeded");

    /* Release file-access plist */
    ret = H5Pclose(fapl_id);
    VRFY((ret >= 0), "H5Pclose succeeded");
    ret = H5Pclose(fapl_copy_id);
    VRFY((ret >= 0), "H5Pclose succeeded");

    /* Free the MPI info object */
    mpi_ret = MPI_Info_free(&info);
    VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");

} /* end test_file_properties() */