/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the files COPYING and Copyright.html. COPYING can be found at the root *
* of the source code distribution tree; Copyright.html can be found at the *
* root level of an installed copy of the electronic HDF5 document set and *
* is linked from the top-level documents page. It can also be found at *
* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
* access to either file, you may request a copy from help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
This program will test irregular hyperslab selections with collective write and read.

The way to test whether collective write and read work is to use independent IO
output to verify the collective output.

1) We will write two datasets with the same hyperslab selection settings;
one in independent mode,
one in collective mode,

2) We will read two datasets with the same hyperslab selection settings,
1. independent read to read independent output,
independent read to read collective output,
Compare the result,
If the result is the same, then collective write succeeds.
2. collective read to read independent output,
independent read to read independent output,
Compare the result,
If the result is the same, then collective read succeeds.

*/

#include "hdf5.h"
#include "H5private.h"
#include "testphdf5.h"

static void coll_write_test(int chunk_factor);
static void coll_read_test(int chunk_factor);
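
/* Note on chunk_factor (summarized from the wrapper tests below): 0 selects
contiguous storage; a nonzero value splits each dataset dimension into
chunk_factor chunks, so 1 gives a single chunk and 4 gives the "complex"
multi-chunk layout. */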

/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_write
*
* Purpose: Wrapper to test the collectively irregular hyperslab write in
contiguous storage
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
void
coll_irregular_cont_write(void)
{

coll_write_test(0);

}

/*-------------------------------------------------------------------------
* Function: coll_irregular_cont_read
*
* Purpose: Wrapper to test the collectively irregular hyperslab read in
contiguous storage
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
void
coll_irregular_cont_read(void)
{

coll_read_test(0);

}

/*-------------------------------------------------------------------------
* Function: coll_irregular_simple_chunk_write
*
* Purpose: Wrapper to test the collectively irregular hyperslab write in
chunk storage(1 chunk)
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
void
coll_irregular_simple_chunk_write(void)
{

coll_write_test(1);

}

/*-------------------------------------------------------------------------
* Function: coll_irregular_simple_chunk_read
*
* Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
storage(1 chunk)
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
void
coll_irregular_simple_chunk_read(void)
{

coll_read_test(1);

}

/*-------------------------------------------------------------------------
* Function: coll_irregular_complex_chunk_write
*
* Purpose: Wrapper to test the collectively irregular hyperslab write in chunk
storage(4 chunks)
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
void
coll_irregular_complex_chunk_write(void)
{

coll_write_test(4);

}

/*-------------------------------------------------------------------------
* Function: coll_irregular_complex_chunk_read
*
* Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
storage(4 chunks)
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
void
coll_irregular_complex_chunk_read(void)
{

coll_read_test(4);

}

/*-------------------------------------------------------------------------
* Function: coll_write_test
*
* Purpose: To test the collectively irregular hyperslab write in chunk
storage
* Input: number of chunks on each dimension
if number is equal to 0, contiguous storage
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications: Oct 18th, 2005
*
*-------------------------------------------------------------------------
*/
void coll_write_test(int chunk_factor)
{

const char *filename;
hid_t facc_plist,dxfer_plist,dcrt_plist;
hid_t file, datasetc,dataseti; /* File and dataset identifiers */
hid_t mspaceid1, mspaceid, fspaceid,fspaceid1; /* Dataspace identifiers */
hsize_t mdim1[1],fsdim[2],mdim[2];

#if 0
hsize_t mdim1[] = {MSPACE1_DIM}; /* Dimension size of the first dataset
(in memory) */
hsize_t fsdim[] = {FSPACE_DIM1, FSPACE_DIM2}; /* Dimension sizes of the dataset
(on disk) */

hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
dataset in memory when we
read selection from the
dataset on the disk */
#endif

hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
hsize_t count[2]; /* Block count */
hsize_t block[2]; /* Block sizes */
hsize_t chunk_dims[2];

herr_t ret;
unsigned i,j;
int fillvalue = 0; /* Fill value for the dataset */

#if 0
int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
dataset */
int vector[MSPACE1_DIM];
#endif

int *matrix_out, *matrix_out1, *vector;

hbool_t use_gpfs = FALSE;
int mpi_size,mpi_rank;

MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;

/*set up MPI parameters */
MPI_Comm_size(comm,&mpi_size);
MPI_Comm_rank(comm,&mpi_rank);

/* Obtain file name */
filename = GetTestParameters();

/*
* Buffers' initialization.
*/
mdim1[0] = MSPACE1_DIM *mpi_size;
mdim[0] = MSPACE_DIM1;
mdim[1] = MSPACE_DIM2*mpi_size;
fsdim[0] = FSPACE_DIM1;
fsdim[1] = FSPACE_DIM2*mpi_size;

vector = (int*)HDmalloc(sizeof(int)*mdim1[0]*mpi_size);
matrix_out = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);
matrix_out1 = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);

HDmemset(vector,0,sizeof(int)*mdim1[0]*mpi_size);
vector[0] = vector[MSPACE1_DIM*mpi_size - 1] = -1;
for (i = 1; i < MSPACE1_DIM*mpi_size - 1; i++) vector[i] = i;
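
/* vector now holds the value of its index everywhere except the two
endpoints, which are set to -1; the same buffer feeds both the
independent and the collective write below. */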

/* Grab file access property list */
facc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
VRFY((facc_plist >= 0),"");

/*
* Create a file.
*/
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist);
VRFY((file >= 0),"H5Fcreate succeeded");

/*
* Create property list for a dataset and set up fill values.
*/
dcrt_plist = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dcrt_plist >= 0),"");

ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue);
VRFY((ret >= 0),"Fill value creation property list succeeded");

if(chunk_factor != 0) {
chunk_dims[0] = fsdim[0] / chunk_factor;
chunk_dims[1] = fsdim[1] / chunk_factor;
ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
VRFY((ret >= 0),"chunk creation property list succeeded");
}
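
/* With a nonzero chunk_factor, each file dimension is split into
chunk_factor equal chunks (so chunk_factor 4 yields 4 chunks per
dimension); with 0 the default contiguous layout is kept. */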

/*
*
* Create dataspace for the first dataset on the disk.
* dim1 = 9
* dim2 = 3600
*
*
*/
fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL);
VRFY((fspaceid >= 0),"file dataspace creation succeeded");

/*
* Create dataset in the file. Notice that creation
* property list dcrt_plist is used.
*/
datasetc = H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
VRFY((datasetc >= 0),"dataset creation succeeded");

dataseti = H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
VRFY((dataseti >= 0),"dataset creation succeeded");

/* The First selection for FILE
*
* block (3,2)
* stride(4,3)
* count (1,768/mpi_size)
* start (0,1+768*3*mpi_rank/mpi_size)
*
*/
start[0] = FHSTART0;
start[1] = FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1;
stride[0] = FHSTRIDE0;
stride[1] = FHSTRIDE1;
count[0] = FHCOUNT0;
count[1] = FHCOUNT1;
block[0] = FHBLOCK0;
block[1] = FHBLOCK1;

ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");

/* The Second selection for FILE
*
* block (3,768)
* stride (1,1)
* count (1,1)
* start (4,768*mpi_rank/mpi_size)
*
*/
start[0] = SHSTART0;
start[1] = SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank;
stride[0] = SHSTRIDE0;
stride[1] = SHSTRIDE1;
count[0] = SHCOUNT0;
count[1] = SHCOUNT1;
block[0] = SHBLOCK0;
block[1] = SHBLOCK1;

ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
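
/* The H5S_SELECT_OR above unions this hyperslab with the first one, so the
resulting file selection is no longer a single regular hyperslab; this
irregular region is what exercises the span-tree selection code. */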

/*
* Create dataspace for the first dataset in memory.
* dim1 = 27000
*
*/
mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL);
VRFY((mspaceid1 >= 0),"memory dataspace creation succeeded");

/*
* Memory space is 1-D, this is a good test to check
* whether a span-tree derived datatype needs to be built.
* block 1
* stride 1
* count 6912/mpi_size
* start 1
*
*/
start[0] = MHSTART0;
stride[0] = MHSTRIDE0;
count[0] = MHCOUNT0;
block[0] = MHBLOCK0;

ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
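
/* Note: H5Dwrite/H5Dread require the memory and file selections to contain
the same number of elements; the 1-D memory selection above is sized to
match the irregular 2-D file selection built from the two hyperslabs. */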

/* independent write */
ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);
VRFY((ret >= 0),"dataset independent write succeeded");

dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxfer_plist >= 0),"");

ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0),"MPIO data transfer property list succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
VRFY((ret>= 0),"set independent IO collectively succeeded");
}
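
/* When the harness requests DXFER_INDEPENDENT_IO, the transfer still goes
through the collective H5Dwrite interface, but H5FD_MPIO_INDIVIDUAL_IO
tells the MPI-IO driver to carry out the actual I/O independently on
each process. */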

/* collective write */
ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector);
VRFY((ret >= 0),"dataset collective write succeeded");

ret = H5Sclose(mspaceid1);
VRFY((ret >= 0),"");

ret = H5Sclose(fspaceid);
VRFY((ret >= 0),"");

/*
* Close dataset.
*/
ret = H5Dclose(datasetc);
VRFY((ret >= 0),"");

ret = H5Dclose(dataseti);
VRFY((ret >= 0),"");

/*
* Close the file.
*/
ret = H5Fclose(file);
VRFY((ret >= 0),"");

/*
* Close property list
*/
ret = H5Pclose(facc_plist);
VRFY((ret >= 0),"");
ret = H5Pclose(dxfer_plist);
VRFY((ret >= 0),"");
ret = H5Pclose(dcrt_plist);
VRFY((ret >= 0),"");

/*
* Open the file.
*/

/***

For testing collective hyperslab selection write
In this test, we are using independent read to check
the correctness of collective write compared with
independent write,

In order to thoroughly test this feature, we choose
a different selection set for reading the data out.

***/

/* Obtain file access property list with MPI-IO driver */
facc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
VRFY((facc_plist >= 0),"");

file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
VRFY((file >= 0),"H5Fopen succeeded");

/*
* Open the dataset.
*/
datasetc = H5Dopen2(file,"collect_write", H5P_DEFAULT);
VRFY((datasetc >= 0),"H5Dopen2 succeeded");

dataseti = H5Dopen2(file,"independ_write", H5P_DEFAULT);
VRFY((dataseti >= 0),"H5Dopen2 succeeded");

/*
* Get dataspace of the open dataset.
*/
fspaceid = H5Dget_space(datasetc);
VRFY((fspaceid >= 0),"file dataspace retrieval succeeded");

fspaceid1 = H5Dget_space(dataseti);
VRFY((fspaceid1 >= 0),"file dataspace retrieval succeeded");

/* The First selection for FILE to read
*
* block (1,1)
* stride(1,1)
* count (3,768/mpi_size)
* start (1,2+768*mpi_rank/mpi_size)
*
*/
start[0] = RFFHSTART0;
start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1;
block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
stride[0] = RFFHSTRIDE0;
stride[1] = RFFHSTRIDE1;
count[0] = RFFHCOUNT0;
count[1] = RFFHCOUNT1;

/* The first selection of the dataset generated by collective write */
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");

/* The first selection of the dataset generated by independent write */
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");

/* The Second selection for FILE to read
*
* block (1,1)
* stride(1,1)
* count (3,1536/mpi_size)
* start (2,4+1536*mpi_rank/mpi_size)
*
*/
start[0] = RFSHSTART0;
start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank;
block[0] = RFSHBLOCK0;
block[1] = RFSHBLOCK1;
stride[0] = RFSHSTRIDE0;
stride[1] = RFSHSTRIDE1;
count[0] = RFSHCOUNT0;
count[1] = RFSHCOUNT1;

/* The second selection of the dataset generated by collective write */
ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");

/* The second selection of the dataset generated by independent write */
ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");
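
/* The read-back selections are deliberately different from the selections
used for writing, so the comparison below checks the file contents
through an independently constructed irregular selection instead of
replaying the write pattern. */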

/*
* Create memory dataspace.
* rank = 2
* mdim1 = 9
* mdim2 = 3600
*
*/
mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
VRFY((mspaceid >= 0),"memory dataspace creation succeeded");

/*
* Select two hyperslabs in memory. The hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace.
* Only the starting point is different.
* The first selection
* block (1,1)
* stride(1,1)
* count (3,768/mpi_size)
* start (0,768*mpi_rank/mpi_size)
*
*/
start[0] = RMFHSTART0;
start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1;
block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
stride[0] = RMFHSTRIDE0;
stride[1] = RMFHSTRIDE1;
count[0] = RMFHCOUNT0;
count[1] = RMFHCOUNT1;

ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");

/*
* Select two hyperslabs in memory. The hyperslabs have the same
* size and shape as the selected hyperslabs for the file dataspace.
* Only the starting point is different.
* The second selection
* block (1,1)
* stride(1,1)
* count (3,1536/mpi_size)
* start (1,2+1536*mpi_rank/mpi_size)
*
*/
start[0] = RMSHSTART0;
start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1;
block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
stride[0] = RMSHSTRIDE0;
stride[1] = RMSHSTRIDE1;
count[0] = RMSHCOUNT0;
count[1] = RMSHCOUNT1;

ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
VRFY((ret >= 0),"hyperslab selection succeeded");

/*
* Initialize data buffer.
*/
HDmemset(matrix_out,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
HDmemset(matrix_out1,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);

/*
* Read data back to the buffer matrix_out.
*/
ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid,
H5P_DEFAULT, matrix_out);
VRFY((ret >= 0),"H5D independent read succeeded");

ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeeded");

ret = 0;

for (i = 0; i < MSPACE_DIM1*MSPACE_DIM2*mpi_size; i++){
if(matrix_out[i]!=matrix_out1[i]) ret = -1;
if(ret < 0) break;
}
VRFY((ret >= 0),"H5D irregular collective write succeeded");
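
/* Both buffers were filled through the same independent read path, so an
element-wise mismatch can only mean that the collective write produced
different file contents than the independent write. */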

/*
* Close memory and file dataspaces.
*/
ret = H5Sclose(mspaceid);
VRFY((ret >= 0),"");
ret = H5Sclose(fspaceid);
VRFY((ret >= 0),"");
ret = H5Sclose(fspaceid1);
VRFY((ret >= 0),"");

/*
* Close dataset.
*/
ret = H5Dclose(dataseti);
VRFY((ret >= 0),"");

ret = H5Dclose(datasetc);
VRFY((ret >= 0),"");

/*
* Close property list
*/
ret = H5Pclose(facc_plist);
VRFY((ret >= 0),"");

/*
* Close the file.
*/
ret = H5Fclose(file);
VRFY((ret >= 0),"");

HDfree(vector);
HDfree(matrix_out);
HDfree(matrix_out1);

return;
}

/*-------------------------------------------------------------------------
* Function: coll_read_test
*
* Purpose: To test the collectively irregular hyperslab read in chunk
storage
* Input: number of chunks on each dimension
if number is equal to 0, contiguous storage
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Unknown
* Dec 2nd, 2004
*
* Modifications: Oct 18th, 2005
* Note: This test must be used with the corresponding
coll_write_test.
*-------------------------------------------------------------------------
*/
void coll_read_test(int chunk_factor)
{

const char *filename;
hid_t facc_plist,dxfer_plist;
hid_t file, dataseti; /* File and dataset identifiers */
hid_t mspaceid, fspaceid1; /* Dataspace identifiers */

/* Dimension sizes of the dataset (on disk) */
#if 0
hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
dataset in memory when we
read selection from the
dataset on the disk */
#endif
hsize_t mdim[2];
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
hsize_t count[2]; /* Block count */
hsize_t block[2]; /* Block sizes */
herr_t ret;

unsigned i,j;

int *matrix_out;
int *matrix_out1;
#if 0
int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
|
|
|
|
dataset */
|
[svn-r12142] Purpose:
change the array size of collective chunking features of parallel tests.
Description:
Previously array size for collective optimization tests
including
cchunk1,
cchunk2,
cchunk3,
cchunk4,
ccontw,
ccontr,
cschunkw,
cschunkr,
ccchunkw,
ccchunkr
are fixed,
They are only valid for some good number of processors(1,2,3,4,6,8,12,16,24,32,48 etc).
Recently there are more requests for parallel tests to be valid on some odd number of processes such as 5,7,11,13 etc.
Solution:
I change the array size to be dynamic rather than static. Now the fastest change array size is a function of mpi_size. dim2 = constant *mpi_size. After some tunings, theoretically the above tests should be valid for any number of processors. However, other parallel tests still need to be tuned.
To verify the correctness of these tests, using mpirun -np 5 ./testphdf5 -b cchunk1 at heping.
Platforms tested:
h5committest(shanti is refused to be connected)
at heping, 5 and 7 processes are used to verify the correctness.
Misc. update:
2006-03-23 10:50:09 +08:00
|
|
|
|
|
|
|
#endif
|
2005-10-18 23:47:13 +08:00
|
|
|
hbool_t use_gpfs = FALSE;
|
|
|
|
int mpi_size,mpi_rank;
|
2004-11-25 05:13:26 +08:00
|
|
|
|
|
|
|
MPI_Comm comm = MPI_COMM_WORLD;
|
|
|
|
MPI_Info info = MPI_INFO_NULL;
|
|
|
|
|
|
|
|
/*set up MPI parameters */
|
|
|
|
MPI_Comm_size(comm,&mpi_size);
|
|
|
|
MPI_Comm_rank(comm,&mpi_rank);
|

    /* Obtain file name */
    filename = GetTestParameters();

    /* Initialize the buffer */
    mdim[0] = MSPACE_DIM1;
    mdim[1] = MSPACE_DIM2 * mpi_size;
    matrix_out  = (int *)HDmalloc(sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
    matrix_out1 = (int *)HDmalloc(sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
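    /* The fastest-changing dimension is a function of mpi_size
     * (dim2 = MSPACE_DIM2 * mpi_size), so the per-process selections
     * below divide evenly for any process count, odd or even. */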

    /*** For testing collective hyperslab selection read ***/

    /* Obtain file access property list */
    facc_plist = create_faccess_plist(comm, info, facc_type, use_gpfs);
    VRFY((facc_plist >= 0), "");
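    /* For reference, a minimal sketch (not compiled) of the MPI-IO file
     * access property list that a helper like create_faccess_plist is
     * assumed to build; the actual helper lives in the test harness. */
#if 0
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);   /* new file access plist */
    ret = H5Pset_fapl_mpio(fapl, comm, info);  /* select the MPI-IO driver */
    VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
#endif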

    /*
     * Open the file.
     */
    file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
    VRFY((file >= 0), "H5Fopen succeeded");

    /*
     * Open the dataset.
     */
    dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
    VRFY((dataseti >= 0), "H5Dopen2 succeeded");

    /*
     * Get dataspace of the open dataset.
     */
    fspaceid1 = H5Dget_space(dataseti);
    VRFY((fspaceid1 >= 0), "obtaining file dataspace succeeded");

    /* The first selection for FILE to read
     *
     *  block  (1,1)
     *  stride (1,1)
     *  count  (3,768/mpi_size)
     *  start  (1,2+768*mpi_rank/mpi_size)
     *
     */
    start[0]  = RFFHSTART0;
    start[1]  = RFFHSTART1 + mpi_rank * RFFHCOUNT1;
    block[0]  = RFFHBLOCK0;
    block[1]  = RFFHBLOCK1;
    stride[0] = RFFHSTRIDE0;
    stride[1] = RFFHSTRIDE1;
    count[0]  = RFFHCOUNT0;
    count[1]  = RFFHCOUNT1;

    ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "hyperslab selection succeeded");

    /* The second selection for FILE to read
     *
     *  block  (1,1)
     *  stride (1,1)
     *  count  (3,1536/mpi_size)
     *  start  (2,4+1536*mpi_rank/mpi_size)
     *
     */
    start[0]  = RFSHSTART0;
    start[1]  = RFSHSTART1 + RFSHCOUNT1 * mpi_rank;
    block[0]  = RFSHBLOCK0;
    block[1]  = RFSHBLOCK1;
    stride[0] = RFSHSTRIDE0;
    stride[1] = RFSHSTRIDE1;
    count[0]  = RFSHCOUNT0;
    count[1]  = RFSHCOUNT1;

    ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
    VRFY((ret >= 0), "hyperslab selection succeeded");
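    /* Together the two H5Sselect_hyperslab calls build one irregular
     * selection: H5S_SELECT_SET replaces any previous selection with the
     * first hyperslab, and H5S_SELECT_OR unions the second one into it,
     * so a single H5Dread below transfers both regions at once. */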

    /*
     * Create memory dataspace.
     */
    mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
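    /* The memory dataspace covers the whole MSPACE_DIM1 x
     * (MSPACE_DIM2*mpi_size) buffer; the hyperslab selections below must
     * pick exactly as many elements as the file selections above, since
     * H5Dread requires matching selection sizes in memory and file. */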

    /*
     * Select two hyperslabs in memory. The hyperslabs have the same
     * size and shape as the selected hyperslabs for the file dataspace.
     * Only the starting point is different.
     * The first selection
     *  block  (1,1)
     *  stride (1,1)
     *  count  (3,768/mpi_size)
     *  start  (0,768*mpi_rank/mpi_size)
     *
     */
    start[0]  = RMFHSTART0;
    start[1]  = RMFHSTART1 + mpi_rank * RMFHCOUNT1;
    block[0]  = RMFHBLOCK0;
    block[1]  = RMFHBLOCK1;
    stride[0] = RMFHSTRIDE0;
    stride[1] = RMFHSTRIDE1;
    count[0]  = RMFHCOUNT0;
    count[1]  = RMFHCOUNT1;

    ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "hyperslab selection succeeded");

    /*
     * Select two hyperslabs in memory. The hyperslabs have the same
     * size and shape as the selected hyperslabs for the file dataspace.
     * Only the starting point is different.
     * The second selection
     *  block  (1,1)
     *  stride (1,1)
     *  count  (3,1536/mpi_size)
     *  start  (1,2+1536*mpi_rank/mpi_size)
     *
     */
    start[0]  = RMSHSTART0;
    start[1]  = RMSHSTART1 + mpi_rank * RMSHCOUNT1;
    block[0]  = RMSHBLOCK0;
    block[1]  = RMSHBLOCK1;
    stride[0] = RMSHSTRIDE0;
    stride[1] = RMSHSTRIDE1;
    count[0]  = RMSHCOUNT0;
    count[1]  = RMSHCOUNT1;

    ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
    VRFY((ret >= 0), "hyperslab selection succeeded");

    /*
     * Initialize the data buffers.
     */
    HDmemset(matrix_out,  0, sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);
    HDmemset(matrix_out1, 0, sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size);

    /*
     * Read data back into the buffer matrix_out.
     */
    dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
    VRFY((dxfer_plist >= 0), "");

    ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
    VRFY((ret >= 0), "MPIO data transfer property list succeeded");
    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
        ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
        VRFY((ret >= 0), "set independent IO collectively succeeded");
    }
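    /* H5Pset_dxpl_mpio_collective_opt keeps the collective calling
     * convention (every process still participates in H5Dread) but tells
     * the MPI-IO driver to perform the actual transfer independently,
     * which lets the same test body exercise both IO paths. */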

    /* Collective read */
    ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
                  dxfer_plist, matrix_out);
    VRFY((ret >= 0), "H5D collective read succeeded");

    ret = H5Pclose(dxfer_plist);
    VRFY((ret >= 0), "");

    /* Independent read */
    ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
                  H5P_DEFAULT, matrix_out1);
    VRFY((ret >= 0), "H5D independent read succeeded");

    ret = 0;
    for(i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
        if(matrix_out[i] != matrix_out1[i]) {
            ret = -1;
            break;
        }
    }
    VRFY((ret >= 0), "H5D contiguous irregular collective read succeeded");
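    /* An equivalent whole-buffer check (a sketch, not compiled): since both
     * buffers were zero-filled above, the unselected elements match too,
     * so HDmemcmp could replace the element-by-element loop. */
#if 0
    ret = HDmemcmp(matrix_out, matrix_out1,
                   sizeof(int) * MSPACE_DIM1 * MSPACE_DIM2 * mpi_size) ? -1 : 0;
#endif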

    /*
     * Close memory and file dataspaces.
     */
    ret = H5Sclose(mspaceid);
    VRFY((ret >= 0), "");
    ret = H5Sclose(fspaceid1);
    VRFY((ret >= 0), "");

    /*
     * Close dataset.
     */
    ret = H5Dclose(dataseti);
    VRFY((ret >= 0), "");

    /*
     * Close property list.
     */
    ret = H5Pclose(facc_plist);
    VRFY((ret >= 0), "");

    /*
     * Close the file.
     */
    ret = H5Fclose(file);
    VRFY((ret >= 0), "");

    /* Release the read buffers allocated above */
    HDfree(matrix_out);
    HDfree(matrix_out1);

    return;
}