Installation instructions for Parallel HDF5
-------------------------------------------


1. Overview
-----------

This file contains instructions for the installation of parallel HDF5 (PHDF5).
PHDF5 requires an MPI compiler with MPI-IO support and a parallel file system.
If you don't know yet, you should first consult with your system support staff
for information on how to compile an MPI program, how to run an MPI
application, and how to access the parallel file system. There are sample
MPI-IO C and Fortran programs in the "Sample programs" section. You can use
them to run simple tests of your MPI compilers and the parallel file system.

If you still have difficulties installing PHDF5 on your system, please
send mail to
	hdfhelp@ncsa.uiuc.edu

In your mail, please include the output of "uname -a". If you have run the
"configure" command, attach the output of the command and the content of
the file "config.log".


2. Quick Instructions for known systems
---------------------------------------

The following shows the steps to run the parallel HDF5 configure on a
few machines we have tested. If your platform is not shown, or the steps
somehow do not work for it, please go to the next section for a more
detailed explanation.

------
Known parallel compilers
------

HDF5 knows several parallel compilers: mpicc, hcc, mpcc, mpcc_r.
To build parallel HDF5 with one of the above, just set CC to it
and run configure. The "--enable-parallel" option is optional in
this case.

$ CC=/usr/local/mpi/bin/mpicc ./configure --prefix=<install-directory>
$ make
$ make check
$ make install


------
TFLOPS
------

Follow the instructions in INSTALL_TFLOPS.

-------
IBM SP
-------

First of all, make sure your environment variables are set correctly
to compile and execute a single-process MPI application for the SP
machine. Unfortunately, the settings vary from machine to machine.
E.g., the following works for the Blue machine of LLNL.

setenv MP_PROCS 1
setenv MP_NODES 1
setenv MP_LABELIO no
setenv MP_RMPOOL 0
setenv LLNL_COMPILE_SINGLE_THREADED TRUE   # for LLNL site only

The shared library configuration for this version is broken, so only
the static library is supported.

Then do the following steps:

$ ./configure --disable-shared --prefix=<install-directory>
$ make			# build the library
$ make check		# verify the correctness
$ make install


We also suggest that you add "-qxlf90=autodealloc" to FFLAGS when
building parallel HDF5 with Fortran enabled. This can be done by
invoking:

setenv FFLAGS -qxlf90=autodealloc		# 32-bit build

or

setenv FFLAGS "-q64 -qxlf90=autodealloc"	# 64-bit build

prior to running configure. Recall that the "-q64" is necessary
for 64-bit builds.

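For example, a complete 64-bit Fortran build might then look like this
(a sketch only: the wrapper names mpcc_r and mpxlf90_r, the F9X
variable, and the "--enable-fortran" option are assumptions to adapt
to your site):

setenv CC mpcc_r
setenv F9X mpxlf90_r
setenv FFLAGS "-q64 -qxlf90=autodealloc"
./configure --enable-fortran --disable-shared --prefix=<install-directory>
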
---------------
SGI Origin 2000
Cray T3E
(where MPI-IO is part of system MPI library such as the mpt module)
---------------

#!/bin/sh

RUNPARALLEL="mpirun -np 3"
export RUNPARALLEL
LIBS="-lmpi"
export LIBS
./configure --enable-parallel --prefix=$PWD/installdir
make
make check
make install


***Known problem***
Some O2K systems may encounter an error during make:

ld32: FATAL 9: I/O error (-lmpi): No such file or directory

This is because libtool tries too hard to locate the loader 'ld'
but ends up with one that does not know where to find the right
version of libmpi.a for the particular ABI requested.
The fix is to edit the file 'libtool' at the top of the build directory.
Search for a string that looks like the following:

LD="/opt/MIPSpro/MIPSpro_default/opt/MIPSpro/bin/ld -n32"

Replace it with something that knows how to find the right libmpi.a,
e.g.,

LD="/opt/MIPSpro/MIPSpro_default/opt/MIPSpro/bin/cc -n32"

Or you can pre-empt it by setting LD at configure time:

$ LD="cc" ./configure --enable-parallel ...


---------------
SGI Origin 2000
Cray T3E
(where MPI-IO is not part of the system MPI library, or you want to
use your own version of MPI-IO)
---------------

mpi1_inc=""				# MPI-1 include
mpi1_lib=""				# MPI-1 library
mpio_inc=-I$HOME/ROMIO/include		# MPI-IO include
mpio_lib="-L$HOME/ROMIO/lib/IRIX64"	# MPI-IO library

MPI_INC="$mpio_inc $mpi1_inc"
MPI_LIB="$mpio_lib $mpi1_lib"

# for version 1.1
CPPFLAGS=$MPI_INC
export CPPFLAGS
LDFLAGS=$MPI_LIB
export LDFLAGS
RUNPARALLEL="mpirun -np 3"
export RUNPARALLEL
LIBS="-lmpio -lmpi"
export LIBS

./configure --enable-parallel --prefix=$PWD/installdir
make
make check
make install


---------------------
Linux 2.4 and greater
---------------------

Be sure that your installation of MPICH was configured with the following
configuration command-line option:

-cflags="-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64"

This allows for files larger than 2GB on Linux systems and is only
available with Linux kernels 2.4 and greater.

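For example, when building MPICH itself, the option is passed to MPICH's
own configure script (a sketch; the install prefix and any other MPICH
options are assumptions for your site):

$ ./configure -cflags="-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64" \
	-prefix=/usr/local/mpi
$ make
$ make install
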
------------------
HP V2500 and N4000
------------------

Follow the instructions in section 3.


3. Detailed explanation
-----------------------

The HDF5 library can be configured to use MPI and MPI-IO for parallelism
on a distributed multi-processor system. The easiest way to do this is to
have a properly installed parallel compiler (e.g., MPICH's mpicc or IBM's
mpcc_r) and supply that executable as the value of the CC environment
variable. For example:

$ CC=mpcc_r ./configure

$ CC=/usr/local/mpi/bin/mpicc ./configure

If no such wrapper script is available, then you must specify your normal
C compiler along with the distribution of MPI/MPI-IO which is to be used
(values other than `mpich' will be added at a later date):

$ ./configure --enable-parallel=mpich

If the MPI/MPI-IO include files and/or libraries cannot be found by the
compiler, then their directories must be given as arguments to CPPFLAGS
and/or LDFLAGS:

$ CPPFLAGS=-I/usr/local/mpi/include \
  LDFLAGS=-L/usr/local/mpi/lib/LINUX/ch_p4 \
  ./configure --enable-parallel=mpich

If a parallel library is being built, then configure attempts to determine
how to run a parallel application on one processor and on many
processors. If the compiler is `mpicc' and the user hasn't specified
values for RUNSERIAL and RUNPARALLEL, then configure chooses `mpirun' from
the same directory as `mpicc':

RUNSERIAL:    /usr/local/mpi/bin/mpirun -np 1
RUNPARALLEL:  /usr/local/mpi/bin/mpirun -np $${NPROCS:=3}

The `$${NPROCS:=3}' will be substituted with the value of the NPROCS
environment variable at the time `make check' is run (or with the value
3 if NPROCS is not set).

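For example, to run the parallel tests with 2 processes instead of the
default 3:

$ NPROCS=2 make check

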
4. Parallel tests
-----------------

The testpar/ directory contains tests for Parallel HDF5 and MPI-IO.
The t_mpi program tests the basic functionality of some MPI-IO features
used by Parallel HDF5. It usually exits with a non-zero code if a
required MPI-IO feature does not succeed as expected. One exception is
the testing of accessing files larger than 2GB. If the underlying file
system or the MPI-IO library fails to handle file sizes larger than 2GB,
the test will print informational messages stating the failure but will
not exit with a non-zero code. Failure to support files larger than 2GB
is not a fatal error for HDF5, because HDF5 can use other file drivers,
such as the family of files driver, to bypass the file size limit.

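To illustrate that fallback, here is a minimal serial sketch (not part
of the test suite) of creating a file with the family driver, which
splits one logical HDF5 file into 1GB member files; the file name must
contain a printf-style integer format for the member number:

/* Sketch: split a logical HDF5 file into 1GB member files. */
#include "hdf5.h"

int main(void)
{
    hid_t fapl, file;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    /* each member file holds at most 1GB of the logical file */
    H5Pset_fapl_family(fapl, (hsize_t)1024*1024*1024, H5P_DEFAULT);
    file = H5Fcreate("big-%05d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    H5Fclose(file);
    H5Pclose(fapl);
    return 0;
}
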
By default, the parallel tests use the current directory as the test
directory. This can be changed with the environment variable
HDF5_PARAPREFIX. For example, if the tests should use directory
/PFS/user/me, do:

HDF5_PARAPREFIX=/PFS/user/me
export HDF5_PARAPREFIX
make check

(On some batch job systems, you may need to hard-set HDF5_PARAPREFIX in
a shell initialization file such as .profile or .cshrc.)

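For example, a csh/tcsh user could add the following line to .cshrc:

setenv HDF5_PARAPREFIX /PFS/user/me

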
5. Sample programs
------------------

Here are sample MPI-IO C and Fortran programs. You may use them to run
simple tests of your MPI compilers and the parallel file system. The MPI
commands used here are mpicc, mpif90 and mpirun. Replace them with the
commands of your system.

The programs assume they run in the parallel file system. Thus they create
the test data file in the current directory. If the parallel file system
is somewhere else, you need to run the sample programs there or edit the
programs to use a different file name.

Example compiling and running:

% mpicc Sample_mpio.c -o c.out
% mpirun -np 4 c.out

% mpif90 Sample_mpio.f90 -o f.out
% mpirun -np 4 f.out

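Since the C program accepts the test file name as its first argument,
you can also point it directly at a directory in the parallel file
system (the path below is just an example):

% mpirun -np 4 c.out /PFS/user/me/mpitest.data

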
==> Sample_mpio.c <==
/* Simple MPI-IO program testing if a parallel file can be created.
 * Default filename can be specified via first program argument.
 * Each process writes something, then reads all data back.
 */

#include <stdio.h>
#include <unistd.h>	/* for gethostname() */
#include <mpi.h>
#ifndef MPI_FILE_NULL	/* MPIO may be defined in mpi.h already */
#include <mpio.h>
#endif

#define DIMSIZE	10	/* dimension size, avoid powers of 2. */
#define PRINTID	printf("Proc %d: ", mpi_rank)

int
main(int ac, char **av)
{
    char hostname[128];
    int  mpi_size, mpi_rank;
    MPI_File fh;
    char *filename = "./mpitest.data";
    char mpi_err_str[MPI_MAX_ERROR_STRING];
    int  mpi_err_strlen;
    int  mpi_err;
    char writedata[DIMSIZE], readdata[DIMSIZE];
    char expect_val;
    int  i, irank;
    int  nerrors = 0;           /* number of errors */
    MPI_Offset mpi_off;
    MPI_Status mpi_stat;

    MPI_Init(&ac, &av);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* get file name if provided */
    if (ac > 1){
        filename = *++av;
    }
    if (mpi_rank==0){
        printf("Testing simple MPIO program with %d processes accessing file %s\n",
               mpi_size, filename);
        printf("    (Filename can be specified via program argument)\n");
    }

    /* show the hostname so that we can tell where the processes are running */
    if (gethostname(hostname, 128) < 0){
        PRINTID;
        printf("gethostname failed\n");
        return 1;
    }
    PRINTID;
    printf("hostname=%s\n", hostname);

    if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
            MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE,
            MPI_INFO_NULL, &fh))
            != MPI_SUCCESS){
        MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
        PRINTID;
        printf("MPI_File_open failed (%s)\n", mpi_err_str);
        return 1;
    }

    /* each process writes some data */
    for (i=0; i < DIMSIZE; i++)
        writedata[i] = mpi_rank*DIMSIZE + i;
    mpi_off = mpi_rank*DIMSIZE;
    if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE, MPI_BYTE,
            &mpi_stat))
            != MPI_SUCCESS){
        MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
        PRINTID;
        printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
               (long) mpi_off, (int) DIMSIZE, mpi_err_str);
        return 1;
    }

    /* make sure all processes have finished writing. */
    MPI_Barrier(MPI_COMM_WORLD);

    /* each process reads all the data back and verifies it. */
    for (irank=0; irank < mpi_size; irank++){
        mpi_off = irank*DIMSIZE;
        if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
                &mpi_stat))
                != MPI_SUCCESS){
            MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
            PRINTID;
            printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
                   (long) mpi_off, (int) DIMSIZE, mpi_err_str);
            return 1;
        }
        for (i=0; i < DIMSIZE; i++){
            expect_val = irank*DIMSIZE + i;
            if (readdata[i] != expect_val){
                PRINTID;
                printf("read data[%d:%d] got %d, expect %d\n", irank, i,
                       readdata[i], expect_val);
                nerrors++;
            }
        }
    }
    if (nerrors)
        return 1;

    MPI_File_close(&fh);

    PRINTID;
    printf("all tests passed\n");

    MPI_Finalize();
    return 0;
}

==> Sample_mpio.f90 <==
!
! The following example demonstrates how to create and close a parallel
! file using MPI-IO calls.
!
! USE MPI is the proper way to bring in MPI definitions, but many
! MPI Fortran compilers support the pseudo-standard of INCLUDE.
! So, HDF5 uses the INCLUDE statement instead.
!

PROGRAM MPIOEXAMPLE

   ! USE MPI

   IMPLICIT NONE

   INCLUDE 'mpif.h'

   CHARACTER(LEN=80), PARAMETER :: filename = "filef.h5" ! File name
   INTEGER :: ierror    ! Error flag
   INTEGER :: fh        ! File handle
   INTEGER :: amode     ! File access mode

   call MPI_INIT(ierror)
   amode = MPI_MODE_RDWR + MPI_MODE_CREATE + MPI_MODE_DELETE_ON_CLOSE
   call MPI_FILE_OPEN(MPI_COMM_WORLD, filename, amode, MPI_INFO_NULL, fh, ierror)
   print *, "Trying to create ", filename
   if ( ierror .eq. MPI_SUCCESS ) then
      print *, "MPI_FILE_OPEN succeeded"
      call MPI_FILE_CLOSE(fh, ierror)
   else
      print *, "MPI_FILE_OPEN failed"
   endif

   call MPI_FINALIZE(ierror)

END PROGRAM MPIOEXAMPLE