[svn-r11939] Purpose:
    Support for collective chunk IO inside parallel HDF5.

Description:
    Added a configure check (cache variable hdf5_mpi_special_collective_io_works, C macro MPI_SPECIAL_COLLECTIVE_IO_WORKS) that filters out MPI-IO packages which do not support collective IO when some processes make no IO contribution.

Solution:
    Use AC_CACHE_VAL to do the job.

Platforms tested:
    Parallel: IBM AIX 5.2 (copper); Linux (heping) mpich-1.2.6; SDSC Teragrid mpich-1.2.5; Linux (Tungsten) mpich-1.2.6; Altix (NCSA cobalt)
    Serial:   Linux (heping)

Misc. update:
commit bac54105f6 (parent 811131397c)
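For context, the situation this check is concerned with is a collective MPI-IO call in which one or more ranks have nothing to transfer; per the configure.in comments below, that pattern can hang on mpich 1.2.x and SGI Altix. The following is a minimal sketch of the pattern, not code from the HDF5 source tree; the file name and data are illustrative only.

    /* Illustrative sketch (not from HDF5): every rank enters the collective
     * write, but rank 0 contributes zero bytes.  MPI-IO stacks that mishandle
     * this case are what hdf5_mpi_special_collective_io_works is meant to flag. */
    #include <mpi.h>

    int main(int argc, char *argv[])
    {
        int      rank, buf;
        MPI_File fh;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        buf = rank;

        MPI_File_open(MPI_COMM_WORLD, "special_collective.dat",
                      MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

        /* Collective call: rank 0 passes a zero count (no IO contribution),
         * the remaining ranks each write one int at their own offset. */
        if (rank == 0)
            MPI_File_write_at_all(fh, 0, &buf, 0, MPI_INT, MPI_STATUS_IGNORE);
        else
            MPI_File_write_at_all(fh, (MPI_Offset)(rank * sizeof(int)),
                                  &buf, 1, MPI_INT, MPI_STATUS_IGNORE);

        MPI_File_close(&fh);
        MPI_Finalize();
        return 0;
    }

Note that this commit does not compile or run such a test program: the AC_CACHE_VAL call only seeds hdf5_mpi_special_collective_io_works to yes when it is not already set, so the sketch above merely illustrates the IO pattern the flag refers to.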
configure (vendored): 23 changed lines
@@ -50023,6 +50023,29 @@ else
 echo "$as_me:$LINENO: result: no" >&5
 echo "${ECHO_T}no" >&6
 fi

+echo "$as_me:$LINENO: checking if MPI-IO can do collective IO when one or more processes don't do IOs" >&5
+echo $ECHO_N "checking if MPI-IO can do collective IO when one or more processes don't do IOs... $ECHO_C" >&6
+
+if test "${hdf5_mpi_special_collective_io_works+set}" = set; then
+echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+hdf5_mpi_special_collective_io_works=yes
+fi
+
+
+if test ${hdf5_mpi_special_collective_io_works} = "yes"; then
+
+cat >>confdefs.h <<\_ACEOF
+#define MPI_SPECIAL_COLLECTIVE_IO_WORKS 1
+_ACEOF
+
+echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6
+else
+echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6
+fi
+fi

configure.in: 24 changed lines
@@ -2383,9 +2383,13 @@ if test -n "$PARALLEL"; then

 dnl ----------------------------------------------------------------------
 dnl Check to see whether the complicate MPI derived datatype works.
-dnl Up to now(Dec. 20th, 2004), we find that IBM's MPIO implemention doesn't
+dnl In Dec. 20th, 2004, we found that IBM's MPIO implemention didn't
 dnl handle with the displacement of the complicate MPI type derived datatype
 dnl correctly. So we add the check here.
+dnl IBM fixed this bug in their new version that supported MPI-IO around spring 2005.
+dnl We find that mpich 1.2.5 has the similar bug. The same
+dnl bug also occurs at SGI IRIX 6.5 C with compiler version lower than or equal to 7.3.
+dnl In case people still use the old compiler, we keep this flag.
 AC_MSG_CHECKING([if irregular hyperslab optimization code works inside MPI-IO])

 AC_CACHE_VAL([hdf5_mpi_complex_derived_datatype_works],[hdf5_mpi_complex_derived_datatype_works=yes])

@@ -2397,6 +2401,24 @@ if test ${hdf5_mpi_complex_derived_datatype_works} = "yes"; then
 else
 AC_MSG_RESULT([no])
 fi
+
+dnl ----------------------------------------------------------------------
+dnl Check to see whether MPI-IO can do collective IO successfully when one or more processes don't do
+dnl any IOs.
+dnl Up to now(Feb. 8th, 2006), we find that it will cause program hung with mpich 1.2.x version
+dnl and SGI altix. For those systems, we have to turn off this feature and use independent IO instead.
+dnl
+AC_MSG_CHECKING([if MPI-IO can do collective IO when one or more processes don't do IOs])
+
+AC_CACHE_VAL([hdf5_mpi_special_collective_io_works],[hdf5_mpi_special_collective_io_works=yes])
+
+if test ${hdf5_mpi_special_collective_io_works} = "yes"; then
+AC_DEFINE([MPI_SPECIAL_COLLECTIVE_IO_WORKS], [1],
+[Define if your system can handle special collective IO properly.])
+AC_MSG_RESULT([yes])
+else
+AC_MSG_RESULT([no])
+fi
 fi

 dnl ----------------------------------------------------------------------