Sync CMake and doxygen changes with develop (#3543)
* Sync CMake and doxygen changes from develop * Add missing images
@ -9,7 +9,7 @@
|
||||
# If you do not have access to either file, you may request a copy from
|
||||
# help@hdfgroup.org.
|
||||
#
|
||||
option (USE_LIBAEC "Use AEC library as SZip Filter" OFF)
|
||||
option (USE_LIBAEC "Use AEC library as SZip Filter" ON)
|
||||
option (USE_LIBAEC_STATIC "Use static AEC library " OFF)
|
||||
option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0)
|
||||
option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0)
|
||||
|
@ -390,7 +390,9 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
|
||||
set(CPACK_WIX_PROPERTY_ARPURLINFOABOUT "${HDF5_PACKAGE_URL}")
|
||||
set(CPACK_WIX_PROPERTY_ARPHELPLINK "${HDF5_PACKAGE_BUGREPORT}")
|
||||
if (BUILD_SHARED_LIBS)
|
||||
set(CPACK_WIX_PATCH_FILE "${HDF_RESOURCES_DIR}/patch.xml")
|
||||
set (WIX_CMP_NAME "${HDF5_LIB_NAME}${CMAKE_DEBUG_POSTFIX}")
|
||||
configure_file (${HDF_RESOURCES_DIR}/patch.xml.in ${HDF5_BINARY_DIR}/patch.xml @ONLY)
|
||||
set(CPACK_WIX_PATCH_FILE "${HDF5_BINARY_DIR}/patch.xml")
|
||||
endif ()
|
||||
elseif (APPLE)
|
||||
list (APPEND CPACK_GENERATOR "STGZ")
|
||||
|
@ -58,7 +58,7 @@ if (NOT MSVC AND NOT MINGW)
|
||||
# General flags
|
||||
if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
|
||||
ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/intel-warnings/ifort-general")
|
||||
list (APPEND HDF5_CMAKE_Fortran_FLAGS "-stand:f03" "-free")
|
||||
list (APPEND HDF5_CMAKE_Fortran_FLAGS "-free")
|
||||
elseif (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
|
||||
ADD_H5_FLAGS (HDF5_CMAKE_Fortran_FLAGS "${HDF5_SOURCE_DIR}/config/gnu-warnings/gfort-general")
|
||||
if (HDF5_ENABLE_DEV_WARNINGS)
|
||||
|
@ -38,12 +38,13 @@ set (${HDF5_PACKAGE_NAME}_BUILD_CPP_LIB @HDF5_BUILD_CPP_LIB@)
|
||||
set (${HDF5_PACKAGE_NAME}_BUILD_JAVA @HDF5_BUILD_JAVA@)
|
||||
set (${HDF5_PACKAGE_NAME}_BUILD_TOOLS @HDF5_BUILD_TOOLS@)
|
||||
set (${HDF5_PACKAGE_NAME}_BUILD_HL_LIB @HDF5_BUILD_HL_LIB@)
|
||||
set (${HDF5_PACKAGE_NAME}_BUILD_HL_TOOLS @HDF5_BUILD_HL_TOOLS@)
|
||||
set (${HDF5_PACKAGE_NAME}_BUILD_HL_GIF_TOOLS @HDF5_BUILD_HL_GIF_TOOLS@)
|
||||
set (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE @HDF5_ENABLE_THREADSAFE@)
|
||||
set (${HDF5_PACKAGE_NAME}_ENABLE_PLUGIN_SUPPORT @HDF5_ENABLE_PLUGIN_SUPPORT@)
|
||||
set (${HDF5_PACKAGE_NAME}_ENABLE_Z_LIB_SUPPORT @HDF5_ENABLE_Z_LIB_SUPPORT@)
|
||||
set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT @HDF5_ENABLE_SZIP_SUPPORT@)
|
||||
set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_ENCODING @HDF5_ENABLE_SZIP_ENCODING@)
|
||||
set (${HDF5_PACKAGE_NAME}_ENABLE_ROS3_VFD @HDF5_ENABLE_ROS3_VFD@)
|
||||
set (${HDF5_PACKAGE_NAME}_BUILD_SHARED_LIBS @H5_ENABLE_SHARED_LIB@)
|
||||
set (${HDF5_PACKAGE_NAME}_BUILD_STATIC_LIBS @H5_ENABLE_STATIC_LIB@)
|
||||
set (${HDF5_PACKAGE_NAME}_PACKAGE_EXTLIBS @HDF5_PACKAGE_EXTLIBS@)
|
||||
@ -51,7 +52,8 @@ set (${HDF5_PACKAGE_NAME}_EXPORT_LIBRARIES @HDF5_LIBRARIES_TO_EXPORT@)
|
||||
set (${HDF5_PACKAGE_NAME}_ARCHITECTURE "@CMAKE_GENERATOR_ARCHITECTURE@")
|
||||
set (${HDF5_PACKAGE_NAME}_TOOLSET "@CMAKE_GENERATOR_TOOLSET@")
|
||||
set (${HDF5_PACKAGE_NAME}_DEFAULT_API_VERSION "@DEFAULT_API_VERSION@")
|
||||
set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES "@PARALLEL_FILTERED_WRITES@")
|
||||
set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES @PARALLEL_FILTERED_WRITES@)
|
||||
set (${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN "@HDF5_INSTALL_MOD_FORTRAN@")
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Dependencies
|
||||
@ -67,11 +69,16 @@ if (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL)
|
||||
find_package(MPI QUIET REQUIRED)
|
||||
endif ()
|
||||
|
||||
if (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE)
|
||||
set(THREADS_PREFER_PTHREAD_FLAG ON)
|
||||
find_package(Threads QUIET REQUIRED)
|
||||
endif ()
|
||||
|
||||
if (${HDF5_PACKAGE_NAME}_BUILD_JAVA)
|
||||
set (${HDF5_PACKAGE_NAME}_JAVA_INCLUDE_DIRS
|
||||
@PACKAGE_CURRENT_BUILD_DIR@/lib/jarhdf5-@HDF5_VERSION_STRING@.jar
|
||||
@PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-1.7.33.jar
|
||||
@PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-1.7.33.jar
|
||||
@PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-api-2.0.6.jar
|
||||
@PACKAGE_CURRENT_BUILD_DIR@/lib/slf4j-nop-2.0.6.jar
|
||||
)
|
||||
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARY "@PACKAGE_CURRENT_BUILD_DIR@/lib")
|
||||
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARIES "${${HDF5_PACKAGE_NAME}_JAVA_LIBRARY}")
|
||||
@ -143,14 +150,14 @@ foreach (comp IN LISTS ${HDF5_PACKAGE_NAME}_FIND_COMPONENTS)
|
||||
list (REMOVE_ITEM ${HDF5_PACKAGE_NAME}_FIND_COMPONENTS ${comp})
|
||||
set (${HDF5_PACKAGE_NAME}_LIB_TYPE ${${HDF5_PACKAGE_NAME}_LIB_TYPE} ${comp})
|
||||
|
||||
if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN)
|
||||
if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN AND ${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN STREQUAL "SHARED")
|
||||
set (${HDF5_PACKAGE_NAME}_INCLUDE_DIR_FORTRAN "@PACKAGE_INCLUDE_INSTALL_DIR@/shared")
|
||||
endif ()
|
||||
elseif (comp STREQUAL "static")
|
||||
list (REMOVE_ITEM ${HDF5_PACKAGE_NAME}_FIND_COMPONENTS ${comp})
|
||||
set (${HDF5_PACKAGE_NAME}_LIB_TYPE ${${HDF5_PACKAGE_NAME}_LIB_TYPE} ${comp})
|
||||
|
||||
if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN)
|
||||
if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN AND ${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN STREQUAL "STATIC")
|
||||
set (${HDF5_PACKAGE_NAME}_INCLUDE_DIR_FORTRAN "@PACKAGE_INCLUDE_INSTALL_DIR@/static")
|
||||
endif ()
|
||||
endif ()
|
||||
|
@ -11,9 +11,9 @@
|
||||
#
|
||||
# This is the CMakeCache file.
|
||||
|
||||
########################
|
||||
#########################
|
||||
# EXTERNAL cache entries
|
||||
########################
|
||||
#########################
|
||||
|
||||
set (CMAKE_INSTALL_FRAMEWORK_PREFIX "Library/Frameworks" CACHE STRING "Frameworks installation directory" FORCE)
|
||||
|
||||
@ -25,14 +25,14 @@ set (HDF_PACKAGE_NAMESPACE "hdf5::" CACHE STRING "Name for HDF package namespace
|
||||
|
||||
set (HDF5_BUILD_CPP_LIB ON CACHE BOOL "Build HDF5 C++ Library" FORCE)
|
||||
|
||||
set (HDF5_BUILD_EXAMPLES ON CACHE BOOL "Build HDF5 Library Examples" FORCE)
|
||||
|
||||
set (HDF5_BUILD_FORTRAN ON CACHE BOOL "Build FORTRAN support" FORCE)
|
||||
|
||||
set (HDF5_BUILD_HL_LIB ON CACHE BOOL "Build HIGH Level HDF5 Library" FORCE)
|
||||
|
||||
set (HDF5_BUILD_TOOLS ON CACHE BOOL "Build HDF5 Tools" FORCE)
|
||||
|
||||
set (HDF5_BUILD_EXAMPLES ON CACHE BOOL "Build HDF5 Library Examples" FORCE)
|
||||
|
||||
set (HDF5_ENABLE_Z_LIB_SUPPORT ON CACHE BOOL "Enable Zlib Filters" FORCE)
|
||||
|
||||
set (HDF5_ENABLE_SZIP_SUPPORT ON CACHE BOOL "Use SZip Filter" FORCE)
|
||||
|
@ -1,5 +1,5 @@
|
||||
<CPackWiXPatch>
|
||||
<CPackWiXFragment Id="CM_CP_libraries.bin.hdf5.dll">
|
||||
<CPackWiXFragment Id="CM_CP_libraries.bin.@WIX_CMP_NAME@.dll">
|
||||
<Environment Id="PATH"
|
||||
Name="PATH"
|
||||
Value="[CM_DP_libraries.bin]"
|
@ -122,6 +122,10 @@ if (NOT TEST_RESULT EQUAL TEST_EXPECT)
|
||||
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
|
||||
message (STATUS "Output :\n${TEST_STREAM}")
|
||||
endif ()
|
||||
if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
|
||||
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
|
||||
message (STATUS "Error Output :\n${TEST_STREAM}")
|
||||
endif ()
|
||||
endif ()
|
||||
message (FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != ${TEST_EXPECT}.\n${TEST_ERROR}")
|
||||
endif ()
|
||||
|
@ -42,7 +42,7 @@ elseif(MINGW)
|
||||
set (CMAKE_CROSSCOMPILING_EMULATOR wine)
|
||||
|
||||
include_directories(/usr/${TOOLCHAIN_PREFIX}/include)
|
||||
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
|
||||
set (CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
|
||||
else ()
|
||||
set (CMAKE_SYSTEM_NAME Linux)
|
||||
|
||||
|
@ -1,16 +1,16 @@
|
||||
# Uncomment the following to use cross-compiling
|
||||
#set(CMAKE_SYSTEM_NAME Linux)
|
||||
#set (CMAKE_SYSTEM_NAME Linux)
|
||||
|
||||
set(CMAKE_COMPILER_VENDOR "clang")
|
||||
set (CMAKE_COMPILER_VENDOR "clang")
|
||||
|
||||
if(WIN32)
|
||||
set(CMAKE_C_COMPILER clang-cl)
|
||||
set(CMAKE_CXX_COMPILER clang-cl)
|
||||
set (CMAKE_C_COMPILER clang-cl)
|
||||
set (CMAKE_CXX_COMPILER clang-cl)
|
||||
else()
|
||||
set(CMAKE_C_COMPILER clang)
|
||||
set(CMAKE_CXX_COMPILER clang++)
|
||||
set (CMAKE_C_COMPILER clang)
|
||||
set (CMAKE_CXX_COMPILER clang++)
|
||||
endif()
|
||||
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||
set (CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||
|
||||
# the following is used if cross-compiling
|
||||
set(CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
set (CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
|
@ -1,10 +1,10 @@
|
||||
# The following line will use cross-compiling
|
||||
set(CMAKE_SYSTEM_NAME Linux)
|
||||
set (CMAKE_SYSTEM_NAME Linux)
|
||||
|
||||
set(CMAKE_COMPILER_VENDOR "CrayLinuxEnvironment")
|
||||
set (CMAKE_COMPILER_VENDOR "CrayLinuxEnvironment")
|
||||
|
||||
set(CMAKE_C_COMPILER cc)
|
||||
set(CMAKE_Fortran_COMPILER ftn)
|
||||
set (CMAKE_C_COMPILER cc)
|
||||
set (CMAKE_Fortran_COMPILER ftn)
|
||||
|
||||
# the following is used if cross-compiling
|
||||
set(CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
set (CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
|
@ -1,11 +1,11 @@
|
||||
# Uncomment the following line and the correct system name to use cross-compiling
|
||||
#set(CMAKE_SYSTEM_NAME Linux)
|
||||
#set (CMAKE_SYSTEM_NAME Linux)
|
||||
|
||||
set(CMAKE_COMPILER_VENDOR "GCC")
|
||||
set (CMAKE_COMPILER_VENDOR "GCC")
|
||||
|
||||
set(CMAKE_C_COMPILER cc)
|
||||
set(CMAKE_CXX_COMPILER c++)
|
||||
set(CMAKE_Fortran_COMPILER gfortran)
|
||||
set (CMAKE_C_COMPILER cc)
|
||||
set (CMAKE_CXX_COMPILER c++)
|
||||
set (CMAKE_Fortran_COMPILER gfortran)
|
||||
|
||||
# the following is used if cross-compiling
|
||||
set(CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
set (CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
|
@ -11,4 +11,4 @@ set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
|
||||
set (CMAKE_CROSSCOMPILING_EMULATOR wine64)
|
||||
|
||||
include_directories(/usr/${TOOLCHAIN_PREFIX}/include)
|
||||
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
|
||||
set (CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
|
||||
|
@ -1,11 +1,11 @@
|
||||
# Uncomment the following to use cross-compiling
|
||||
#set(CMAKE_SYSTEM_NAME Linux)
|
||||
#set (CMAKE_SYSTEM_NAME Linux)
|
||||
|
||||
set(CMAKE_COMPILER_VENDOR "PGI")
|
||||
set (CMAKE_COMPILER_VENDOR "PGI")
|
||||
|
||||
set(CMAKE_C_COMPILER pgcc)
|
||||
set(CMAKE_CXX_COMPILER pgc++)
|
||||
set(CMAKE_Fortran_COMPILER pgf90)
|
||||
set (CMAKE_C_COMPILER pgcc)
|
||||
set (CMAKE_CXX_COMPILER pgc++)
|
||||
set (CMAKE_Fortran_COMPILER pgf90)
|
||||
|
||||
# the following is used if cross-compiling
|
||||
set(CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
set (CMAKE_CROSSCOMPILING_EMULATOR "")
|
||||
|
1010
doxygen/dox/ExamplesAPI.dox
Normal file
@ -50,10 +50,10 @@ Parallel HDF5, and the HDF5-1.10 VDS and SWMR new features:
|
||||
</tr>
|
||||
<tr>
|
||||
<td style="background-color:#F5F5F5">
|
||||
<a href="https://portal.hdfgroup.org/display/HDF5/Introduction+to+Parallel+HDF5">Introduction to Parallel HDF5</a>
|
||||
\ref IntroParHDF5
|
||||
</td>
|
||||
<td>
|
||||
A brief introduction to Parallel HDF5. If you are new to HDF5 please see the @ref LearnBasics topic first.
|
||||
A brief introduction to Parallel HDF5. If you are new to HDF5 please see the @ref LearnBasics topic first.
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
|
@ -124,7 +124,7 @@ It is a 2-dimensional 5 x 3 array (the dataspace). The datatype should not be co
|
||||
</ul>
|
||||
|
||||
\subsubsection subsec_intro_desc_prop_dspace Dataspaces
|
||||
A dataspace describes the layout of a dataset’s data elements. It can consist of no elements (NULL),
|
||||
A dataspace describes the layout of a dataset's data elements. It can consist of no elements (NULL),
|
||||
a single element (scalar), or a simple array.
|
||||
|
||||
<table>
|
||||
@ -141,7 +141,7 @@ in size (i.e. they are extendible).
|
||||
|
||||
There are two roles of a dataspace:
|
||||
\li It contains the spatial information (logical layout) of a dataset stored in a file. This includes the rank and dimensions of a dataset, which are a permanent part of the dataset definition.
|
||||
\li It describes an application’s data buffers and data elements participating in I/O. In other words, it can be used to select a portion or subset of a dataset.
|
||||
\li It describes an application's data buffers and data elements participating in I/O. In other words, it can be used to select a portion or subset of a dataset.
|
||||
|
||||
<table>
|
||||
<caption>The dataspace is used to describe both the logical layout of a dataset and a subset of a dataset.</caption>
|
||||
@ -602,12 +602,12 @@ Navigate back: \ref index "Main" / \ref GettingStarted
|
||||
@page HDF5Examples HDF5 Examples
|
||||
Example programs of how to use HDF5 are provided below.
|
||||
For HDF-EOS specific examples, see the <a href="http://hdfeos.org/zoo/index.php">examples</a>
|
||||
of how to access and visualize NASA HDF-EOS files using IDL, MATLAB, and NCL on the
|
||||
<a href="http://hdfeos.org/">HDF-EOS Tools and Information Center</a> page.
|
||||
of how to access and visualize NASA HDF-EOS files using Python, IDL, MATLAB, and NCL
|
||||
on the <a href="http://hdfeos.org/">HDF-EOS Tools and Information Center</a> page.
|
||||
|
||||
\section secHDF5Examples Examples
|
||||
\li \ref LBExamples
|
||||
\li <a href="https://portal.hdfgroup.org/display/HDF5/Examples+by+API">Examples by API</a>
|
||||
\li \ref ExAPI
|
||||
\li <a href="https://portal.hdfgroup.org/display/HDF5/Examples+in+the+Source+Code">Examples in the Source Code</a>
|
||||
\li <a href="https://portal.hdfgroup.org/display/HDF5/Other+Examples">Other Examples</a>
|
||||
|
||||
|
569
doxygen/dox/IntroParExamples.dox
Normal file
@ -0,0 +1,569 @@
|
||||
/** @page IntroParContHyperslab Writing by Contiguous Hyperslab
|
||||
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
<hr>
|
||||
|
||||
This example shows how to write a contiguous buffer in memory to a contiguous hyperslab in a file. In this case,
|
||||
each parallel process writes a contiguous hyperslab to the file.
|
||||
|
||||
In the C example (figure a), each hyperslab in memory consists of an equal number of consecutive rows. In the FORTRAN
|
||||
90 example (figure b), each hyperslab in memory consists of
|
||||
an equal number of consecutive columns. This reflects the difference in the storage order for C and FORTRAN 90.
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Figure a</strong> C Example</th>
|
||||
<th><strong>Figure b</strong> Fortran Example</th>
|
||||
</tr><tr>
|
||||
<td>
|
||||
\image html pcont_hy_figa.gif
|
||||
</td>
|
||||
<td>
|
||||
\image html pcont_hy_figb.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section secIntroParContHyperslabC Writing a Contiguous Hyperslab in C
|
||||
In this example, you have a dataset of 8 (rows) x 5 (columns) and each process writes an equal number
|
||||
of rows to the dataset. The dataset hyperslab is defined as follows:
|
||||
\code
|
||||
count [0] = dimsf [0] / number_processes
|
||||
count [1] = dimsf [1]
|
||||
\endcode
|
||||
where,
|
||||
\code
|
||||
dimsf [0] is the number of rows in the dataset
|
||||
dimsf [1] is the number of columns in the dataset
|
||||
\endcode
|
||||
The offset for the hyperslab is different for each process:
|
||||
\code
|
||||
offset [0] = k * count[0]
|
||||
offset [1] = 0
|
||||
\endcode
|
||||
where,
|
||||
\code
|
||||
"k" is the process id number
|
||||
count [0] is the number of rows written in each hyperslab
|
||||
offset [1] = 0 indicates to start at the beginning of the row
|
||||
\endcode
|
||||
|
||||
The number of processes that you could use would be 1, 2, 4, or 8. The number of rows that would be written by each slab is as follows:
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Processes</strong></th>
|
||||
<th><strong>Size of count[0](\# of rows) </strong></th>
|
||||
</tr><tr>
|
||||
<td>1</td><td>8</td>
|
||||
</tr><tr>
|
||||
<td>2</td><td>4</td>
|
||||
</tr><tr>
|
||||
<td>4</td><td>2</td>
|
||||
</tr><tr>
|
||||
<td>8</td><td>1</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
If using 4 processes, the offset and count parameters for Process 1 would look like:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
\image html pcont_hy_figc.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The code would look like the following:
|
||||
\code
|
||||
71 /*
|
||||
72 * Each process defines dataset in memory and writes it to the hyperslab
|
||||
73 * in the file.
|
||||
74 */
|
||||
75 count[0] = dimsf[0]/mpi_size;
|
||||
76 count[1] = dimsf[1];
|
||||
77 offset[0] = mpi_rank * count[0];
|
||||
78 offset[1] = 0;
|
||||
79 memspace = H5Screate_simple(RANK, count, NULL);
|
||||
80
|
||||
81 /*
|
||||
82 * Select hyperslab in the file.
|
||||
83 */
|
||||
84 filespace = H5Dget_space(dset_id);
|
||||
85 H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
|
||||
\endcode
|
||||
|
||||
Below is the example program:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/C/H5Parallel/ph5_hyperslab_by_row.c">hyperslab_by_row.c</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
If using this example with 4 processes, then,
|
||||
\li Process 0 writes "10"s to the file.
|
||||
\li Process 1 writes "11"s.
|
||||
\li Process 2 writes "12"s.
|
||||
\li Process 3 writes "13"s.
|
||||
|
||||
The following is the output from h5dump for the HDF5 file created by this example using 4 processes:
|
||||
\code
|
||||
HDF5 "SDS_row.h5" {
|
||||
GROUP "/" {
|
||||
DATASET "IntArray" {
|
||||
DATATYPE H5T_STD_I32BE
|
||||
DATASPACE SIMPLE { ( 8, 5 ) / ( 8, 5 ) }
|
||||
DATA {
|
||||
10, 10, 10, 10, 10,
|
||||
10, 10, 10, 10, 10,
|
||||
11, 11, 11, 11, 11,
|
||||
11, 11, 11, 11, 11,
|
||||
12, 12, 12, 12, 12,
|
||||
12, 12, 12, 12, 12,
|
||||
13, 13, 13, 13, 13,
|
||||
13, 13, 13, 13, 13
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
\endcode
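For readers who want the whole by-row flow in one place, the following is a condensed C sketch assembled from the calls quoted above. It is a minimal illustration that assumes an 8 x 5 dataset, a process count that divides 8, and no error checking; it is not the linked hyperslab_by_row.c.
\code
#include "hdf5.h"
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int     mpi_size, mpi_rank;
    hsize_t dimsf[2] = {8, 5};      /* whole dataset: 8 rows x 5 columns */
    hsize_t count[2], offset[2], i;
    hid_t   fapl, file, filespace, memspace, dset, dxpl;
    int    *data;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* File access property list with MPI-IO access */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    file = H5Fcreate("SDS_row.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* Create the dataset over the full 8 x 5 extent */
    filespace = H5Screate_simple(2, dimsf, NULL);
    dset = H5Dcreate2(file, "IntArray", H5T_STD_I32BE, filespace,
                      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Sclose(filespace);

    /* Each rank owns a contiguous band of rows */
    count[0]  = dimsf[0] / (hsize_t)mpi_size;
    count[1]  = dimsf[1];
    offset[0] = (hsize_t)mpi_rank * count[0];
    offset[1] = 0;

    memspace  = H5Screate_simple(2, count, NULL);
    filespace = H5Dget_space(dset);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);

    /* Fill the local buffer with 10 + rank, matching the output shown above */
    data = (int *)malloc(count[0] * count[1] * sizeof(int));
    for (i = 0; i < count[0] * count[1]; i++)
        data[i] = 10 + mpi_rank;

    /* Collective write of the selected hyperslab */
    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, data);

    free(data);
    H5Pclose(dxpl);
    H5Sclose(memspace);
    H5Sclose(filespace);
    H5Dclose(dset);
    H5Pclose(fapl);
    H5Fclose(file);
    MPI_Finalize();
    return 0;
}
\endcode
Run under mpiexec with 1, 2, 4, or 8 ranks; with 4 ranks the h5dump output should match the listing above.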
|
||||
|
||||
|
||||
\section secIntroParContHyperslabFort Writing a Contiguous Hyperslab in Fortran
|
||||
In this example you have a dataset of 5 (rows) x 8 (columns). Since a contiguous hyperslab in Fortran 90
|
||||
consists of consecutive columns, each process will be writing an equal number of columns to the dataset.
|
||||
|
||||
You would define the size of the hyperslab to write to the dataset as follows:
|
||||
\code
|
||||
count(1) = dimsf(1)
|
||||
count(2) = dimsf(2) / number_of_processes
|
||||
\endcode
|
||||
|
||||
where,
|
||||
\code
|
||||
dimsf(1) is the number of rows in the dataset
|
||||
dimsf(2) is the number of columns
|
||||
\endcode
|
||||
|
||||
The offset for the hyperslab dimension would be different for each process:
|
||||
\code
|
||||
offset (1) = 0
|
||||
offset (2) = k * count (2)
|
||||
\endcode
|
||||
|
||||
where,
|
||||
\code
|
||||
offset (1) = 0 indicates to start at the beginning of the column
|
||||
"k" is the process id number
|
||||
"count(2) is the number of columns to be written by each hyperslab
|
||||
\endcode
|
||||
|
||||
The number of processes that could be used in this example is 1, 2, 4, or 8. The number of
columns that would be written by each slab is as follows:
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Processes</strong></th>
|
||||
<th><strong>Size of count (2)(\# of columns) </strong></th>
|
||||
</tr><tr>
|
||||
<td>1</td><td>8</td>
|
||||
</tr><tr>
|
||||
<td>2</td><td>4</td>
|
||||
</tr><tr>
|
||||
<td>4</td><td>2</td>
|
||||
</tr><tr>
|
||||
<td>8</td><td>1</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
If using 4 processes, the offset and count parameters for Process 1 would look like:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
\image html pcont_hy_figd.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The code would look like the following:
|
||||
\code
|
||||
69 ! Each process defines dataset in memory and writes it to the hyperslab
|
||||
70 ! in the file.
|
||||
71 !
|
||||
72 count(1) = dimsf(1)
|
||||
73 count(2) = dimsf(2)/mpi_size
|
||||
74 offset(1) = 0
|
||||
75 offset(2) = mpi_rank * count(2)
|
||||
76 CALL h5screate_simple_f(rank, count, memspace, error)
|
||||
77 !
|
||||
78 ! Select hyperslab in the file.
|
||||
79 !
|
||||
80 CALL h5dget_space_f(dset_id, filespace, error)
|
||||
81 CALL h5sselect_hyperslab_f (filespace, H5S_SELECT_SET_F, offset, count, error)
|
||||
\endcode
|
||||
|
||||
Below is the F90 example program which illustrates how to write contiguous hyperslabs by column in Parallel HDF5:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/Fortran/H5Parallel/ph5_f90_hyperslab_by_col.F90">hyperslab_by_col.F90</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
If you run this program with 4 processes and look at the output with h5dump, you will notice that the output is
much like the output shown above for the C example. This is because h5dump is written in C. The data would be
displayed in columns if it were printed using Fortran 90 code.
|
||||
|
||||
<hr>
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
|
||||
@page IntroParRegularSpaced Writing by Regularly Spaced Data
|
||||
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
<hr>
|
||||
|
||||
In this case, each process writes data from a contiguous buffer into disconnected locations in the file, using a regular pattern.
|
||||
|
||||
In C it is done by selecting a hyperslab in a file that consists of regularly spaced columns. In F90, it is done by selecting a
|
||||
hyperslab in a file that consists of regularly spaced rows.
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Figure a</strong> C Example</th>
|
||||
<th><strong>Figure b</strong> Fortran Example</th>
|
||||
</tr><tr>
|
||||
<td>
|
||||
\image html preg_figa.gif
|
||||
</td>
|
||||
<td>
|
||||
\image html preg_figb.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
\section secIntroParRegularSpacedC Writing Regularly Spaced Columns in C
|
||||
In this example, you have two processes that write to the same dataset, each writing to
|
||||
every other column in the dataset. For each process the hyperslab in the file is set up as follows:
|
||||
\code
|
||||
89 count[0] = 1;
|
||||
90 count[1] = dimsm[1];
|
||||
91 offset[0] = 0;
|
||||
92 offset[1] = mpi_rank;
|
||||
93 stride[0] = 1;
|
||||
94 stride[1] = 2;
|
||||
95 block[0] = dimsf[0];
|
||||
96 block[1] = 1;
|
||||
\endcode
|
||||
|
||||
The stride is 2 for dimension 1 to indicate that every other position along this
|
||||
dimension will be written to. A stride of 1 indicates that every position along a dimension will be written to.
|
||||
|
||||
For two processes, the mpi_rank will be either 0 or 1. Therefore:
|
||||
\li Process 0 writes to even columns (0, 2, 4...)
|
||||
\li Process 1 writes to odd columns (1, 3, 5...)
|
||||
|
||||
The block size allows each process to write a column of data to every other position in the dataset.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
\image html preg_figc.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Below is an example program for writing hyperslabs by column in Parallel HDF5:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/C/H5Parallel/ph5_hyperslab_by_col.c">hyperslab_by_col.c</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The following is the output from h5dump for the HDF5 file created by this example:
|
||||
\code
|
||||
HDF5 "SDS_col.h5" {
|
||||
GROUP "/" {
|
||||
DATASET "IntArray" {
|
||||
DATATYPE H5T_STD_I32BE
|
||||
DATASPACE SIMPLE { ( 8, 6 ) / ( 8, 6 ) }
|
||||
DATA {
|
||||
1, 2, 10, 20, 100, 200,
|
||||
1, 2, 10, 20, 100, 200,
|
||||
1, 2, 10, 20, 100, 200,
|
||||
1, 2, 10, 20, 100, 200,
|
||||
1, 2, 10, 20, 100, 200,
|
||||
1, 2, 10, 20, 100, 200,
|
||||
1, 2, 10, 20, 100, 200,
|
||||
1, 2, 10, 20, 100, 200
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
\endcode
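To make the parameter values above concrete, here is a hedged C sketch of how they might be wired into the actual selection calls, assuming a two-rank run, the 8 x 6 dataset shown in the h5dump output above, and an 8 x 3 local buffer; the helper name is hypothetical and the code is not taken from the linked example.
\code
#include "hdf5.h"

/* Select, in the 8 x 6 file dataspace, the three full-height columns owned by
 * this rank when two ranks interleave single columns (stride 2).  The values
 * mirror lines 89-96 quoted above.  Returns the matching memory dataspace.   */
static hid_t select_alternating_columns(hid_t dset_id, int mpi_rank,
                                        hid_t *filespace_out)
{
    hsize_t dimsf[2]  = {8, 6};                 /* whole dataset              */
    hsize_t dimsm[2]  = {8, 3};                 /* local buffer: 8 x 3        */
    hsize_t count[2]  = {1, dimsm[1]};          /* 1 x 3 blocks               */
    hsize_t offset[2] = {0, (hsize_t)mpi_rank}; /* rank 0 -> even, 1 -> odd   */
    hsize_t stride[2] = {1, 2};                 /* skip every other column    */
    hsize_t block[2]  = {dimsf[0], 1};          /* each block is one column   */
    hid_t   filespace = H5Dget_space(dset_id);

    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, block);
    *filespace_out = filespace;

    return H5Screate_simple(2, dimsm, NULL);    /* contiguous 8 x 3 in memory */
}
\endcode
A collective H5Dwrite with the returned memory space, the selected file space, and an H5FD_MPIO_COLLECTIVE transfer property list then writes this rank's three columns in a single call.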
|
||||
|
||||
|
||||
\section secIntroParRegularSpacedFort Writing Regularly Spaced Rows in Fortran
|
||||
In this example, you have two processes that write to the same dataset, each writing to every
|
||||
other row in the dataset.
|
||||
|
||||
|
||||
You would define the size of the hyperslab to write to the dataset as follows:
|
||||
\code
|
||||
83 ! Each process defines dataset in memory and writes it to
|
||||
84 ! the hyperslab in the file.
|
||||
85 !
|
||||
86 count(1) = dimsm(1)
|
||||
87 count(2) = 1
|
||||
88 offset(1) = mpi_rank
|
||||
89 offset(2) = 0
|
||||
90 stride(1) = 2
|
||||
91 stride(2) = 1
|
||||
92 block(1) = 1
|
||||
93 block(2) = dimsf(2)
|
||||
\endcode
|
||||
|
||||
The stride is 2 for dimension 1 to indicate that every other position along this dimension will
|
||||
be written to. A stride of 1 indicates that every position along a dimension will be written to.
|
||||
|
||||
For two processes, the mpi_rank will be either 0 or 1. Therefore:
|
||||
\li Process 0 writes to even rows (0, 2, 4 ...)
|
||||
\li Process 1 writes to odd rows (1, 3, 5 ...)
|
||||
|
||||
The block size allows each process to write a row of data to every other position in the dataset,
|
||||
rather than just a point of data.
|
||||
|
||||
The following shows the data written by Process 1 to the file:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
\image html preg_figd.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Below is the example program for writing hyperslabs by row in Parallel HDF5:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/Fortran/H5Parallel/ph5_f90_hyperslab_by_row.F90">hyperslab_by_row.F90</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The output of h5dump for the file created by this program will look like the output shown above for the C example. This is
because h5dump is written in C. The data would be displayed in rows if it were printed using Fortran 90 code.
|
||||
|
||||
<hr>
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
|
||||
@page IntroParPattern Writing by Pattern
|
||||
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
<hr>
|
||||
|
||||
This is another example of writing data into disconnected locations in a file. Each process writes data from the contiguous
|
||||
buffer into regularly scattered locations in the file.
|
||||
|
||||
Each process defines a hyperslab in the file as described below and writes data to it. The C and Fortran 90 examples below
|
||||
result in the same data layout in the file.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Figure a</strong> C Example</th>
|
||||
<th><strong>Figure b</strong> Fortran Example</th>
|
||||
</tr><tr>
|
||||
<td>
|
||||
\image html ppatt_figa.gif
|
||||
</td>
|
||||
<td>
|
||||
\image html ppatt_figb.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The C and Fortran 90 examples use four processes to write the pattern shown above. Each process defines a hyperslab by:
|
||||
\li Specifying a stride of 2 for each dimension, which indicates that you wish to write to every other position along a dimension.
|
||||
\li Specifying a different offset for each process:
|
||||
<table>
|
||||
<tr>
|
||||
<th rowspan="3"><strong>C</strong></th><th>Process 0</th><th>Process 1</th><th>Process 2</th><th>Process 3</th>
|
||||
</tr><tr>
|
||||
<td>offset[0] = 0</td><td>offset[0] = 1</td><td>offset[0] = 0</td><td>offset[0] = 1</td>
|
||||
</tr><tr>
|
||||
<td>offset[1] = 0</td><td>offset[1] = 0</td><td>offset[1] = 1</td><td>offset[1] = 1</td>
|
||||
</tr><tr>
|
||||
<th rowspan="3"><strong>Fortran</strong></th><th>Process 0</th><th>Process 1</th><th>Process 2</th><th>Process 3</th>
|
||||
</tr><tr>
|
||||
<td>offset(1) = 0</td><td>offset(1) = 0</td><td>offset(1) = 1</td><td>offset(1) = 1</td>
|
||||
</tr><tr>
|
||||
<td>offset(2) = 0</td><td>offset(2) = 1</td><td>offset(2) = 0</td><td>offset(2) = 1</td>
|
||||
</tr>
|
||||
</table>
|
||||
\li Specifying the size of the slab to write. The count is the number of positions along a dimension to write to. If writing a 4 x 2 slab,
|
||||
then the count would be:
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>C</strong></th><th>Fortran</th>
|
||||
</tr><tr>
|
||||
<td>count[0] = 4</td><td>count(1) = 2</td>
|
||||
</tr><tr>
|
||||
<td>count[1] = 2</td><td>count(2) = 4</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
For example, the offset, count, and stride parameters for Process 2 would look like:
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Figure a</strong> C Example</th>
|
||||
<th><strong>Figure b</strong> Fortran Example</th>
|
||||
</tr><tr>
|
||||
<td>
|
||||
\image html ppatt_figc.gif
|
||||
</td>
|
||||
<td>
|
||||
\image html ppatt_figd.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Below are example programs for writing hyperslabs by pattern in Parallel HDF5:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/C/H5Parallel/ph5_hyperslab_by_pattern.c">hyperslab_by_pattern.c</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/Fortran/H5Parallel/ph5_f90_hyperslab_by_pattern.F90">hyperslab_by_pattern.F90</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The following is the output from h5dump for the HDF5 file created in this example:
|
||||
\code
|
||||
HDF5 "SDS_pat.h5" {
|
||||
GROUP "/" {
|
||||
DATASET "IntArray" {
|
||||
DATATYPE H5T_STD_I32BE
|
||||
DATASPACE SIMPLE { ( 8, 4 ) / ( 8, 4 ) }
|
||||
DATA {
|
||||
1, 3, 1, 3,
|
||||
2, 4, 2, 4,
|
||||
1, 3, 1, 3,
|
||||
2, 4, 2, 4,
|
||||
1, 3, 1, 3,
|
||||
2, 4, 2, 4,
|
||||
1, 3, 1, 3,
|
||||
2, 4, 2, 4
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
\endcode
|
||||
The h5dump utility is written in C so the output is in C order.
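As a worked detail, the per-process offsets in the table above can be derived from the MPI rank. The sketch below is one hedged way to do it in C for the C-ordering values (stride 2 in both dimensions, a 4 x 2 count, default 1 x 1 block); the helper is hypothetical and not code from the linked examples.
\code
#include "hdf5.h"

/* Ranks 0..3 map to offsets (0,0), (1,0), (0,1), (1,1), as in the C table. */
static void select_pattern(hid_t filespace, int mpi_rank)
{
    hsize_t offset[2] = {(hsize_t)(mpi_rank % 2), (hsize_t)(mpi_rank / 2)};
    hsize_t stride[2] = {2, 2};   /* every other row and every other column */
    hsize_t count[2]  = {4, 2};   /* 4 x 2 selected elements per rank       */

    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride, count, NULL);
}
\endcode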
|
||||
|
||||
|
||||
<hr>
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
|
||||
@page IntroParChunk Writing by Chunk
|
||||
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
<hr>
|
||||
|
||||
In this example each process writes a "chunk" of data to a dataset. The C and Fortran 90
|
||||
examples result in the same data layout in the file.
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Figure a</strong> C Example</th>
|
||||
<th><strong>Figure b</strong> Fortran Example</th>
|
||||
</tr><tr>
|
||||
<td>
|
||||
\image html pchunk_figa.gif
|
||||
</td>
|
||||
<td>
|
||||
\image html pchunk_figb.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
For this example, four processes are used, and a 4 x 2 chunk is written to the dataset by each process.
|
||||
|
||||
To do this, you would:
|
||||
\li Use the block parameter to specify a chunk of size 4 x 2 (or 2 x 4 for Fortran).
|
||||
\li Use a different offset (start) for each process, based on the chunk size:
|
||||
<table>
|
||||
<tr>
|
||||
<th rowspan="3"><strong>C</strong></th><th>Process 0</th><th>Process 1</th><th>Process 2</th><th>Process 3</th>
|
||||
</tr><tr>
|
||||
<td>offset[0] = 0</td><td>offset[0] = 0</td><td>offset[0] = 4</td><td>offset[0] = 4</td>
|
||||
</tr><tr>
|
||||
<td>offset[1] = 0</td><td>offset[1] = 2</td><td>offset[1] = 0</td><td>offset[1] = 2</td>
|
||||
</tr><tr>
|
||||
<th rowspan="3"><strong>Fortran</strong></th><th>Process 0</th><th>Process 1</th><th>Process 2</th><th>Process 3</th>
|
||||
</tr><tr>
|
||||
<td>offset(1) = 0</td><td>offset(1) = 2</td><td>offset(1) = 0</td><td>offset(1) = 2</td>
|
||||
</tr><tr>
|
||||
<td>offset(2) = 0</td><td>offset(2) = 0</td><td>offset(2) = 4</td><td>offset(2) = 4</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
For example, the offset and block parameters for Process 2 would look like:
|
||||
<table>
|
||||
<tr>
|
||||
<th><strong>Figure a</strong> C Example</th>
|
||||
<th><strong>Figure b</strong> Fortran Example</th>
|
||||
</tr><tr>
|
||||
<td>
|
||||
\image html pchunk_figc.gif
|
||||
</td>
|
||||
<td>
|
||||
\image html pchunk_figd.gif
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
Below are example programs for writing hyperslabs by chunk in Parallel HDF5:
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/C/H5Parallel/ph5_hyperslab_by_chunk.c">hyperslab_by_chunk.c</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/Fortran/H5Parallel/ph5_f90_hyperslab_by_chunk.F90">hyperslab_by_chunk.F90</a>
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
The following is the output from h5dump for the HDF5 file created in this example:
|
||||
\code
|
||||
HDF5 "SDS_chnk.h5" {
|
||||
GROUP "/" {
|
||||
DATASET "IntArray" {
|
||||
DATATYPE H5T_STD_I32BE
|
||||
DATASPACE SIMPLE { ( 8, 4 ) / ( 8, 4 ) }
|
||||
DATA {
|
||||
1, 1, 2, 2,
|
||||
1, 1, 2, 2,
|
||||
1, 1, 2, 2,
|
||||
1, 1, 2, 2,
|
||||
3, 3, 4, 4,
|
||||
3, 3, 4, 4,
|
||||
3, 3, 4, 4,
|
||||
3, 3, 4, 4
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
\endcode
|
||||
The h5dump utility is written in C so the output is in C order.
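The per-process chunk origins in the table above can likewise be computed from the MPI rank. A hedged C sketch for the C layout, where the four 4 x 2 blocks form a 2 x 2 grid (hypothetical helper, not code from the linked examples):
\code
#include "hdf5.h"

/* Ranks 0..3 map to chunk origins (0,0), (0,2), (4,0), (4,2), as in the C table. */
static void select_chunk(hid_t filespace, int mpi_rank)
{
    hsize_t block[2]  = {4, 2};   /* each rank writes one 4 x 2 chunk */
    hsize_t count[2]  = {1, 1};   /* exactly one block per rank       */
    hsize_t offset[2] = {(hsize_t)(mpi_rank / 2) * block[0],
                         (hsize_t)(mpi_rank % 2) * block[1]};

    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, block);
}
\endcode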
|
||||
|
||||
<hr>
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
|
||||
|
||||
*/
|
271
doxygen/dox/IntroParHDF5.dox
Normal file
@ -0,0 +1,271 @@
|
||||
/** @page IntroParHDF5 A Brief Introduction to Parallel HDF5
|
||||
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted
|
||||
<hr>
|
||||
|
||||
If you are new to HDF5 please see the @ref LearnBasics topic first.
|
||||
|
||||
\section sec_pintro_overview Overview of Parallel HDF5 (PHDF5) Design
|
||||
We had several requirements for Parallel HDF5 (PHDF5):
|
||||
\li Parallel HDF5 files had to be compatible with serial HDF5 files and sharable
|
||||
between different serial and parallel platforms.
|
||||
\li Parallel HDF5 had to be designed to have a single file image to all processes,
|
||||
rather than having one file per process. Having one file per process can cause expensive
|
||||
post processing, and the files are not usable by different processes.
|
||||
\li A standard parallel I/O interface had to be portable to different platforms.
|
||||
|
||||
With these requirements of HDF5 our initial target was to support MPI programming, but not
|
||||
for shared memory programming. We had done some experimentation with thread-safe support
|
||||
for Pthreads and for OpenMP, and decided to use these.
|
||||
|
||||
Implementation requirements were to:
|
||||
\li Not use Threads, since they were not commonly supported in 1998 when we were looking at this.
|
||||
\li Not have a reserved process, as this might interfere with parallel algorithms.
|
||||
\li Not spawn any processes, as this is not even commonly supported now.
|
||||
|
||||
The following shows the Parallel HDF5 implementation layers.
|
||||
|
||||
|
||||
\subsection subsec_pintro_prog Parallel Programming with HDF5
|
||||
This tutorial assumes that you are somewhat familiar with parallel programming with MPI (Message Passing Interface).
|
||||
|
||||
If you are not familiar with parallel programming, here is a tutorial that may be of interest:
|
||||
<a href="http://www.nersc.gov/users/training/online-tutorials/introduction-to-scientific-i-o/?show_all=1">Tutorial on HDF5 I/O tuning at NERSC</a>
|
||||
|
||||
Some of the terms that you must understand in this tutorial are:
|
||||
<ul>
|
||||
<li>
|
||||
<strong>MPI Communicator</strong>
|
||||
Allows a group of processes to communicate with each other.
|
||||
|
||||
Following are the MPI routines for initializing MPI and the communicator and finalizing a session with MPI:
|
||||
<table>
|
||||
<tr>
|
||||
<th>C</th>
|
||||
<th>Fortran</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>MPI_Init</td>
|
||||
<td>MPI_INIT</td>
|
||||
<td>Initialize MPI (MPI_COMM_WORLD usually)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>MPI_Comm_size</td>
|
||||
<td>MPI_COMM_SIZE</td>
|
||||
<td>Define how many processes are contained in the communicator</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>MPI_Comm_rank</td>
|
||||
<td>MPI_COMM_RANK</td>
|
||||
<td>Define the process ID number within the communicator (from 0 to n-1)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>MPI_Finalize</td>
|
||||
<td>MPI_FINALIZE</td>
|
||||
<td>Exiting MPI</td>
|
||||
</tr>
|
||||
</table>
|
||||
</li>
|
||||
<li>
|
||||
<strong>Collective</strong>
|
||||
MPI defines this to mean all processes of the communicator must participate in the right order.
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
Parallel HDF5 opens a parallel file with a communicator. It returns a file handle to be used for future access to the file.
|
||||
|
||||
All processes are required to participate in the collective Parallel HDF5 API. Different files can be opened using different communicators.
|
||||
|
||||
Examples of what you can do with the Parallel HDF5 collective API:
|
||||
\li File Operation: Create, open and close a file
|
||||
\li Object Creation: Create, open, and close a dataset
|
||||
\li Object Structure: Extend a dataset (increase dimension sizes)
|
||||
\li Dataset Operations: Write to or read from a dataset
|
||||
(Array data transfer can be collective or independent.)
|
||||
|
||||
Once a file is opened by the processes of a communicator:
|
||||
\li All parts of the file are accessible by all processes.
|
||||
\li All objects in the file are accessible by all processes.
|
||||
\li Multiple processes write to the same dataset.
|
||||
\li Each process writes to an individual dataset.
|
||||
|
||||
Please refer to the Supported Configuration Features Summary in the release notes for the current release
of HDF5 for an up-to-date list of the platforms on which Parallel HDF5 is supported.
|
||||
|
||||
|
||||
\subsection subsec_pintro_create_file Creating and Accessing a File with PHDF5
|
||||
The programming model for creating and accessing a file is as follows:
|
||||
<ol>
|
||||
<li>Set up an access template object to control the file access mechanism.</li>
|
||||
<li>Open the file.</li>
|
||||
<li>Close the file.</li>
|
||||
</ol>
|
||||
|
||||
Each process of the MPI communicator creates an access template and sets it up with MPI parallel
|
||||
access information. This is done with the #H5Pcreate call to obtain the file access property list
|
||||
and the #H5Pset_fapl_mpio call to set up parallel I/O access.
|
||||
|
||||
Following is example code for creating an access template in HDF5:
|
||||
<em>C</em>
|
||||
\code
|
||||
23 MPI_Comm comm = MPI_COMM_WORLD;
|
||||
24 MPI_Info info = MPI_INFO_NULL;
|
||||
25
|
||||
26 /*
|
||||
27 * Initialize MPI
|
||||
28 */
|
||||
29 MPI_Init(&argc, &argv);
|
||||
30 MPI_Comm_size(comm, &mpi_size);
|
||||
31 MPI_Comm_rank(comm, &mpi_rank);
|
||||
32
|
||||
33 /*
|
||||
34 * Set up file access property list with parallel I/O access
|
||||
35 */
|
||||
36 plist_id = H5Pcreate(H5P_FILE_ACCESS);
37 H5Pset_fapl_mpio(plist_id, comm, info);
|
||||
\endcode
|
||||
|
||||
<em>Fortran</em>
|
||||
\code
|
||||
23 comm = MPI_COMM_WORLD
|
||||
24 info = MPI_INFO_NULL
|
||||
25
|
||||
26 CALL MPI_INIT(mpierror)
|
||||
27 CALL MPI_COMM_SIZE(comm, mpi_size, mpierror)
|
||||
28 CALL MPI_COMM_RANK(comm, mpi_rank, mpierror)
|
||||
29 !
|
||||
30 ! Initialize FORTRAN interface
|
||||
31 !
|
||||
32 CALL h5open_f(error)
|
||||
33
|
||||
34 !
|
||||
35 ! Setup file access property list with parallel I/O access.
|
||||
36 !
|
||||
37 CALL h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, error)
38 CALL h5pset_fapl_mpio_f(plist_id, comm, info, error)
|
||||
\endcode
|
||||
|
||||
The following example programs create an HDF5 file using Parallel HDF5:
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/C/H5Parallel/ph5_file_create.c">C: file_create.c</a>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/Fortran/H5Parallel/ph5_f90_file_create.F90">F90: file_create.F90</a>
|
||||
|
||||
|
||||
\subsection subsec_pintro_create_dset Creating and Accessing a Dataset with PHDF5
|
||||
The programming model for creating and accessing a dataset is as follows:
|
||||
<ol>
|
||||
<li>
|
||||
Create or open a Parallel HDF5 file with a collective call to:
|
||||
#H5Dcreate
|
||||
#H5Dopen
|
||||
</li>
|
||||
<li>
|
||||
Obtain a copy of the file transfer property list and set it to use collective or independent I/O.
|
||||
<ul>
|
||||
<li>
|
||||
Do this by first passing a data transfer property list class type to: #H5Pcreate
|
||||
</li>
|
||||
<li>
|
||||
Then set the data transfer mode to either use independent I/O access or to use collective I/O, with a call to: #H5Pset_dxpl_mpio
|
||||
|
||||
Following are the parameters required by this call:
|
||||
<em>C</em>
|
||||
\code
|
||||
herr_t H5Pset_dxpl_mpio (hid_t dxpl_id, H5FD_mpio_xfer_t xfer_mode )
|
||||
dxpl_id IN: Data transfer property list identifier
|
||||
xfer_mode IN: Transfer mode:
|
||||
H5FD_MPIO_INDEPENDENT - use independent I/O access
|
||||
(default)
|
||||
H5FD_MPIO_COLLECTIVE - use collective I/O access
|
||||
\endcode
|
||||
|
||||
<em>Fortran</em>
|
||||
\code
|
||||
h5pset_dxpl_mpio_f (prp_id, data_xfer_mode, hdferr)
|
||||
prp_id IN: Property List Identifier (INTEGER (HID_T))
|
||||
data_xfer_mode IN: Data transfer mode (INTEGER)
|
||||
H5FD_MPIO_INDEPENDENT_F (0)
|
||||
H5FD_MPIO_COLLECTIVE_F (1)
|
||||
hdferr OUT: Error code (INTEGER)
|
||||
\endcode
|
||||
</li>
|
||||
<li>
|
||||
Access the dataset with the defined transfer property list.
|
||||
All processes that have opened a dataset may do collective I/O. Each process may do an independent
|
||||
and arbitrary number of data I/O access calls, using:
|
||||
#H5Dwrite
|
||||
#H5Dread
|
||||
|
||||
If a dataset is unlimited, you can extend it with a collective call to: #H5Dextend
|
||||
</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ol>
|
||||
|
||||
The following code demonstrates a collective write using Parallel HDF5:
|
||||
<em>C</em>
|
||||
\code
|
||||
95 /*
|
||||
96 * Create property list for collective dataset write.
|
||||
97 */
|
||||
98 plist_id = H5Pcreate (H5P_DATASET_XFER);
99 H5Pset_dxpl_mpio (plist_id, H5FD_MPIO_COLLECTIVE);
|
||||
100
|
||||
101 status = H5Dwrite (dset_id, H5T_NATIVE_INT, memspace, filespace,
|
||||
102 plist_id, data);
|
||||
\endcode
|
||||
|
||||
<em>Fortran</em>
|
||||
\code
|
||||
108 ! Create property list for collective dataset write
|
||||
109 !
|
||||
110 CALL h5pcreate_f (H5P_DATASET_XFER_F, plist_id, error)
111 CALL h5pset_dxpl_mpio_f (plist_id, H5FD_MPIO_COLLECTIVE_F, error)
|
||||
112
|
||||
113 !
|
||||
114 ! Write the dataset collectively.
|
||||
115 !
|
||||
116 CALL h5dwrite_f (dset_id, H5T_NATIVE_INTEGER, data, dimsfi, error, &
|
||||
117 file_space_id = filespace, mem_space_id = memspace, xfer_prp = plist_id)
|
||||
\endcode
|
||||
|
||||
The following example programs create an HDF5 dataset using Parallel HDF5:
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/C/H5Parallel/ph5_dataset.c">C: dataset.c</a>
|
||||
<a href="https://github.com/HDFGroup/hdf5-examples/blob/master/Fortran/H5Parallel/ph5_f90_dataset.F90">F90: dataset.F90</a>
|
||||
|
||||
|
||||
\subsubsection subsec_pintro_hyperslabs Hyperslabs
|
||||
The programming model for writing and reading hyperslabs is:
|
||||
\li Each process defines the memory and file hyperslabs.
\li Each process executes a partial write/read call which is either collective or independent.

The memory and file hyperslabs in the first step are defined with #H5Sselect_hyperslab.
|
||||
|
||||
The start (or offset), count, stride, and block parameters define the portion of the dataset
|
||||
to write to. By changing the values of these parameters you can write hyperslabs with Parallel
|
||||
HDF5 by contiguous hyperslab, by regularly spaced data in a column/row, by patterns, and by chunks:
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td>
|
||||
\li @subpage IntroParContHyperslab
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
\li @subpage IntroParRegularSpaced
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
\li @subpage IntroParPattern
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>
|
||||
\li @subpage IntroParChunk
|
||||
</td>
|
||||
</tr>
|
||||
</table>
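The hyperslab pages linked above all illustrate writes; reading a selection back follows the same two-step model with #H5Dread at the end. A hedged C sketch for the by-row case (hypothetical helper, collective transfer assumed):
\code
#include "hdf5.h"

/* Read this rank's band of rows back collectively. */
static void read_my_rows(hid_t dset, int mpi_rank, int mpi_size, int *buf)
{
    hid_t   filespace = H5Dget_space(dset);
    hsize_t dims[2], count[2], offset[2];
    hid_t   memspace, dxpl;

    H5Sget_simple_extent_dims(filespace, dims, NULL);
    count[0]  = dims[0] / (hsize_t)mpi_size;
    count[1]  = dims[1];
    offset[0] = (hsize_t)mpi_rank * count[0];
    offset[1] = 0;

    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
    memspace = H5Screate_simple(2, count, NULL);

    dxpl = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    H5Dread(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, buf);

    H5Pclose(dxpl);
    H5Sclose(memspace);
    H5Sclose(filespace);
}
\endcode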
|
||||
|
||||
|
||||
<hr>
|
||||
Navigate back: \ref index "Main" / \ref GettingStarted
|
||||
|
||||
*/
|
@ -642,7 +642,7 @@ See the programming example for an illustration of the use of these calls.
|
||||
\subsection subsecLBDsetCreateContent File Contents
|
||||
The contents of the file dset.h5 (dsetf.h5 for FORTRAN) are shown below:
|
||||
<table>
|
||||
<caption>Contents of dset.h5 ( dsetf.h5)</caption>
|
||||
<caption>Contents of dset.h5 (dsetf.h5)</caption>
|
||||
<tr>
|
||||
<td>
|
||||
\image html imgLBDsetCreate.gif
|
||||
|
@ -126,7 +126,7 @@ HDF5 Release 1.10
|
||||
<li> \ref subsubsec_dataset_program_transfer
|
||||
<li> \ref subsubsec_dataset_program_read
|
||||
</ul>
|
||||
\li \ref subsec_dataset_transfer Data Transfer
|
||||
\li \ref subsec_dataset_transfer
|
||||
<ul>
|
||||
<li> \ref subsubsec_dataset_transfer_pipe
|
||||
<li> \ref subsubsec_dataset_transfer_filter
|
||||
|
BIN
doxygen/img/pchunk_figa.gif
Normal file
After Width: | Height: | Size: 2.7 KiB |
BIN
doxygen/img/pchunk_figb.gif
Normal file
After Width: | Height: | Size: 2.0 KiB |
BIN
doxygen/img/pchunk_figc.gif
Normal file
After Width: | Height: | Size: 3.1 KiB |
BIN
doxygen/img/pchunk_figd.gif
Normal file
After Width: | Height: | Size: 2.9 KiB |
BIN
doxygen/img/pcont_hy_figa.gif
Normal file
After Width: | Height: | Size: 3.1 KiB |
BIN
doxygen/img/pcont_hy_figb.gif
Normal file
After Width: | Height: | Size: 2.4 KiB |
BIN
doxygen/img/pcont_hy_figc.gif
Normal file
After Width: | Height: | Size: 3.6 KiB |
BIN
doxygen/img/pcont_hy_figd.gif
Normal file
After Width: | Height: | Size: 2.7 KiB |
BIN
doxygen/img/ppatt_figa.gif
Normal file
After Width: | Height: | Size: 2.3 KiB |
BIN
doxygen/img/ppatt_figb.gif
Normal file
After Width: | Height: | Size: 2.4 KiB |
BIN
doxygen/img/ppatt_figc.gif
Normal file
After Width: | Height: | Size: 2.6 KiB |
BIN
doxygen/img/ppatt_figd.gif
Normal file
After Width: | Height: | Size: 2.4 KiB |
BIN
doxygen/img/preg_figa.gif
Normal file
After Width: | Height: | Size: 2.3 KiB |
BIN
doxygen/img/preg_figb.gif
Normal file
After Width: | Height: | Size: 2.0 KiB |
BIN
doxygen/img/preg_figc.gif
Normal file
After Width: | Height: | Size: 3.2 KiB |
BIN
doxygen/img/preg_figd.gif
Normal file
After Width: | Height: | Size: 2.3 KiB |
@ -419,28 +419,32 @@ else ()
|
||||
)
|
||||
endif ()
|
||||
|
||||
set (mod_export_files
|
||||
h5fortran_types.mod
|
||||
hdf5.mod
|
||||
h5fortkit.mod
|
||||
h5global.mod
|
||||
h5a.mod
|
||||
h5d.mod
|
||||
h5e.mod
|
||||
h5f.mod
|
||||
h5g.mod
|
||||
h5i.mod
|
||||
h5l.mod
|
||||
h5lib.mod
|
||||
h5o.mod
|
||||
h5p.mod
|
||||
h5r.mod
|
||||
h5s.mod
|
||||
h5t.mod
|
||||
h5z.mod
|
||||
h5_gen.mod
|
||||
)
|
||||
|
||||
if (BUILD_STATIC_LIBS)
|
||||
set (mod_files
|
||||
${MOD_BUILD_DIR}/h5fortran_types.mod
|
||||
${MOD_BUILD_DIR}/hdf5.mod
|
||||
${MOD_BUILD_DIR}/h5fortkit.mod
|
||||
${MOD_BUILD_DIR}/h5global.mod
|
||||
${MOD_BUILD_DIR}/h5a.mod
|
||||
${MOD_BUILD_DIR}/h5d.mod
|
||||
${MOD_BUILD_DIR}/h5e.mod
|
||||
${MOD_BUILD_DIR}/h5f.mod
|
||||
${MOD_BUILD_DIR}/h5g.mod
|
||||
${MOD_BUILD_DIR}/h5i.mod
|
||||
${MOD_BUILD_DIR}/h5l.mod
|
||||
${MOD_BUILD_DIR}/h5lib.mod
|
||||
${MOD_BUILD_DIR}/h5o.mod
|
||||
${MOD_BUILD_DIR}/h5p.mod
|
||||
${MOD_BUILD_DIR}/h5r.mod
|
||||
${MOD_BUILD_DIR}/h5s.mod
|
||||
${MOD_BUILD_DIR}/h5t.mod
|
||||
${MOD_BUILD_DIR}/h5z.mod
|
||||
${MOD_BUILD_DIR}/h5_gen.mod
|
||||
)
|
||||
foreach (mod_file ${mod_export_files})
|
||||
set (mod_files ${mod_files} ${MOD_BUILD_DIR}/${mod_file})
|
||||
endforeach ()
|
||||
install (
|
||||
FILES
|
||||
${mod_files}
|
||||
@ -462,27 +466,9 @@ if (BUILD_STATIC_LIBS)
|
||||
endif ()
|
||||
|
||||
if (BUILD_SHARED_LIBS)
|
||||
set (modsh_files
|
||||
${MODSH_BUILD_DIR}/h5fortran_types.mod
|
||||
${MODSH_BUILD_DIR}/hdf5.mod
|
||||
${MODSH_BUILD_DIR}/h5fortkit.mod
|
||||
${MODSH_BUILD_DIR}/h5global.mod
|
||||
${MODSH_BUILD_DIR}/h5a.mod
|
||||
${MODSH_BUILD_DIR}/h5d.mod
|
||||
${MODSH_BUILD_DIR}/h5e.mod
|
||||
${MODSH_BUILD_DIR}/h5f.mod
|
||||
${MODSH_BUILD_DIR}/h5g.mod
|
||||
${MODSH_BUILD_DIR}/h5i.mod
|
||||
${MODSH_BUILD_DIR}/h5l.mod
|
||||
${MODSH_BUILD_DIR}/h5lib.mod
|
||||
${MODSH_BUILD_DIR}/h5o.mod
|
||||
${MODSH_BUILD_DIR}/h5p.mod
|
||||
${MODSH_BUILD_DIR}/h5r.mod
|
||||
${MODSH_BUILD_DIR}/h5s.mod
|
||||
${MODSH_BUILD_DIR}/h5t.mod
|
||||
${MODSH_BUILD_DIR}/h5z.mod
|
||||
${MODSH_BUILD_DIR}/h5_gen.mod
|
||||
)
|
||||
foreach (mod_file ${mod_export_files})
|
||||
set (modsh_files ${modsh_files} ${MODSH_BUILD_DIR}/${mod_file})
|
||||
endforeach ()
|
||||
install (
|
||||
FILES
|
||||
${modsh_files}
|
||||
|
@ -235,16 +235,19 @@ endif ()
|
||||
# Add file(s) to CMake Install
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
set (mod_export_files
|
||||
h5ds.mod
|
||||
h5tb.mod
|
||||
h5tb_const.mod
|
||||
h5lt.mod
|
||||
h5lt_const.mod
|
||||
h5im.mod
|
||||
)
|
||||
|
||||
if (BUILD_STATIC_LIBS)
|
||||
set (mod_files
|
||||
${MOD_BUILD_DIR}/h5ds.mod
|
||||
${MOD_BUILD_DIR}/h5tb.mod
|
||||
${MOD_BUILD_DIR}/h5tb_const.mod
|
||||
${MOD_BUILD_DIR}/h5lt.mod
|
||||
${MOD_BUILD_DIR}/h5lt_const.mod
|
||||
${MOD_BUILD_DIR}/h5im.mod
|
||||
)
|
||||
foreach (mod_file ${mod_export_files})
|
||||
set (mod_files ${mod_files} ${MOD_BUILD_DIR}/${mod_file})
|
||||
endforeach ()
|
||||
install (
|
||||
FILES
|
||||
${mod_files}
|
||||
@ -265,14 +268,9 @@ if (BUILD_STATIC_LIBS)
|
||||
endif ()
|
||||
endif ()
|
||||
if (BUILD_SHARED_LIBS)
|
||||
set (modsh_files
|
||||
${MODSH_BUILD_DIR}/h5ds.mod
|
||||
${MODSH_BUILD_DIR}/h5tb.mod
|
||||
${MODSH_BUILD_DIR}/h5tb_const.mod
|
||||
${MODSH_BUILD_DIR}/h5lt.mod
|
||||
${MODSH_BUILD_DIR}/h5lt_const.mod
|
||||
${MODSH_BUILD_DIR}/h5im.mod
|
||||
)
|
||||
foreach (mod_file ${mod_export_files})
|
||||
set (modsh_files ${modsh_files} ${MODSH_BUILD_DIR}/${mod_file})
|
||||
endforeach ()
|
||||
install (
|
||||
FILES
|
||||
${modsh_files}
|
||||
|
@ -80,7 +80,7 @@ endforeach ()
|
||||
|
||||
if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL)
|
||||
get_property (target_name TARGET ${HDF5_JAVA_JNI_LIB_TARGET} PROPERTY OUTPUT_NAME)
|
||||
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$<CONFIG:Debug>:${CMAKE_DEBUG_POSTFIX}>;")
|
||||
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$<OR:$<CONFIG:Debug>,$<CONFIG:Developer>>:${CMAKE_DEBUG_POSTFIX}>;")
|
||||
|
||||
set (last_test "")
|
||||
foreach (example ${HDF_JAVA_EXAMPLES})
|
||||
|
@ -309,7 +309,7 @@ public class TestH5 {
|
||||
@Test
|
||||
public void testH5get_libversion()
|
||||
{
|
||||
int libversion[] = {1, 10, 9};
|
||||
int libversion[] = {1, 10, 11};
|
||||
|
||||
try {
|
||||
H5.H5get_libversion(libversion);
|
||||
|
@ -1308,17 +1308,7 @@ endif ()
|
||||
# Option to build documentation
|
||||
#-----------------------------------------------------------------------------
|
||||
if (DOXYGEN_FOUND)
|
||||
# This cmake function requires that the non-default doxyfile settings are provided with set (DOXYGEN_xxx) commands
|
||||
# In addition the doxyfile aliases @INCLUDE option is not supported and would need to be provided in a set (DOXYGEN_ALIASES) command.
|
||||
# doxygen_add_docs (hdf5lib_doc
|
||||
## ${common_SRCS} ${shared_gen_SRCS} ${H5_PUBLIC_HEADERS} ${H5_PRIVATE_HEADERS} ${H5_GENERATED_HEADERS} ${HDF5_DOXYGEN_DIR}/dox
|
||||
# ${DOXYGEN_INPUT_DIRECTORY}
|
||||
# ALL
|
||||
# WORKING_DIRECTORY ${HDF5_SRC_DIR}
|
||||
# COMMENT "Generating HDF5 library Source Documentation"
|
||||
# )
|
||||
|
||||
# This custom target and doxygen/configure work together
|
||||
# This custom target and doxygen/configure work together
|
||||
# Replace variables inside @@ with the current values
|
||||
add_custom_target (hdf5lib_doc ALL
|
||||
COMMAND ${DOXYGEN_EXECUTABLE} ${HDF5_BINARY_DIR}/Doxyfile
|
||||
|