[svn-r2753]
Purpose: Adding F90 parallel tests.
Description: Created the testpar directory with the following files:
    ptesthdf5_fortran.f90 - F90 test driver program
    tcheck.f90, thdf5.f90 - handy subroutines to use with the tests
    thyperslab_wr.f90     - F90 test: collectively writes and reads hyperslabs to/from a dataset
Solution:
Platforms tested: Partially tested on modi4 (O2K).
This commit is contained in:
parent d26db0af91
commit 76807a6985
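For orientation, here is a minimal standalone sketch of the column-wise decomposition the new hyperslab test uses; it is not part of the commit, and the DIM1/DIM2 values and the 4-rank count are assumptions taken from thdf5.f90 and a typical small run. Each rank selects one contiguous block of columns of the DIM1 x DIM2 dataset:

     ! Hypothetical illustration only; it mirrors the count/offset arithmetic in thyperslab_wr.f90.
     PROGRAM decomposition_sketch
        IMPLICIT NONE
        INTEGER, PARAMETER :: DIM1 = 64, DIM2 = 128   ! same values as thdf5.f90
        INTEGER, PARAMETER :: mpi_size = 4            ! assumed number of MPI ranks
        INTEGER :: mpi_rank
        INTEGER, DIMENSION(2) :: count, offset
        DO mpi_rank = 0, mpi_size - 1
           count  = (/ DIM1, DIM2/mpi_size /)           ! each rank handles a 64 x 32 block
           offset = (/ 0, mpi_rank*(DIM2/mpi_size) /)   ! blocks start at columns 0, 32, 64, 96
           WRITE(*,*) 'rank', mpi_rank, ' offset =', offset, ' count =', count
        END DO
     END PROGRAM decomposition_sketch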
fortran/testpar/ptesthdf5_fortran.f90 (new file, 64 lines)
@@ -0,0 +1,64 @@
!
! Main program for parallel HDF5 Fortran tests.
!

     PROGRAM PHDF5F90TEST

        USE HDF5    ! This module contains all necessary modules
        USE THDF5
        USE MPI

        IMPLICIT NONE
        INTEGER :: error             ! Error flag
        INTEGER :: error_1 = 0       ! Error flag
        !
        ! MPI definitions and calls.
        !
        INTEGER :: mpierror          ! MPI error flag
        INTEGER :: comm, info
        INTEGER :: mpi_size, mpi_rank

        comm = MPI_COMM_WORLD
        info = MPI_INFO_NULL
        CALL MPI_INIT(mpierror)
        CALL MPI_COMM_SIZE(comm, mpi_size, mpierror)
        CALL MPI_COMM_RANK(comm, mpi_rank, mpierror)
        !
        ! Check that datasets can be divided into equal parts by the processes.
        !
        if ( (mod(DIM1, mpi_size) .ne. 0) .or. (mod(DIM2, mpi_size) .ne. 0) ) then
           if (mpi_rank .eq. 0) then
              write(*,*) "Dimensions must be multiples of # of processors"
              write(*,*) "Exiting..."
           endif
           goto 1000
        endif
        !
        ! Initialize FORTRAN predefined datatypes.
        !
        CALL h5init_types_f(error)
        if (mpi_rank .eq. 0) then
           write(*,*) '==========================================='
           write(*,*) '        Parallel Fortran Tests             '
           write(*,*) '==========================================='
           write(*,*)
        endif
        if (mpi_rank .eq. 0) then
           write(*,*) 'Writing/reading dataset by hyperslabs'
        endif

        CALL dataset_wr_by_hyperslabs(error_1)
        if (error_1 .ne. 0) write(*,*) 'Process ', mpi_rank, ' reports failure'

        if (mpi_rank .eq. 0) then
           write(*,*)
           write(*,*) '==========================================='
           write(*,*) '     Parallel Fortran Tests finished       '
           write(*,*) '==========================================='
        endif
        !
        ! Close FORTRAN predefined datatypes.
        !
        CALL h5close_types_f(error)

1000    continue

        CALL MPI_FINALIZE(mpierror)

     END PROGRAM PHDF5F90TEST
fortran/testpar/tcheck.f90 (new file, 16 lines)
@@ -0,0 +1,16 @@
!
! This file contains the check subroutine, which is used in
! all the Fortran HDF5 test files.
!

     SUBROUTINE check(string, error, total_error)
        CHARACTER(LEN=*) :: string
        INTEGER :: error, total_error
        if (error .lt. 0) then
           total_error = total_error + 1
           write(*,*) string, " failed"
        endif
        RETURN
     END SUBROUTINE check
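A minimal usage sketch for check (hypothetical, but it mirrors how the hyperslab test below calls it): pass the name of the HDF5 call, the status it returned, and a running error counter.

        total_error = 0
        CALL h5screate_simple_f(rank, dimsf, filespace, error)
        CALL check("h5screate_simple_f", error, total_error)   ! increments total_error if error < 0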
fortran/testpar/thdf5.f90 (new file, 10 lines)
@@ -0,0 +1,10 @@
     MODULE THDF5
        USE HDF5
        USE MPI

        ! Dataset dimensions
        INTEGER, PARAMETER :: DIM1 = 64, &
                              DIM2 = 128

     END MODULE THDF5
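A note on these parameters (an observation, not part of the commit): the driver in ptesthdf5_fortran.f90 requires that mpi_size divide both DIM1 and DIM2, so with DIM1 = 64 and DIM2 = 128 the test runs on 1, 2, 4, 8, 16, 32, or 64 processes; on any other count, for example 3, rank 0 prints the dimension warning and all ranks skip the test.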
fortran/testpar/thyperslab_wr.f90 (new file, 210 lines)
@@ -0,0 +1,210 @@
!
! This test writes and reads a dataset by hyperslabs collectively.
!
     SUBROUTINE dataset_wr_by_hyperslabs(total_error)
        USE THDF5
        IMPLICIT NONE

        CHARACTER(LEN=7), PARAMETER :: filename = "sdsf.h5"  ! File name
        CHARACTER(LEN=8), PARAMETER :: dsetname = "IntArray" ! Dataset name

        INTEGER(HID_T) :: file_id       ! File identifier
        INTEGER(HID_T) :: dset_id       ! Dataset identifier
        INTEGER(HID_T) :: filespace     ! Dataspace identifier in file
        INTEGER(HID_T) :: memspace      ! Dataspace identifier in memory
        INTEGER(HID_T) :: plist_id      ! Property list identifier

        INTEGER(HSIZE_T), DIMENSION(2) :: dimsf = (/DIM1,DIM2/) ! Dataset dimensions

        INTEGER(HSIZE_T), DIMENSION(2) :: count
        INTEGER(HSIZE_T), DIMENSION(2) :: offset
        INTEGER, ALLOCATABLE :: data (:,:)      ! Data to write
        INTEGER, ALLOCATABLE :: data_out (:,:)  ! Buffer for data read back
        INTEGER :: rank = 2 ! Dataset rank

        INTEGER :: total_error, error ! Error flags
        !
        ! MPI definitions and calls.
        !
        INTEGER :: mpierror       ! MPI error flag
        INTEGER :: comm, info
        INTEGER :: mpi_size, mpi_rank

        comm = MPI_COMM_WORLD
        info = MPI_INFO_NULL
        CALL MPI_COMM_SIZE(comm, mpi_size, mpierror)
        CALL MPI_COMM_RANK(comm, mpi_rank, mpierror)
        !
        ! Setup file access property list with parallel I/O access.
        !
        CALL h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, error)
        CALL check("h5pcreate_f", error, total_error)
        CALL h5pset_fapl_mpio_f(plist_id, comm, info, error)
        CALL check("h5pset_fapl_mpio_f", error, total_error)

        !
        ! Create the file collectively.
        !
        CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, error, access_prp = plist_id)
        CALL check("h5fcreate_f", error, total_error)
        CALL h5pclose_f(plist_id, error)
        CALL check("h5pclose_f", error, total_error)
        !
        ! Create the data space for the dataset.
        !
        CALL h5screate_simple_f(rank, dimsf, filespace, error)
        CALL check("h5screate_simple_f", error, total_error)

        !
        ! Create the dataset with default properties.
        !
        CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, filespace, &
                         dset_id, error)
        CALL check("h5dcreate_f", error, total_error)
        CALL h5sclose_f(filespace, error)
        CALL check("h5sclose_f", error, total_error)
        !
        ! Each process defines a dataset in memory and writes it to its hyperslab
        ! in the file.
        !
        count(1) = dimsf(1)
        count(2) = dimsf(2)/mpi_size
        offset(1) = 0
        offset(2) = mpi_rank * count(2)
        CALL h5screate_simple_f(rank, count, memspace, error)
        CALL check("h5screate_simple_f", error, total_error)
        !
        ! Select hyperslab in the file.
        !
        CALL h5dget_space_f(dset_id, filespace, error)
        CALL check("h5dget_space_f", error, total_error)
        CALL h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, offset, count, error)
        CALL check("h5sselect_hyperslab_f", error, total_error)
        !
        ! Initialize data buffer with trivial data.
        !
        ALLOCATE ( data(count(1),count(2)) )
        data = mpi_rank + 10
        !
        ! Create property list for collective dataset write.
        !
        CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error)
        CALL check("h5pcreate_f", error, total_error)
        CALL h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, error)
        CALL check("h5pset_dxpl_mpio_f", error, total_error)

        !
        ! Write the dataset collectively.
        !
        CALL h5dwrite_f(dset_id, H5T_NATIVE_INTEGER, data, error, &
                        file_space_id = filespace, mem_space_id = memspace, xfer_prp = plist_id)
        CALL check("h5dwrite_f", error, total_error)
        !
        ! Deallocate data buffer.
        !
        DEALLOCATE(data)

        !
        ! Close dataspaces.
        !
        CALL h5sclose_f(filespace, error)
        CALL check("h5sclose_f", error, total_error)
        CALL h5sclose_f(memspace, error)
        CALL check("h5sclose_f", error, total_error)

        !
        ! Close the dataset.
        !
        CALL h5dclose_f(dset_id, error)
        CALL check("h5dclose_f", error, total_error)

        CALL h5pclose_f(plist_id, error)
        CALL check("h5pclose_f", error, total_error)

        !
        ! Close the file.
        !
        CALL h5fclose_f(file_id, error)
        CALL check("h5fclose_f", error, total_error)
        !
        ! Reopen the file with parallel access.
        !
        CALL h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, error)
        CALL check("h5pcreate_f", error, total_error)
        CALL h5pset_fapl_mpio_f(plist_id, comm, info, error)
        CALL check("h5pset_fapl_mpio_f", error, total_error)
        CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, error, access_prp = plist_id)
        CALL check("h5fopen_f", error, total_error)
        CALL h5pclose_f(plist_id, error)
        CALL check("h5pclose_f", error, total_error)
        !
        ! Open dataset.
        !
        CALL h5dopen_f(file_id, dsetname, dset_id, error)
        CALL check("h5dopen_f", error, total_error)

        !
        ! Each process defines a dataset in memory and reads its hyperslab
        ! from the file.
        !
        count(1) = dimsf(1)
        count(2) = dimsf(2)/mpi_size
        offset(1) = 0
        offset(2) = mpi_rank * count(2)
        CALL h5screate_simple_f(rank, count, memspace, error)
        CALL check("h5screate_simple_f", error, total_error)
        !
        ! Select hyperslab in the file.
        !
        CALL h5dget_space_f(dset_id, filespace, error)
        CALL check("h5dget_space_f", error, total_error)
        CALL h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, offset, count, error)
        CALL check("h5sselect_hyperslab_f", error, total_error)
        !
        ! Allocate data buffers; data holds the expected values.
        !
        ALLOCATE ( data(count(1),count(2)) )
        ALLOCATE ( data_out(count(1),count(2)) )
        data = mpi_rank + 10
        !
        ! Create property list for collective dataset read.
        !
        CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error)
        CALL check("h5pcreate_f", error, total_error)
        CALL h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, error)
        CALL check("h5pset_dxpl_mpio_f", error, total_error)

        !
        ! Read the dataset collectively and compare with the data written.
        !
        CALL h5dread_f(dset_id, H5T_NATIVE_INTEGER, data_out, error, &
                       file_space_id = filespace, mem_space_id = memspace, xfer_prp = plist_id)
        CALL check("h5dread_f", error, total_error)
        if ( any(data .ne. data_out) ) total_error = total_error + 1
        !
        ! Deallocate data buffers.
        !
        DEALLOCATE(data)
        DEALLOCATE(data_out)
        !
        ! Close dataspaces.
        !
        CALL h5sclose_f(filespace, error)
        CALL check("h5sclose_f", error, total_error)
        CALL h5sclose_f(memspace, error)
        CALL check("h5sclose_f", error, total_error)
        !
        ! Close property list.
        !
        CALL h5pclose_f(plist_id, error)
        CALL check("h5pclose_f", error, total_error)
        !
        ! Close dataset.
        !
        CALL h5dclose_f(dset_id, error)
        CALL check("h5dclose_f", error, total_error)
        !
        ! Close the file.
        !
        CALL h5fclose_f(file_id, error)
        CALL check("h5fclose_f", error, total_error)

     END SUBROUTINE dataset_wr_by_hyperslabs