mirror of
https://github.com/Unidata/netcdf-c.git
synced 2025-02-17 16:50:18 +08:00
added F77 parallel test and clean up a little
This commit is contained in:
parent
d469f39e3c
commit
414b6fefb2
@ -2,7 +2,6 @@
|
||||
|
||||
# This shell runs some parallel I/O tests.
|
||||
|
||||
mpd&
|
||||
set -e
|
||||
echo
|
||||
echo "Testing MPI parallel I/O without netCDF..."
|
||||
|
@ -25,5 +25,4 @@ for tst in $list; do
|
||||
done
|
||||
|
||||
echo "SUCCESS!!!"
|
||||
|
||||
exit 0
|
||||
|
@ -145,8 +145,9 @@ endif #BUILD_F90
|
||||
# Test parallel I/O for F77.
if TEST_F77
if TEST_PARALLEL
check_PROGRAMS += ftst_parallel ftst_parallel_nasa
ftst_parallel_SOURCES = ftst_parallel.F
ftst_parallel_nasa_SOURCES = ftst_parallel_nasa.F
TESTS += run_f77_par_test.sh
# Both parallel F77 tests write an output file; clean them both up.
CLEANFILES += ftst_parallel.nc ftst_parallel_nasa.nc
endif # TEST_PARALLEL
|
||||
|
142
nf_test/ftst_parallel_nasa.F
Normal file
142
nf_test/ftst_parallel_nasa.F
Normal file
@ -0,0 +1,142 @@
|
||||
! This is part of the netCDF package. Copyright 2011 University
! Corporation for Atmospheric Research/Unidata. See COPYRIGHT file
! for conditions of use.

! This program tests netCDF-4 parallel I/O from fortran. This
! variation of the test was contributed by Dan at NASA. Thanks Dan!

! Each of 4 MPI processes writes (then reads back and checks) one
! quarter of a NX x NY real variable, using a checkerboard
! decomposition. The cell values are the writing process's rank.
      program ftst_parallel_nasa
      implicit none
      include 'netcdf.inc'
      include 'mpif.h'

! Name of the netCDF file this test creates.
      character*(*) FILE_NAME
      parameter (FILE_NAME = 'ftst_parallel_nasa.nc')

! The variable is NX x NY, split into four NX/2 x NY/2 quarters,
! one per process.
      integer MAX_DIMS
      parameter (MAX_DIMS = 2)
      integer NX, NY
      parameter (NX = 16)
      parameter (NY = 16)
! This test requires exactly NUM_PROC MPI processes.
      integer NUM_PROC
      parameter (NUM_PROC = 4)
      integer ncid, varid, dimids(MAX_DIMS)
      integer x_dimid, y_dimid
! Local quarter of the data: written out, then read back in.
      real data_out(NY / 2, NX / 2), data_in(NY / 2, NX / 2)
      integer mode_flag
      integer x, y, retval
      integer p, my_rank, ierr
      integer start(MAX_DIMS), count(MAX_DIMS)

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, p, ierr)

      if (my_rank .eq. 0) then
         print *, ' '
         print *, '*** Testing netCDF-4 parallel I/O from F77 again.'
      endif

! There must be NUM_PROC procs for this test; the decomposition
! below assumes ranks 0 through 3.
      if (p .ne. NUM_PROC) then
         print *, 'This test program must be run on 4 processors.'
         stop 2
      endif

! Create some pretend data: every cell of this rank's quarter
! holds the rank number, so the checker below can verify it.
      do x = 1, NX / 2
         do y = 1, NY / 2
            data_out(y, x) = real(my_rank)
         end do
      end do

! Create the netCDF file. nf_mpiio selects MPI-IO parallel access.
      mode_flag = IOR(nf_netcdf4, nf_clobber)
! mode_flag = IOR(nf_netcdf4, nf_classic_model)
      mode_flag = IOR(mode_flag, nf_mpiio)
      retval = nf_create_par(FILE_NAME, mode_flag, MPI_COMM_WORLD,
     $     MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 3

! Define the dimensions.
      retval = nf_def_dim(ncid, "x", NX, x_dimid)
      if (retval .ne. nf_noerr) stop 4
      retval = nf_def_dim(ncid, "y", NY, y_dimid)
      if (retval .ne. nf_noerr) stop 5
      dimids(1) = x_dimid
      dimids(2) = y_dimid

! Define the variable.
      retval = nf_def_var(ncid, "data", NF_FLOAT, MAX_DIMS, dimids,
     $     varid)
      if (retval .ne. nf_noerr) stop 6

! With classic model netCDF-4 file, enddef must be called.
      retval = nf_enddef(ncid)
      if (retval .ne. nf_noerr) stop 7

! Determine what part of the variable will be written for this
! processor. It's a checkerboard decomposition. (Ranks outside
! 0..3 are impossible here; the size check above guarantees it.)
      count(1) = NX / 2
      count(2) = NY / 2
      if (my_rank .eq. 0) then
         start(1) = 1
         start(2) = 1
      else if (my_rank .eq. 1) then
         start(1) = NX / 2 + 1
         start(2) = 1
      else if (my_rank .eq. 2) then
         start(1) = 1
         start(2) = NY / 2 + 1
      else if (my_rank .eq. 3) then
         start(1) = NX / 2 + 1
         start(2) = NY / 2 + 1
      endif

! Write this processor's data.
      retval = nf_put_vara_real(ncid, varid, start, count, data_out)
      if (retval .ne. nf_noerr) then
         print*,'Error writing data ', retval
         print*, NF_STRERROR(retval)
         stop 8
      endif

! Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 9

! Reopen the file.
      retval = nf_open_par(FILE_NAME, IOR(nf_nowrite, nf_mpiio),
     $     MPI_COMM_WORLD, MPI_INFO_NULL, ncid)
      if (retval .ne. nf_noerr) stop 10

! Set collective access on this variable. This will cause all
! reads/writes to happen together on every processor. Fairly
! pointless, in this context, but I want to at least call this
! function once in my testing.
      retval = nf_var_par_access(ncid, varid, nf_collective)
      if (retval .ne. nf_noerr) stop 11

! Read this processor's data.
      retval = nf_get_vara_real(ncid, varid, start, count, data_in)
      if (retval .ne. nf_noerr) stop 12

! Check the data. Compare real to real (the values are small
! whole numbers, so the round trip must be exact).
      do x = 1, NX / 2
         do y = 1, NY / 2
            if (data_in(y, x) .ne. real(my_rank)) then
               print*,data_in(y, x), ' NE ', my_rank
               stop 13
            endif
         end do
      end do

! Close the file.
      retval = nf_close(ncid)
      if (retval .ne. nf_noerr) stop 14

      call MPI_Finalize(ierr)

      if (my_rank .eq. 0) print *,'*** SUCCESS!'

      end program ftst_parallel_nasa
|
@ -1 +1,12 @@
|
||||
#!/bin/sh

# This shell runs some parallel I/O tests for the F77 API.

# Abort immediately if any test fails.
set -e

echo "Testing netCDF parallel I/O through the F77 API..."

# Each F77 parallel test program requires exactly 4 processes.
for prog in ftst_parallel ftst_parallel_nasa; do
    mpiexec -n 4 ./$prog
done

echo "SUCCESS!!!"
exit 0
|
||||
|
@ -1,7 +1,16 @@
|
||||
#!/bin/sh

# This shell runs some parallel I/O tests.

# Abort immediately if any test fails.
set -e

echo "Testing netCDF parallel I/O through the F90 API..."

# These F90 parallel tests each require exactly 4 processes.
for tst in f90tst_parallel f90tst_parallel2 f90tst_parallel3; do
    mpiexec -n 4 ./$tst
done

# This test requires 8 processes.
mpiexec -n 8 ./f90tst_nc4_par

# The fill-value test is currently disabled.
#mpiexec -n 4 ./f90tst_parallel_fill

echo "SUCCESS!!!"
exit 0
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user