mirror of https://github.com/Unidata/netcdf-c.git
synced 2024-12-15 08:30:11 +08:00
63 lines · 1.7 KiB · C
/* Copyright 2022, UCAR/Unidata See COPYRIGHT file for copying and
 * redistribution conditions.
 *
 * This parallel I/O test checks the behavior of nc_inq_dimlen() after
 * parallel I/O writes.
 *
 * This program was taken from a PnetCDF issue:
 * https://github.com/Parallel-NetCDF/PnetCDF/issues/72, thanks
 * wkliao!
 *
 * wkliao, Ed Hartnett, 4/11/22
 */

#include <nc_tests.h>
#include "err_macros.h"
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <netcdf.h>
#include <netcdf_par.h>

#define FILENAME "tst_parallel6.nc"

int main(int argc, char** argv)
{
    int err = NC_NOERR, rank, nprocs;
    int ncid, cmode, varid, dimid;
    size_t start[1], count[1], nrecs;

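    /* Initialize MPI and find this process's rank and the total number of processes. */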
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

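    /* Create a netCDF-4 file for parallel access. */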
    if (nc_create_par(FILENAME, NC_CLOBBER | NC_NETCDF4, MPI_COMM_WORLD,
                      MPI_INFO_NULL, &ncid)) ERR;

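    /* Define an unlimited record dimension and an int variable on it, with collective access. */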
    if (nc_def_dim(ncid, "time", NC_UNLIMITED, &dimid)) ERR;
    if (nc_def_var(ncid, "var", NC_INT, 1, &dimid, &varid)) ERR;
    if (nc_var_par_access(ncid, varid, NC_COLLECTIVE)) ERR;
    if (nc_enddef(ncid)) ERR;

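    /* Each rank writes one record containing its rank number. */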
    start[0] = rank;
    count[0] = 1;
    if (nc_put_vara_int(ncid, varid, start, count, &rank)) ERR;
    MPI_Barrier(MPI_COMM_WORLD);
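
    /* Cycle through define mode, then query the length of the record dimension. */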
    if (nc_redef(ncid)) ERR;
    if (nc_enddef(ncid)) ERR;
    if (nc_inq_dimlen(ncid, dimid, &nrecs)) ERR;

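    /* After the parallel writes, every rank should see one record per process. */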
    if (nrecs != (size_t)nprocs)
    {
        printf("Rank %d error at line %d of file %s:\n", rank, __LINE__, __FILE__);
        printf("\tafter writing start=%zu count=%zu\n", start[0], count[0]);
        printf("\texpecting number of records = %d but got %zu\n",
               nprocs, nrecs);
        ERR;
    }
    if (nc_close(ncid)) ERR;

    MPI_Finalize();
    return 0;
}