/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by the Board of Trustees of the University of Illinois.        *
 * All rights reserved.                                                     *
 *                                                                          *
 * This file is part of HDF5. The full HDF5 copyright notice, including     *
 * terms governing use, modification, and redistribution, is contained in   *
 * the files COPYING and Copyright.html. COPYING can be found at the root   *
 * of the source code distribution tree; Copyright.html can be found at the *
 * root level of an installed copy of the electronic HDF5 document set and  *
 * is linked from the top-level documents page. It can also be found at     *
 * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have     *
 * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu.*
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Main driver of the parallel NetCDF-4 tests.
 */

#include <nc_tests.h>   /* test macros: ERR, SUMMARIZE_ERR, FINAL_RESULTS */

#define FILE_NAME "tst_parallel3.nc"

/* 2-, 3-, and 4-dimensional tests; the first dimension (time) is
   unlimited. */
#define NDIMS1 2
#define NDIMS2 4
#define DIMSIZE (768*2) /* was 4 */
#define DIMSIZE2 4
#define DIMSIZE3 4
#define TIMELEN 1

/* BIGFILE: a >2G, >4G, or >8G file. The big file is created, but no
   actual data are written to it. Its dimension sizes are defined inside
   the function. */
#define ATTRNAME1 "valid_range"
#define ATTRNAME2 "scale_factor"
#define ATTRNAME3 "title"

/* The number of processors should divide the dimension sizes evenly;
   the best choices are powers of two. For these NetCDF-4 tests,
   however, the following processor counts are all acceptable:
   1,2,3,4,6,8,12,16,24,32,48,64,96,128,192,256
   The maximum number of processors is 256. */

int test_pio(int);
int test_pio_attr(int);
int test_pio_big(int);
int test_pio_hyper(int);
int test_pio_extend(int);

char* getenv_all(MPI_Comm comm, int root, const char* name);

int facc_type;
int facc_type_open;

char file_name[NC_MAX_NAME + 1];

int main(int argc, char **argv)
{
   int mpi_size, mpi_rank;   /* MPI variables */
   int i;
   int NUMP[16] = {1,2,3,4,6,8,12,16,24,32,48,64,96,128,192,256};
   int size_flag = 0;

   /* Un-buffer stdout and stderr. */
   setbuf(stderr, NULL);
   setbuf(stdout, NULL);

   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   if (mpi_rank == 0)
      printf("\n*** Testing more advanced parallel access.\n");

   /* Accept only the processor counts listed above. */
   for (i = 0; i < 16; i++)
      if (mpi_size == NUMP[i])
      {
         size_flag = 1;
         break;
      }

   if (!size_flag)
   {
      printf("mpi_size is wrong\n");
      printf(" The number of processors must be chosen from\n");
      printf(" 1,2,3,4,6,8,12,16,24,32,48,64,96,128,192,256\n");
      return -1;
   }

   facc_type = NC_NETCDF4|NC_MPIIO;
   facc_type_open = NC_MPIIO;
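   /* facc_type selects how the file is created: NC_NETCDF4 requests a
      NetCDF-4/HDF5 file and NC_MPIIO selects the MPI-IO driver;
      facc_type_open is the matching mode for opening the file. Both
      are switched to NC_MPIPOSIX below to repeat the tests with the
      MPI-POSIX driver. */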
   /* Create the file name. */
   sprintf(file_name, "%s/%s", TEMP_LARGE, FILE_NAME);

   /* Test NetCDF-4 with the MPI-IO driver. */
   if (mpi_rank == 0)
      printf("*** Testing parallel IO for raw-data with MPI-IO (driver)...");
   if (test_pio(NC_INDEPENDENT) != 0) ERR;
   if (test_pio(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for meta-data with MPI-IO (driver)...");
   if (test_pio_attr(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_attr(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for different hyperslab selections "
             "with MPI-IO (driver)...");
   if (test_pio_hyper(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_hyper(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for extending variables with MPI-IO (driver)...");
   if (test_pio_extend(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   /* Note: When the MPI-POSIX VFD is not compiled in to HDF5, the
    * NC_MPIPOSIX flag will be aliased to the NC_MPIIO flag within the
    * library, and therefore this test will exercise the aliasing, with
    * the MPI-IO VFD, under that configuration. -QAK */
   if (mpi_rank == 0)
      printf("*** Testing parallel IO for raw-data with MPIPOSIX-IO (driver)...");
   facc_type = NC_NETCDF4|NC_MPIPOSIX;
   facc_type_open = NC_MPIPOSIX;
   if (test_pio(NC_INDEPENDENT) != 0) ERR;
   if (test_pio(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for meta-data with MPIPOSIX-IO (driver)...");
   if (test_pio_attr(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_attr(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for different hyperslab selections "
             "with MPIPOSIX-IO (driver)...");
   if (test_pio_hyper(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_hyper(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for extending variables with MPIPOSIX-IO (driver)...");
   if (test_pio_extend(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   /* if (!getenv_all(MPI_COMM_WORLD, 0, "NETCDF4_NOCLEANUP")) */
   remove(file_name);

   MPI_Finalize();

   if (mpi_rank == 0)
      FINAL_RESULTS;
   return 0;
}

/* Both reads and writes are tested. */
int test_pio(int flag)
{
   /* MPI stuff. */
   int mpi_size, mpi_rank;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;

   /* NetCDF-4 stuff. */
   int ncid;
   int nvid, uvid;
   int rvid;
   unsigned m, k, j, i;

   /* Two-dimensional integer data test. */
   int dimids[NDIMS1];
   size_t start[NDIMS1];
   size_t count[NDIMS1];
   int *data;
   int *tempdata;
   int *rdata;
   int *temprdata;

   /* Four-dimensional integer data test; the time dimension is
      unlimited. */
   int dimuids[NDIMS2];
   size_t ustart[NDIMS2];
   size_t ucount[NDIMS2];
   int *udata;
   int *tempudata;
   int *rudata;
   int *temprudata;

   /* Initialize MPI. */
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   /* Create a parallel NetCDF-4 file. */
   if (nc_create_par(file_name, facc_type, comm, info, &ncid)) ERR;

   /* Case 1: a two-dimensional variable, no unlimited dimension. */

   /* Create two dimensions. */
   if (nc_def_dim(ncid, "d1", DIMSIZE2, dimids)) ERR;
   if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "v1", NC_INT, NDIMS1, dimids, &nvid)) ERR;

   if (nc_enddef(ncid)) ERR;
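   /* Decomposition note: each process writes one contiguous slab of v1
      along d2. For example, with DIMSIZE = 768*2 = 1536 and 4
      processes, each rank covers the 384 columns starting at
      384 * mpi_rank, for all DIMSIZE2 rows. */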
   /* Set up the slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   /* Start parallel NetCDF-4 access. */
   if (nc_var_par_access(ncid, nvid, flag)) ERR;

   if (!(data = malloc(sizeof(int) * count[1] * count[0]))) ERR;
   tempdata = data;
   for (j = 0; j < count[0]; j++)
      for (i = 0; i < count[1]; i++)
      {
         *tempdata = mpi_rank * (j + 1);
         tempdata++;
      }

   /* Write the two-dimensional integer data. */
   if (nc_put_vara_int(ncid, nvid, start, count, data)) ERR;
   free(data);

   /* Case 2: four-dimensional integer data; one dimension is
      unlimited. */

   /* Create four dimensions. */
   if (nc_def_dim(ncid, "ud1", NC_UNLIMITED, dimuids)) ERR;
   if (nc_def_dim(ncid, "ud2", DIMSIZE3, &dimuids[1])) ERR;
   if (nc_def_dim(ncid, "ud3", DIMSIZE2, &dimuids[2])) ERR;
   if (nc_def_dim(ncid, "ud4", DIMSIZE, &dimuids[3])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "uv1", NC_INT, NDIMS2, dimuids, &uvid)) ERR;

   if (nc_enddef(ncid)) ERR;

   /* Set up the selection parameters. */
   ustart[0] = 0;
   ustart[1] = 0;
   ustart[2] = 0;
   ustart[3] = DIMSIZE*mpi_rank/mpi_size;
   ucount[0] = TIMELEN;
   ucount[1] = DIMSIZE3;
   ucount[2] = DIMSIZE2;
   ucount[3] = DIMSIZE/mpi_size;

   /* Access in parallel. */
   if (nc_var_par_access(ncid, uvid, flag)) ERR;

   /* Create phony data. */
   if (!(udata = malloc(ucount[0]*ucount[1]*ucount[2]*ucount[3]*sizeof(int)))) ERR;
   tempudata = udata;
   for (m = 0; m < ucount[0]; m++)
      for (k = 0; k < ucount[1]; k++)
         for (j = 0; j < ucount[2]; j++)
            for (i = 0; i < ucount[3]; i++)
            {
               /* Fill the buffer with rank-dependent phony values. */
               *tempudata = (1 + mpi_rank) * 2 * (j + 1) * (k + 1) * (m + 1);
               tempudata++;
            }

   /* Write slabs of the phony data. */
   if (nc_put_vara_int(ncid, uvid, ustart, ucount, udata)) ERR;
   free(udata);

   /* Close the NetCDF file. */
   if (nc_close(ncid)) ERR;

   return 0;
}

/* ... (the read-back verification of v1 and uv1, and the definitions
   of test_pio_attr, test_pio_big, test_pio_hyper, and test_pio_extend,
   are missing here) ... */

/* The root task of COMM reads the environment variable NAME and
 * broadcasts it to all other tasks; returns the value, or NULL if the
 * variable is not set. */
char* getenv_all(MPI_Comm comm, int root, const char* name)
{
   int nID;
   int len = -1;
   static char* env = NULL;

   MPI_Comm_rank(comm, &nID);

   if (nID == root)
   {
      /* The root task looks up the variable, then broadcasts its
         length (-1 when unset) followed by its value. */
      env = getenv(name);
      if (env)
      {
         len = strlen(env);
         MPI_Bcast(&len, 1, MPI_INT, root, comm);
         MPI_Bcast(env, len, MPI_CHAR, root, comm);
      }
      else
         MPI_Bcast(&len, 1, MPI_INT, root, comm);
   }
   else
   {
      /* All other tasks receive the length, size their buffer, and
         receive the value. */
      MPI_Bcast(&len, 1, MPI_INT, root, comm);
      if (len >= 0)
      {
         if (env == NULL)
            env = (char*) malloc(len+1);
         else if (strlen(env) < len)
            env = (char*) realloc(env, len+1);

         MPI_Bcast(env, len, MPI_CHAR, root, comm);
         env[len] = '\0';
      }
      else
      {
         if (env)
            free(env);
         env = NULL;
      }
   }

   MPI_Barrier(comm);

   return env;
}
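/* A sketch of how getenv_all() is meant to be used (compare the
   commented-out call in main() above); because every task receives the
   same value, all tasks take the same cleanup branch:

      if (!getenv_all(MPI_COMM_WORLD, 0, "NETCDF4_NOCLEANUP"))
         remove(file_name);
*/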