2018-05-14 22:49:13 +08:00
|
|
|
/* Copyright 2018, UCAR/Unidata See COPYRIGHT file for copying and
|
|
|
|
* redistribution conditions.
|
|
|
|
*
|
2020-01-07 00:08:27 +08:00
|
|
|
* This program tests netcdf-4 parallel I/O.
|
2018-05-14 22:49:13 +08:00
|
|
|
*
|
|
|
|
* Ed Hartnett
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <nc_tests.h>
|
|
|
|
#include "err_macros.h"
|
|
|
|
#include <mpi.h>
|
|
|
|
|
|
|
|
#define FILE "tst_parallel5.nc"
|
2018-05-15 04:25:56 +08:00
|
|
|
#define VAR_NAME "TheIrishRover"
|
|
|
|
#define DIM_NAME "number_of_masts"
|
|
|
|
#define MASTS 27
|
2018-05-15 00:06:35 +08:00
|
|
|
#define NDIMS1 1
|
2018-05-14 22:49:13 +08:00
|
|
|
#define DIMSIZE 4
|
|
|
|
#define NUM_PROC 4
|
|
|
|
#define NUM_SLABS 10
|
2018-05-15 04:25:56 +08:00
|
|
|
#define NUM_ACCESS_TESTS 2
|
2020-05-09 01:00:56 +08:00
|
|
|
#define HDF5_DEFAULT_CACHE_SIZE 1048576
|
|
|
|
#define HDF5_DEFAULT_NELEMS 521
|
|
|
|
#define HDF5_DEFAULT_PREEMPTION 0.75
|
2018-05-14 22:49:13 +08:00
|
|
|
|
2020-05-08 22:58:42 +08:00
|
|
|
int
|
|
|
|
nc4_hdf5_get_chunk_cache(int ncid, size_t *sizep, size_t *nelemsp,
|
2020-05-09 01:11:56 +08:00
|
|
|
float *preemptionp);
|
2020-05-08 22:58:42 +08:00
|
|
|
|
2018-05-14 22:49:13 +08:00
|
|
|
int
|
|
|
|
main(int argc, char **argv)
|
|
|
|
{
|
2019-08-14 22:22:35 +08:00
|
|
|
int mpi_size, mpi_rank;
|
|
|
|
MPI_Comm comm = MPI_COMM_WORLD;
|
|
|
|
MPI_Info info = MPI_INFO_NULL;
|
|
|
|
int ncid, v1id, dimid;
|
|
|
|
size_t start[NDIMS1] = {0}, count[NDIMS1] = {0};
|
|
|
|
int data = MASTS;
|
|
|
|
int data_in = TEST_VAL_42;
|
|
|
|
int acc;
|
|
|
|
|
|
|
|
/* Initialize MPI. */
|
|
|
|
MPI_Init(&argc, &argv);
|
|
|
|
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
|
|
|
|
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
|
|
|
|
|
|
|
|
/* Require exactly 4 tasks. */
|
|
|
|
if (mpi_size != NUM_PROC) ERR;
|
|
|
|
|
|
|
|
if (!mpi_rank)
|
|
|
|
printf("\n*** Testing parallel I/O.\n");
|
|
|
|
|
|
|
|
if (!mpi_rank)
|
|
|
|
printf("*** testing whether we can write 0 elements from some tasks...");
|
|
|
|
{
|
|
|
|
for (acc = 0; acc < NUM_ACCESS_TESTS; acc++)
|
|
|
|
{
|
|
|
|
/* Create a parallel netcdf-4 file. */
|
2022-02-05 05:44:38 +08:00
|
|
|
/* nc_set_log_level(3); */
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_create_par(FILE, NC_NETCDF4, comm, info, &ncid)) ERR;
|
|
|
|
|
|
|
|
/* Create a dimension. */
|
|
|
|
if (nc_def_dim(ncid, DIM_NAME, DIMSIZE, &dimid)) ERR;
|
|
|
|
|
|
|
|
/* Create one var. */
|
|
|
|
if (nc_def_var(ncid, VAR_NAME, NC_INT, NDIMS1, &dimid, &v1id)) ERR;
|
|
|
|
|
|
|
|
/* Write metadata to file. */
|
|
|
|
if (nc_enddef(ncid)) ERR;
|
|
|
|
|
|
|
|
/* Set up slab for this process. */
|
|
|
|
if (!mpi_rank)
|
|
|
|
count[0] = 1;
|
|
|
|
|
|
|
|
if (nc_var_par_access(ncid, v1id, acc ? NC_COLLECTIVE : NC_INDEPENDENT)) ERR;
|
|
|
|
|
|
|
|
/* Write phoney data. */
|
|
|
|
if (nc_put_vara_int(ncid, v1id, start, count, &data)) ERR;
|
|
|
|
|
|
|
|
if (nc_sync(ncid)) ERR;
|
|
|
|
|
|
|
|
/* Read phoney data. */
|
|
|
|
if (nc_get_vara_int(ncid, v1id, start, count, &data_in)) ERR;
|
|
|
|
|
|
|
|
/* Task 0 has MASTS, the others have data_in remaining, as
|
|
|
|
* initialized, at TEST_VAL_42. */
|
|
|
|
if (data_in != (mpi_rank ? TEST_VAL_42 : MASTS)) ERR;
|
|
|
|
|
|
|
|
/* Close the netcdf file. */
|
|
|
|
if (nc_close(ncid)) ERR;
|
2022-02-05 05:44:38 +08:00
|
|
|
/* nc_set_log_level(-1); */
|
2019-08-14 22:22:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!mpi_rank)
|
|
|
|
SUMMARIZE_ERR;
|
|
|
|
|
|
|
|
if (!mpi_rank)
|
|
|
|
printf("*** testing enum type and parallel I/O...");
|
|
|
|
{
|
|
|
|
for (acc = 0; acc < NUM_ACCESS_TESTS; acc++)
|
|
|
|
{
|
2018-05-15 22:09:52 +08:00
|
|
|
#define ENUM_NAME "cargo"
|
|
|
|
#define ENUM_VAR_NAME "in_the_hold_of_the_Irish_Rover"
|
|
|
|
#define NUM_ENUM_FIELDS 8
|
2019-08-14 22:22:35 +08:00
|
|
|
int typeid;
|
|
|
|
int f;
|
|
|
|
char field_name[NUM_ENUM_FIELDS][NC_MAX_NAME + 1] = {"bags of the best Sligo rags", "barrels of bones",
|
|
|
|
"bails of old nanny goats' tails", "barrels of stones",
|
|
|
|
"dogs", "hogs", "barrels of porter",
|
|
|
|
"sides of old blind horses hides"};
|
|
|
|
unsigned long long field_value[NUM_ENUM_FIELDS] = {1000000, 2000000, 3000000, 4000000,
|
|
|
|
5000000, 6000000, 7000000, 8000000};
|
|
|
|
unsigned long long data = 1000000, data_in = TEST_VAL_42;
|
|
|
|
|
|
|
|
/* Create a parallel netcdf-4 file. */
|
|
|
|
if (nc_create_par(FILE, NC_NETCDF4, comm, info, &ncid)) ERR;
|
2018-05-16 16:44:46 +08:00
|
|
|
|
2018-05-16 16:26:13 +08:00
|
|
|
/* Create a dimension. */
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_def_dim(ncid, DIM_NAME, DIMSIZE, &dimid)) ERR;
|
|
|
|
|
|
|
|
/* Create an enum type. */
|
|
|
|
if (nc_def_enum(ncid, NC_UINT64, ENUM_NAME, &typeid)) ERR;
|
|
|
|
for (f = 0; f < NUM_ENUM_FIELDS; f++)
|
|
|
|
if (nc_insert_enum(ncid, typeid, field_name[f], &field_value[f])) ERR;
|
2018-05-16 16:44:46 +08:00
|
|
|
|
2018-05-16 16:26:13 +08:00
|
|
|
/* Create one var. */
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_def_var(ncid, ENUM_VAR_NAME, typeid, NDIMS1, &dimid, &v1id)) ERR;
|
2018-05-16 16:44:46 +08:00
|
|
|
|
2018-05-16 16:26:13 +08:00
|
|
|
/* Write metadata to file. */
|
|
|
|
if (nc_enddef(ncid)) ERR;
|
2018-05-16 16:44:46 +08:00
|
|
|
|
2018-05-16 16:26:13 +08:00
|
|
|
/* Set up slab for this process. */
|
2019-08-14 22:22:35 +08:00
|
|
|
if (!mpi_rank)
|
|
|
|
count[0] = 1;
|
|
|
|
|
|
|
|
if (nc_var_par_access(ncid, v1id, acc ? NC_COLLECTIVE : NC_INDEPENDENT)) ERR;
|
2018-05-16 16:44:46 +08:00
|
|
|
|
2018-05-16 16:26:13 +08:00
|
|
|
/* Write phoney data. */
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_put_vara(ncid, v1id, start, count, &data)) ERR;
|
|
|
|
|
|
|
|
if (nc_sync(ncid)) ERR;
|
|
|
|
|
|
|
|
/* Read phoney data. */
|
|
|
|
if (nc_get_vara(ncid, v1id, start, count, &data_in)) ERR;
|
|
|
|
|
|
|
|
/* Task 0 has 1000000, the others have data_in remaining, as
|
|
|
|
* initialized, at TEST_VAL_42. */
|
|
|
|
if (data_in != (mpi_rank ? TEST_VAL_42 : 1000000)) ERR;
|
2018-05-16 16:44:46 +08:00
|
|
|
|
2018-05-16 16:26:13 +08:00
|
|
|
/* Close the netcdf file. */
|
|
|
|
if (nc_close(ncid)) ERR;
|
2019-08-14 22:22:35 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!mpi_rank)
|
|
|
|
SUMMARIZE_ERR;
|
|
|
|
if (!mpi_rank)
|
|
|
|
printf("*** testing compound type and parallel I/O...");
|
|
|
|
{
|
|
|
|
for (acc = 0; acc < NUM_ACCESS_TESTS; acc++)
|
|
|
|
{
|
|
|
|
#define COMPOUND_NAME "crew_info"
|
|
|
|
#define COMPOUND_VAR_NAME "whale_of_a_crew"
|
|
|
|
#define NUM_CREW 5
|
|
|
|
#define CREW_DIM_NAME "number_of_crew"
|
|
|
|
int typeid;
|
|
|
|
struct crew
|
|
|
|
{
|
|
|
|
char name[NC_MAX_NAME + 1];
|
|
|
|
char description[NC_MAX_NAME + 1];
|
|
|
|
char origin[NC_MAX_NAME + 1];
|
|
|
|
int age;
|
|
|
|
};
|
|
|
|
struct crew data = {"Mick McCann", "the skipper of the Irish Rover",
|
|
|
|
"from the banks of the Bann", 42};
|
|
|
|
struct crew data_in = {"", "", "", -42};
|
|
|
|
int dim_size = NC_MAX_NAME + 1;
|
|
|
|
|
|
|
|
/* Create a parallel netcdf-4 file. */
|
|
|
|
if (nc_create_par(FILE, NC_NETCDF4, comm, info, &ncid)) ERR;
|
|
|
|
|
|
|
|
/* Create a dimension. */
|
|
|
|
if (nc_def_dim(ncid, CREW_DIM_NAME, NUM_CREW, &dimid)) ERR;
|
|
|
|
|
|
|
|
/* Create a compound type. */
|
|
|
|
if (nc_def_compound(ncid, sizeof(struct crew), COMPOUND_NAME, &typeid)) ERR;
|
|
|
|
if (nc_insert_array_compound(ncid, typeid, "name", NC_COMPOUND_OFFSET(struct crew, name), NC_CHAR, 1, &dim_size)) ERR;
|
|
|
|
if (nc_insert_array_compound(ncid, typeid, "description", NC_COMPOUND_OFFSET(struct crew, description), NC_CHAR, 1, &dim_size)) ERR;
|
|
|
|
if (nc_insert_array_compound(ncid, typeid, "origin", NC_COMPOUND_OFFSET(struct crew, origin), NC_CHAR, 1, &dim_size)) ERR;
|
|
|
|
if (nc_insert_compound(ncid, typeid, "age", NC_COMPOUND_OFFSET(struct crew, age), NC_INT)) ERR;
|
|
|
|
|
|
|
|
/* Create one var. */
|
|
|
|
if (nc_def_var(ncid, COMPOUND_VAR_NAME, typeid, NDIMS1, &dimid, &v1id)) ERR;
|
2018-05-16 16:26:13 +08:00
|
|
|
|
2019-08-14 22:22:35 +08:00
|
|
|
/* Write metadata to file. */
|
|
|
|
if (nc_enddef(ncid)) ERR;
|
2018-05-16 16:26:13 +08:00
|
|
|
|
2019-08-14 22:22:35 +08:00
|
|
|
/* Set up slab for this process. */
|
|
|
|
if (!mpi_rank)
|
|
|
|
count[0] = 1;
|
2018-05-16 16:26:13 +08:00
|
|
|
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_var_par_access(ncid, v1id, acc ? NC_COLLECTIVE : NC_INDEPENDENT)) ERR;
|
2018-05-16 16:26:13 +08:00
|
|
|
|
2019-08-14 22:22:35 +08:00
|
|
|
/* Write phoney data. */
|
|
|
|
if (nc_put_vara(ncid, v1id, start, count, &data)) ERR;
|
2018-05-14 22:49:13 +08:00
|
|
|
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_sync(ncid)) ERR;
|
2018-05-14 22:49:13 +08:00
|
|
|
|
2019-08-14 22:22:35 +08:00
|
|
|
/* Read phoney data. */
|
|
|
|
if (nc_get_vara(ncid, v1id, start, count, &data_in)) ERR;
|
2018-05-15 22:09:52 +08:00
|
|
|
|
2019-08-14 22:22:35 +08:00
|
|
|
/* Task 0 has data, the others have nothing. */
|
|
|
|
if (!mpi_rank)
|
|
|
|
{
|
|
|
|
if (strcmp(data_in.name, data.name) || strcmp(data_in.description, data.description) ||
|
|
|
|
strcmp(data_in.origin, data.origin) || data_in.age != data.age) ERR;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (strcmp(data_in.name, "") || strcmp(data_in.description, "") ||
|
|
|
|
strcmp(data_in.origin, "") || data_in.age != -42) ERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Close the netcdf file. */
|
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!mpi_rank)
|
|
|
|
SUMMARIZE_ERR;
|
|
|
|
if (!mpi_rank)
|
|
|
|
printf("*** testing string type and parallel I/O...");
|
|
|
|
{
|
|
|
|
for (acc = 0; acc < NUM_ACCESS_TESTS; acc++)
|
|
|
|
{
|
|
|
|
#define STORY_VAR_NAME "fate_of_the_Irish_Rover"
|
|
|
|
#define STORY_DIM_NAME "number_of_lines"
|
|
|
|
#define STORY_LEN 8
|
|
|
|
char *story[STORY_LEN] = {"We had sailed seven years when the measles broke out",
|
|
|
|
"And the ship lost it's way in the fog",
|
|
|
|
"And that whale of the crew was reduced down to two",
|
|
|
|
"Just myself and the captain's old dog",
|
|
|
|
"Then the ship struck a rock, oh Lord what a shock",
|
|
|
|
"The bulkhead was turned right over",
|
|
|
|
"Turned nine times around, and the poor dog was drowned",
|
|
|
|
"I'm the last of the Irish Rover"};
|
|
|
|
char *story_in[STORY_LEN];
|
|
|
|
int s;
|
|
|
|
|
|
|
|
/* Create a netcdf-4 file. Turns out that HDF5 does not
|
|
|
|
* support VLEN writes with parallel I/O. Strings are
|
|
|
|
* VLENS. So here I write a file with task 0 and then read it
|
|
|
|
* with all tasks. */
|
|
|
|
if (!mpi_rank)
|
|
|
|
{
|
|
|
|
if (nc_create(FILE, NC_NETCDF4, &ncid)) ERR;
|
|
|
|
|
|
|
|
/* Create a dimension. */
|
|
|
|
if (nc_def_dim(ncid, STORY_DIM_NAME, STORY_LEN, &dimid)) ERR;
|
|
|
|
|
|
|
|
/* Create one var. */
|
|
|
|
if (nc_def_var(ncid, STORY_VAR_NAME, NC_STRING, NDIMS1, &dimid, &v1id)) ERR;
|
|
|
|
|
|
|
|
/* Write metadata to file. */
|
|
|
|
if (nc_enddef(ncid)) ERR;
|
|
|
|
|
|
|
|
/* Set up slab for this process. */
|
|
|
|
count[0] = STORY_LEN;
|
|
|
|
|
|
|
|
/* Write phoney data. */
|
|
|
|
if (nc_put_vara(ncid, v1id, start, count, story)) ERR;
|
|
|
|
|
|
|
|
/* Close the netcdf file. */
|
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Now try parallel read. */
|
|
|
|
if (nc_open_par(FILE, 0, comm, info, &ncid)) ERR;
|
|
|
|
|
|
|
|
/* Task 0 reads all 8 lines, other tasks read 0. */
|
|
|
|
if (nc_get_vara(ncid, v1id, start, count, story_in)) ERR;
|
|
|
|
|
|
|
|
if (!mpi_rank)
|
|
|
|
{
|
|
|
|
for (s = 0; s < STORY_LEN; s++)
|
|
|
|
if (strcmp(story_in[s], story[s])) ERR;
|
|
|
|
if (nc_free_string(STORY_LEN, (char **)story_in)) ERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Close the netcdf file. */
|
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!mpi_rank)
|
|
|
|
SUMMARIZE_ERR;
|
|
|
|
|
|
|
|
if (!mpi_rank)
|
|
|
|
printf("*** testing NC_BYTE type and parallel I/O...");
|
|
|
|
{
|
|
|
|
/* This test is related to
|
|
|
|
* https://github.com/Unidata/netcdf-c/issues/1462. */
|
|
|
|
int ncid, varid;
|
2019-08-14 22:50:09 +08:00
|
|
|
signed char test_data_in, test_data = 42;
|
2019-08-14 22:22:35 +08:00
|
|
|
|
|
|
|
/* Crate a file with a scalar NC_BYTE value. */
|
|
|
|
if (nc_create_par(FILE, NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL,
|
|
|
|
&ncid)) ERR;
|
2020-05-08 22:58:42 +08:00
|
|
|
if (nc_def_var(ncid, VAR_NAME, NC_BYTE, 0, NULL, &varid)) ERR;
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_enddef(ncid)) ERR;
|
2020-03-05 04:31:47 +08:00
|
|
|
if (nc_put_var_schar(ncid, varid, &test_data)) ERR;
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
|
2019-08-14 22:50:09 +08:00
|
|
|
/* Reopen the file and check. */
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_open_par(FILE, 0, comm, info, &ncid)) ERR;
|
2020-03-05 04:31:47 +08:00
|
|
|
if (nc_get_var_schar(ncid, varid, &test_data_in)) ERR;
|
2019-08-14 22:50:09 +08:00
|
|
|
if (test_data_in != test_data) ERR;
|
2019-08-14 22:22:35 +08:00
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
}
|
|
|
|
if (!mpi_rank)
|
|
|
|
SUMMARIZE_ERR;
|
2020-05-08 22:58:42 +08:00
|
|
|
if (!mpi_rank)
|
2020-05-09 01:12:47 +08:00
|
|
|
printf("*** testing cache settings for sequentially-opened files...");
|
2020-05-08 22:58:42 +08:00
|
|
|
{
|
|
|
|
/* This test is related to
|
|
|
|
* https://github.com/Unidata/netcdf-c/issues/1715. */
|
2020-07-09 02:24:48 +08:00
|
|
|
int ncid;
|
2020-05-09 01:11:56 +08:00
|
|
|
size_t size, nelems;
|
|
|
|
float preemption;
|
2020-05-08 22:58:42 +08:00
|
|
|
|
|
|
|
/* Create a file with parallel I/O and check cache settings. */
|
|
|
|
if (nc_create_par(FILE, NC_NETCDF4|NC_CLOBBER, MPI_COMM_WORLD, MPI_INFO_NULL,
|
|
|
|
&ncid)) ERR;
|
2020-05-09 01:11:56 +08:00
|
|
|
if (nc4_hdf5_get_chunk_cache(ncid, &size, &nelems, &preemption)) ERR;
|
|
|
|
if (size != HDF5_DEFAULT_CACHE_SIZE || nelems != HDF5_DEFAULT_NELEMS ||
|
|
|
|
preemption != HDF5_DEFAULT_PREEMPTION) ERR;
|
|
|
|
/* printf("%ld %ld %g\n", size, nelems, preemption); */
|
2020-05-08 22:58:42 +08:00
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
|
2020-05-09 01:00:56 +08:00
|
|
|
/* Create a file with sequential I/O and check cache settings
|
2020-05-09 01:11:56 +08:00
|
|
|
* on processor 0. Now, instead of being set to the HDF5
|
|
|
|
* defaults, the chunk settings are set to the netCDF
|
|
|
|
* defaults. */
|
|
|
|
if (!mpi_rank)
|
|
|
|
{
|
|
|
|
if (nc_create(FILE, NC_NETCDF4|NC_CLOBBER, &ncid)) ERR;
|
|
|
|
if (nc4_hdf5_get_chunk_cache(ncid, &size, &nelems, &preemption)) ERR;
|
|
|
|
/* printf("%ld %ld %g\n", size, nelems, preemption); */
|
|
|
|
if (size != CHUNK_CACHE_SIZE || nelems != CHUNK_CACHE_NELEMS ||
|
|
|
|
preemption != CHUNK_CACHE_PREEMPTION) ERR;
|
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
}
|
2020-05-08 22:58:42 +08:00
|
|
|
|
|
|
|
/* Reopen the file and check. */
|
|
|
|
if (nc_open_par(FILE, 0, comm, info, &ncid)) ERR;
|
2020-05-09 01:11:56 +08:00
|
|
|
if (nc4_hdf5_get_chunk_cache(ncid, &size, &nelems, &preemption)) ERR;
|
|
|
|
if (size != HDF5_DEFAULT_CACHE_SIZE || nelems != HDF5_DEFAULT_NELEMS ||
|
|
|
|
preemption != HDF5_DEFAULT_PREEMPTION) ERR;
|
2020-05-08 22:58:42 +08:00
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
|
|
|
|
/* Open the file with sequential I/O and check cache settings
|
2020-05-09 01:11:56 +08:00
|
|
|
* on processor 0. Now, instead of being set to the HDF5
|
|
|
|
* defaults, the chunk settings are set to the netCDF
|
|
|
|
* defaults. */
|
|
|
|
if (!mpi_rank)
|
|
|
|
{
|
|
|
|
if (nc_open(FILE, 0, &ncid)) ERR;
|
|
|
|
if (nc4_hdf5_get_chunk_cache(ncid, &size, &nelems, &preemption)) ERR;
|
|
|
|
if (size != CHUNK_CACHE_SIZE || nelems != CHUNK_CACHE_NELEMS ||
|
|
|
|
preemption != CHUNK_CACHE_PREEMPTION) ERR;
|
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
}
|
2020-05-08 22:58:42 +08:00
|
|
|
}
|
|
|
|
if (!mpi_rank)
|
|
|
|
SUMMARIZE_ERR;
|
Enhance/Fix filter support
re: Discussion https://github.com/Unidata/netcdf-c/discussions/2214
The primary change is to support so-called "standard filters".
A standard filter is one that is defined by the following
netcdf-c API:
````
int nc_def_var_XXX(int ncid, int varid, size_t nparams, unsigned* params);
int nc_inq_var_XXXX(int ncid, int varid, int* usefilterp, unsigned* params);
````
So for example, zstandard would be a standard filter by defining
the functions *nc_def_var_zstandard* and *nc_inq_var_zstandard*.
In order to define these functions, we need a new dispatch function:
````
int nc_inq_filter_avail(int ncid, unsigned filterid);
````
This function, combined with the existing filter API can be used
to implement arbitrary standard filters using a simple code pattern.
Note that I would have preferred that this function return a list
of all available filters, but HDF5 does not support that functionality.
So this PR implements the dispatch function and implements
the following standard functions:
+ bzip2
+ zstandard
+ blosc
Specific test cases are also provided for HDF5 and NCZarr.
Over time, other specific standard filters will be defined.
## Primary Changes
* Add nc_inq_filter_avail() to netcdf-c API.
* Add standard filter implementations to test use of *nc_inq_filter_avail*.
* Bump the dispatch table version number and add to all the relevant
dispatch tables (libsrc, libsrcp, etc).
* Create a program to invoke nc_inq_filter_avail so that it is accessible
to shell scripts.
* Cleanup szip support to properly support szip
when HDF5 is disabled. This involves detecting
libsz separately from testing if HDF5 supports szip.
* Integrate shuffle and fletcher32 into the existing
filter API. This means that, for example, nc_def_var_fletcher32
is now a wrapper around nc_def_var_filter.
* Extend the Codec defaulting to allow multiple default shared libraries.
## Misc. Changes
* Modify configure.ac/CMakeLists.txt to look for the relevant
libraries implementing standard filters.
* Modify libnetcdf.settings to list available standard filters
(including deflate and szip).
* Add CMake test modules to locate libbz2 and libzstd.
* Cleanup the HDF5 memory manager function use in the plugins.
* remove unused file include//ncfilter.h
* remove tests for the HDF5 memory operations e.g. H5allocate_memory.
* Add flag to ncdump to force use of _Filter instead of _Deflate
or _Shuffle or _Fletcher32. Used for testing.
2022-03-15 02:39:37 +08:00
|
|
|
#ifdef HAVE_H5Z_SZIP
|
2020-01-18 05:01:07 +08:00
|
|
|
#ifdef HDF5_SUPPORTS_PAR_FILTERS
|
2020-01-07 01:17:28 +08:00
|
|
|
#define SZIP_DIM_LEN 256
|
|
|
|
#define SZIP_DIM_NAME "Barrels"
|
|
|
|
#define SZIP_VAR_NAME "Best_Sligo_Rags"
|
2020-01-07 01:21:00 +08:00
|
|
|
#define SZIP_PIXELS_PER_BLOCK 32
|
2020-01-07 00:08:27 +08:00
|
|
|
if (!mpi_rank)
|
2020-01-07 01:17:28 +08:00
|
|
|
printf("*** testing szip compression with parallel I/O...");
|
2020-01-07 00:08:27 +08:00
|
|
|
{
|
2020-01-07 01:17:28 +08:00
|
|
|
int ncid, dimid, varid;
|
|
|
|
float *data;
|
2020-01-07 01:22:45 +08:00
|
|
|
float *data_in;
|
2020-01-07 01:17:28 +08:00
|
|
|
int elements_per_pe = SZIP_DIM_LEN/mpi_size;
|
|
|
|
size_t start[NDIMS1], count[NDIMS1];
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Create test data. */
|
|
|
|
if (!(data = malloc(elements_per_pe * sizeof(float)))) ERR;
|
|
|
|
for (i = 0; i < elements_per_pe; i++)
|
|
|
|
data[i] = mpi_rank + i * 0.1;
|
2020-01-07 00:08:27 +08:00
|
|
|
|
|
|
|
/* Crate a file with a scalar NC_BYTE value. */
|
|
|
|
if (nc_create_par(FILE, NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL,
|
|
|
|
&ncid)) ERR;
|
2020-01-07 01:17:28 +08:00
|
|
|
if (nc_def_dim(ncid, SZIP_DIM_NAME, SZIP_DIM_LEN, &dimid)) ERR;
|
|
|
|
if (nc_def_var(ncid, SZIP_VAR_NAME, NC_FLOAT, NDIMS1, &dimid, &varid)) ERR;
|
2020-03-03 07:31:56 +08:00
|
|
|
if (nc_def_var_szip(ncid, varid, H5_SZIP_NN_OPTION_MASK,
|
|
|
|
SZIP_PIXELS_PER_BLOCK)) ERR;
|
2020-01-07 00:08:27 +08:00
|
|
|
if (nc_enddef(ncid)) ERR;
|
2020-01-07 01:17:28 +08:00
|
|
|
start[0] = mpi_rank * elements_per_pe;
|
|
|
|
count[0] = elements_per_pe;
|
2020-03-05 04:31:47 +08:00
|
|
|
if (nc_put_vara_float(ncid, varid, start, count, data)) ERR;
|
2020-01-07 00:08:27 +08:00
|
|
|
if (nc_close(ncid)) ERR;
|
|
|
|
|
|
|
|
/* Reopen the file and check. */
|
|
|
|
if (nc_open_par(FILE, 0, comm, info, &ncid)) ERR;
|
2020-01-07 01:22:45 +08:00
|
|
|
if (!(data_in = malloc(elements_per_pe * sizeof(float)))) ERR;
|
2020-03-05 04:31:47 +08:00
|
|
|
if (nc_get_vara_float(ncid, varid, start, count, data_in)) ERR;
|
2020-01-07 00:08:27 +08:00
|
|
|
if (nc_close(ncid)) ERR;
|
2020-01-07 01:23:59 +08:00
|
|
|
for (i = 0; i < elements_per_pe; i++)
|
|
|
|
if (data_in[i] != data[i]) ERR;
|
2020-01-07 01:17:28 +08:00
|
|
|
|
|
|
|
/* Release resources. */
|
2020-01-07 01:22:45 +08:00
|
|
|
free(data_in);
|
2020-01-07 01:17:28 +08:00
|
|
|
free(data);
|
2020-01-07 00:08:27 +08:00
|
|
|
}
|
|
|
|
if (!mpi_rank)
|
|
|
|
SUMMARIZE_ERR;
|
2020-01-18 05:01:07 +08:00
|
|
|
#endif /* HDF5_SUPPORTS_PAR_FILTERS */
|
Enhance/Fix filter support
re: Discussion https://github.com/Unidata/netcdf-c/discussions/2214
The primary change is to support so-called "standard filters".
A standard filter is one that is defined by the following
netcdf-c API:
````
int nc_def_var_XXX(int ncid, int varid, size_t nparams, unsigned* params);
int nc_inq_var_XXXX(int ncid, int varid, int* usefilterp, unsigned* params);
````
So for example, zstandard would be a standard filter by defining
the functions *nc_def_var_zstandard* and *nc_inq_var_zstandard*.
In order to define these functions, we need a new dispatch function:
````
int nc_inq_filter_avail(int ncid, unsigned filterid);
````
This function, combined with the existing filter API can be used
to implement arbitrary standard filters using a simple code pattern.
Note that I would have preferred that this function return a list
of all available filters, but HDF5 does not support that functionality.
So this PR implements the dispatch function and implements
the following standard functions:
+ bzip2
+ zstandard
+ blosc
Specific test cases are also provided for HDF5 and NCZarr.
Over time, other specific standard filters will be defined.
## Primary Changes
* Add nc_inq_filter_avail() to netcdf-c API.
* Add standard filter implementations to test use of *nc_inq_filter_avail*.
* Bump the dispatch table version number and add to all the relevant
dispatch tables (libsrc, libsrcp, etc).
* Create a program to invoke nc_inq_filter_avail so that it is accessible
to shell scripts.
* Cleanup szip support to properly support szip
when HDF5 is disabled. This involves detecting
libsz separately from testing if HDF5 supports szip.
* Integrate shuffle and fletcher32 into the existing
filter API. This means that, for example, nc_def_var_fletcher32
is now a wrapper around nc_def_var_filter.
* Extend the Codec defaulting to allow multiple default shared libraries.
## Misc. Changes
* Modify configure.ac/CMakeLists.txt to look for the relevant
libraries implementing standard filters.
* Modify libnetcdf.settings to list available standard filters
(including deflate and szip).
* Add CMake test modules to locate libbz2 and libzstd.
* Cleanup the HDF5 memory manager function use in the plugins.
* remove unused file include//ncfilter.h
* remove tests for the HDF5 memory operations e.g. H5allocate_memory.
* Add flag to ncdump to force use of _Filter instead of _Deflate
or _Shuffle or _Fletcher32. Used for testing.
2022-03-15 02:39:37 +08:00
|
|
|
#endif /* HAVE_H5Z_SZIP */
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do not use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the aws-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
|
Enhance/Fix filter support
re: Discussion https://github.com/Unidata/netcdf-c/discussions/2214
The primary change is to support so-called "standard filters".
A standard filter is one that is defined by the following
netcdf-c API:
````
int nc_def_var_XXX(int ncid, int varid, size_t nparams, unsigned* params);
int nc_inq_var_XXXX(int ncid, int varid, int* usefilterp, unsigned* params);
````
So for example, zstandard would be a standard filter by defining
the functions *nc_def_var_zstandard* and *nc_inq_var_zstandard*.
In order to define these functions, we need a new dispatch function:
````
int nc_inq_filter_avail(int ncid, unsigned filterid);
````
This function, combined with the existing filter API can be used
to implement arbitrary standard filters using a simple code pattern.
Note that I would have preferred that this function return a list
of all available filters, but HDF5 does not support that functionality.
So this PR implements the dispatch function and implements
the following standard functions:
+ bzip2
+ zstandard
+ blosc
Specific test cases are also provided for HDF5 and NCZarr.
Over time, other specific standard filters will be defined.
## Primary Changes
* Add nc_inq_filter_avail() to netcdf-c API.
* Add standard filter implementations to test use of *nc_inq_filter_avail*.
* Bump the dispatch table version number and add to all the relevant
dispatch tables (libsrc, libsrcp, etc).
* Create a program to invoke nc_inq_filter_avail so that it is accessible
to shell scripts.
* Cleanup szip support to properly support szip
when HDF5 is disabled. This involves detecting
libsz separately from testing if HDF5 supports szip.
* Integrate shuffle and fletcher32 into the existing
filter API. This means that, for example, nc_def_var_fletcher32
is now a wrapper around nc_def_var_filter.
* Extend the Codec defaulting to allow multiple default shared libraries.
## Misc. Changes
* Modify configure.ac/CMakeLists.txt to look for the relevant
libraries implementing standard filters.
* Modify libnetcdf.settings to list available standard filters
(including deflate and szip).
* Add CMake test modules to locate libbz2 and libzstd.
* Cleanup the HDF5 memory manager function use in the plugins.
* remove unused file include//ncfilter.h
* remove tests for the HDF5 memory operations e.g. H5allocate_memory.
* Add flag to ncdump to force use of _Filter instead of _Deflate
or _Shuffle or _Fletcher32. Used for testing.
2022-03-15 02:39:37 +08:00
|
|
|
#ifdef HAVE_H5Z_SZIP
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do not use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the aws-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
#ifdef HDF5_SUPPORTS_PAR_FILTERS
#define SZIP_DIM_LEN 256
#define SZIP_DIM_NAME "Barrels"
#define SZIP_VAR_NAME "Best_Sligo_Rags"
#define SZIP_PIXELS_PER_BLOCK 32
    if (!mpi_rank)
        printf("*** testing szip compression with parallel I/O...");
    {
        int ncid, dimid, varid;
        float *data;      /* this rank's slab of test data (owned, freed below) */
        float *data_in;   /* buffer for read-back verification */
        int elements_per_pe = SZIP_DIM_LEN/mpi_size;
        size_t start[NDIMS1], count[NDIMS1];
        int i;

        /* Create test data: each rank fills its slab with values that
         * depend on both the rank and the index, so any misplaced write
         * will be caught by the read-back check. */
        if (!(data = malloc(elements_per_pe * sizeof(float)))) ERR;
        for (i = 0; i < elements_per_pe; i++)
            data[i] = mpi_rank + i * 0.1;

        /* Create a file with a 1D szip-compressed float var, written
         * collectively by all ranks. */
        if (nc_create_par(FILE, NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL,
                          &ncid)) ERR;
        if (nc_def_dim(ncid, SZIP_DIM_NAME, SZIP_DIM_LEN, &dimid)) ERR;
        if (nc_def_var(ncid, SZIP_VAR_NAME, NC_FLOAT, NDIMS1, &dimid, &varid)) ERR;
        if (nc_def_var_szip(ncid, varid, NC_SZIP_NN, SZIP_PIXELS_PER_BLOCK)) ERR;
        if (nc_enddef(ncid)) ERR;

        /* Each rank writes a contiguous, non-overlapping slab. */
        start[0] = mpi_rank * elements_per_pe;
        count[0] = elements_per_pe;
        /* Note: previously this was "if (...);", a no-op that silently
         * discarded any write error; the result must be checked. */
        if (nc_put_vara_float(ncid, varid, start, count, data)) ERR;
        if (nc_close(ncid)) ERR;

        /* Reopen the file and check that each rank reads back exactly
         * what it wrote. */
        if (nc_open_par(FILE, 0, comm, info, &ncid)) ERR;
        if (!(data_in = malloc(elements_per_pe * sizeof(float)))) ERR;
        if (nc_get_vara_float(ncid, varid, start, count, data_in)) ERR;
        if (nc_close(ncid)) ERR;
        for (i = 0; i < elements_per_pe; i++)
            if (data_in[i] != data[i]) ERR;

        /* Release resources. */
        free(data_in);
        free(data);
    }
    if (!mpi_rank)
        SUMMARIZE_ERR;
#endif /* HDF5_SUPPORTS_PAR_FILTERS */
|
Enhance/Fix filter support
re: Discussion https://github.com/Unidata/netcdf-c/discussions/2214
The primary change is to support so-called "standard filters".
A standard filter is one that is defined by the following
netcdf-c API:
````
int nc_def_var_XXX(int ncid, int varid, size_t nparams, unsigned* params);
int nc_inq_var_XXX(int ncid, int varid, int* usefilterp, unsigned* params);
````
So for example, zstandard would be a standard filter by defining
the functions *nc_def_var_zstandard* and *nc_inq_var_zstandard*.
In order to define these functions, we need a new dispatch function:
````
int nc_inq_filter_avail(int ncid, unsigned filterid);
````
This function, combined with the existing filter API can be used
to implement arbitrary standard filters using a simple code pattern.
Note that I would have preferred that this function return a list
of all available filters, but HDF5 does not support that functionality.
So this PR implements the dispatch function and implements
the following standard functions:
+ bzip2
+ zstandard
+ blosc
Specific test cases are also provided for HDF5 and NCZarr.
Over time, other specific standard filters will be defined.
## Primary Changes
* Add nc_inq_filter_avail() to netcdf-c API.
* Add standard filter implementations to test use of *nc_inq_filter_avail*.
* Bump the dispatch table version number and add to all the relevant
dispatch tables (libsrc, libsrcp, etc).
* Create a program to invoke nc_inq_filter_avail so that it is accessible
to shell scripts.
* Cleanup szip support to properly support szip
when HDF5 is disabled. This involves detecting
libsz separately from testing if HDF5 supports szip.
* Integrate shuffle and fletcher32 into the existing
filter API. This means that, for example, nc_def_var_fletcher32
is now a wrapper around nc_def_var_filter.
* Extend the Codec defaulting to allow multiple default shared libraries.
## Misc. Changes
* Modify configure.ac/CMakeLists.txt to look for the relevant
libraries implementing standard filters.
* Modify libnetcdf.settings to list available standard filters
(including deflate and szip).
* Add CMake test modules to locate libbz2 and libzstd.
* Cleanup the HDF5 memory manager function use in the plugins.
* remove unused file include//ncfilter.h
* remove tests for the HDF5 memory operations e.g. H5allocate_memory.
* Add flag to ncdump to force use of _Filter instead of _Deflate
or _Shuffle or _Fletcher32. Used for testing.
2022-03-15 02:39:37 +08:00
|
|
|
#endif /* HAVE_H5Z_SZIP */
|
2019-08-14 22:22:35 +08:00
|
|
|
|
|
|
|
/* Shut down MPI. */
|
|
|
|
MPI_Finalize();
|
|
|
|
|
|
|
|
if (!mpi_rank)
|
|
|
|
FINAL_RESULTS;
|
|
|
|
|
|
|
|
return 0;
|
2018-05-14 22:49:13 +08:00
|
|
|
}
|