Mirror of https://github.com/HDFGroup/hdf5.git (synced 2025-02-17 16:10:24 +08:00)
Merging in latest from upstream (HDFFV/hdf5:refs/heads/develop)
* commit 'bf570b1a7ca3b9cbd4a59f0933a19ce1bcc99103': (71 commits)
  Amend tests to explicitly use H5Dcreate2 and H5Dopen2
  Revert malloc's back to using hard-coded type for sizeof
  Minor comment refactoring
  Update documentation
  Finish up Parallel Compression test scaling work
  Partial update for scaling parallel filters tests
  Modify t_dset.c in lieu of Parallel Compression changes
  Updated H5C__flush_single_entry() in H5C.c to correct duplicate metadata write bug observed in 1.10.1.
  Amend MANIFEST
  Add test for write parallel; read serial case
  Fix uninitialized array issue in test
  Test updates
  Fix bug where incorrect amount of data was being read from the file
  Add data verification for first half of tests
  Start adding data verification
  Switch tests over to use testing macros
  Updates to parallel filters tests
  Move test files to testpar directory
  Add test file to build process
  Suggested changes from code review
  ...
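One commit subject above, "Amend tests to explicitly use H5Dcreate2 and H5Dopen2", refers to HDF5's versioned public API: calling the numbered functions directly instead of the version-mapped H5Dcreate/H5Dopen macros. A minimal sketch of the explicit calls, with a hypothetical file name:

#include "hdf5.h"

int main(void)
{
    hsize_t dims[1] = { 10 };
    hid_t   file, space, dset;

    file  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    space = H5Screate_simple(1, dims, NULL);

    /* Explicit versioned calls, rather than the H5Dcreate/H5Dopen macros */
    dset = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space,
                      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dclose(dset);

    dset = H5Dopen2(file, "dset", H5P_DEFAULT);
    H5Dclose(dset);

    H5Sclose(space);
    H5Fclose(file);
    return 0;
}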
This commit is contained in: commit 539c17b2f4
MANIFEST (2 changes)

@@ -1236,6 +1236,8 @@
 ./testpar/t_file.c
 ./testpar/t_file_image.c
 ./testpar/t_filter_read.c
+./testpar/t_filters_parallel.c
+./testpar/t_filters_parallel.h
 ./testpar/t_mdset.c
 ./testpar/t_mpi.c
 ./testpar/t_ph5basic.c

src/H5C.c (26 changes)

@@ -6273,17 +6273,27 @@ H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
                 HGOTO_ERROR(H5E_CACHE, H5E_CANTINSERT, FAIL, "unable to insert skip list item")
             } /* end if */
+            else {
 #endif /* H5_HAVE_PARALLEL */
 
-            if(entry_ptr->prefetched) {
-                HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
-                mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
-            } /* end if */
-            else
-                mem_type = entry_ptr->type->mem_type;
-
-            if(H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, dxpl_id, entry_ptr->image_ptr) < 0)
-                HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
+                if(entry_ptr->prefetched) {
+                    HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+                    mem_type = cache_ptr->
+                               class_table_ptr[entry_ptr->prefetch_type_id]->
+                               mem_type;
+                } /* end if */
+                else
+                    mem_type = entry_ptr->type->mem_type;
+
+                if(H5F_block_write(f, mem_type, entry_ptr->addr,
+                                   entry_ptr->size, dxpl_id,
+                                   entry_ptr->image_ptr) < 0)
+                    HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+                                "Can't write image to file")
+#ifdef H5_HAVE_PARALLEL
+            }
 #endif /* H5_HAVE_PARALLEL */
         } /* end if */
 
         /* if the entry has a notify callback, notify it that we have

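The hunk above fixes the duplicate metadata write observed in 1.10.1: an entry queued on the collective write skip list was previously also written immediately via H5F_block_write(). The corrected control flow defers the write when the entry is queued, and writes directly otherwise. A generic sketch of that defer-or-write pattern (the types and names below are illustrative, not HDF5 internals):

#include <stddef.h>

/* Illustrative entry/queue types, not HDF5's */
typedef struct entry { const void *image; size_t size; } entry_t;
typedef struct queue { entry_t *items[64]; size_t n; } queue_t;

static int write_block(const entry_t *e) { (void)e; return 0; /* stub */ }

/* Either queue the entry for a later collective write, or write it now;
 * doing both is what produced the duplicate write. */
static int flush_entry(queue_t *collective_queue, entry_t *e)
{
    if (collective_queue) {                /* parallel, collective path */
        if (collective_queue->n >= 64)
            return -1;
        collective_queue->items[collective_queue->n++] = e;
    }
    else {                                 /* serial/independent path */
        if (write_block(e) < 0)
            return -1;
    }
    return 0;
}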
src/H5Dchunk.c

@@ -299,9 +299,6 @@ static herr_t H5D__chunk_unlock(const H5D_io_info_t *io_info,
 static herr_t H5D__chunk_cache_prune(const H5D_t *dset, hid_t dxpl_id,
     const H5D_dxpl_cache_t *dxpl_cache, size_t size);
 static herr_t H5D__chunk_prune_fill(H5D_chunk_it_ud1_t *udata, hbool_t new_unfilt_chunk);
-static herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info,
-    const H5F_block_t *old_chunk, H5F_block_t *new_chunk, hbool_t *need_insert,
-    hsize_t scaled[]);
 #ifdef H5_HAVE_PARALLEL
 static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
     H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf);

@@ -6558,7 +6555,7 @@ done:
  *
  *-------------------------------------------------------------------------
  */
-static herr_t
+herr_t
 H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk,
     H5F_block_t *new_chunk, hbool_t *need_insert, hsize_t scaled[])
 {

src/H5Dint.c

@@ -1213,10 +1213,6 @@ H5D__create(H5F_t *file, hid_t type_id, const H5S_t *space, hid_t dcpl_id,
         /* Don't allow compact datasets to allocate space later */
         if(layout->type == H5D_COMPACT && fill->alloc_time != H5D_ALLOC_TIME_EARLY)
             HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "compact dataset must have early space allocation")
-
-        /* If MPI VFD is used, no filter support yet. */
-        if(H5F_HAS_FEATURE(file, H5FD_FEAT_HAS_MPI) && pline->nused > 0)
-            HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "Parallel I/O does not support filters yet")
     } /* end if */
 
     /* Set the latest version of the layout, pline & fill messages, if requested */

src/H5Dio.c (68 changes)
@@ -714,11 +714,6 @@ H5D__write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
         if(H5T_get_class(type_info.mem_type, TRUE) == H5T_REFERENCE &&
                 H5T_get_ref_type(type_info.mem_type) == H5R_DATASET_REGION)
             HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "Parallel IO does not support writing region reference datatypes yet")
-
-        /* Can't write to chunked datasets with filters, in parallel */
-        if(dataset->shared->layout.type == H5D_CHUNKED &&
-                dataset->shared->dcpl_cache.pline.nused > 0)
-            HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot write to chunked storage with filters in parallel")
     } /* end if */
     else {
         /* Collective access is not permissible without a MPI based VFD */

@@ -1195,7 +1190,7 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id,
         HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "can't retrieve MPI communicator")
 
     /* Check if we can set direct MPI-IO read/write functions */
-    if((opt = H5D__mpio_opt_possible(io_info, file_space, mem_space, type_info, fm, dx_plist)) < 0)
+    if((opt = H5D__mpio_opt_possible(io_info, file_space, mem_space, type_info, dx_plist)) < 0)
         HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "invalid check for direct IO dataspace ")
 
     /* Check if we can use the optimized parallel I/O routines */

@@ -1207,6 +1202,67 @@ H5D__ioinfo_adjust(H5D_io_info_t *io_info, const H5D_t *dset, hid_t dxpl_id,
         io_info->io_ops.single_write = H5D__mpio_select_write;
     } /* end if */
     else {
+        /* Check if there are any filters in the pipeline. If there are,
+         * we cannot break to independent I/O if this is a write operation;
+         * otherwise there will be metadata inconsistencies in the file.
+         */
+        if (io_info->op_type == H5D_IO_OP_WRITE && io_info->dset->shared->dcpl_cache.pline.nused > 0) {
+            H5D_mpio_no_collective_cause_t cause;
+            uint32_t local_no_collective_cause;
+            uint32_t global_no_collective_cause;
+            hbool_t local_error_message_previously_written = FALSE;
+            hbool_t global_error_message_previously_written = FALSE;
+            size_t index;
+            char local_no_collective_cause_string[256] = "";
+            char global_no_collective_cause_string[256] = "";
+            const char *cause_strings[] = { "independent I/O was requested",
+                                            "datatype conversions were required",
+                                            "data transforms needed to be applied",
+                                            "optimized MPI types flag wasn't set",
+                                            "one of the dataspaces was neither simple nor scalar",
+                                            "dataset was not contiguous or chunked" };
+
+            if (H5P_get(dx_plist, H5D_MPIO_LOCAL_NO_COLLECTIVE_CAUSE_NAME, &local_no_collective_cause) < 0)
+                HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get local no collective cause value")
+            if (H5P_get(dx_plist, H5D_MPIO_GLOBAL_NO_COLLECTIVE_CAUSE_NAME, &global_no_collective_cause) < 0)
+                HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get global no collective cause value")
+
+            /* Append each of the "reason for breaking collective I/O" error messages to the
+             * local and global no collective cause strings */
+            for (cause = 1, index = 0; cause < H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE; cause <<= 1, index++) {
+                size_t cause_strlen = strlen(cause_strings[index]);
+
+                if (cause & local_no_collective_cause) {
+                    /* Check if there were any previous error messages included. If so, prepend a semicolon
+                     * to separate the messages.
+                     */
+                    if (local_error_message_previously_written) strncat(local_no_collective_cause_string, "; ", 2);
+
+                    strncat(local_no_collective_cause_string, cause_strings[index], cause_strlen);
+
+                    local_error_message_previously_written = TRUE;
+                } /* end if */
+
+                if (cause & global_no_collective_cause) {
+                    /* Check if there were any previous error messages included. If so, prepend a semicolon
+                     * to separate the messages.
+                     */
+                    if (global_error_message_previously_written) strncat(global_no_collective_cause_string, "; ", 2);
+
+                    strncat(global_no_collective_cause_string, cause_strings[index], cause_strlen);
+
+                    global_error_message_previously_written = TRUE;
+                } /* end if */
+            } /* end for */
+
+            HGOTO_ERROR(H5E_IO, H5E_NO_INDEPENDENT, FAIL, "Can't perform independent write with filters in pipeline.\n"
+                        "    The following caused a break from collective I/O:\n"
+                        "        Local causes: %s\n"
+                        "        Global causes: %s",
+                        local_no_collective_cause_string,
+                        global_no_collective_cause_string);
+        } /* end if */
+
         /* If we won't be doing collective I/O, but the user asked for
          * collective I/O, change the request to use independent I/O, but
          * mark it so that we remember to revert the change.

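The error-reporting block added above walks a power-of-two flag enum and concatenates a human-readable reason for each set bit. A standalone sketch of the same decoding technique (the flag values and names here are illustrative, not the HDF5 enum):

#include <stdio.h>
#include <string.h>

#define MAX_CAUSE 0x08 /* one past the highest flag bit (illustrative) */

/* Decode a bitmask of failure causes into a semicolon-separated string. */
static void decode_causes(unsigned mask, char *out, size_t outlen)
{
    const char *names[] = { "cause A", "cause B", "cause C" }; /* hypothetical */
    unsigned bit;
    size_t i;

    out[0] = '\0';
    for (bit = 1, i = 0; bit < MAX_CAUSE; bit <<= 1, i++)
        if (mask & bit) {
            if (out[0] != '\0')
                strncat(out, "; ", outlen - strlen(out) - 1);
            strncat(out, names[i], outlen - strlen(out) - 1);
        }
}

int main(void)
{
    char buf[128];

    decode_causes(0x05, buf, sizeof(buf)); /* bits 0 and 2 set */
    printf("%s\n", buf);                   /* prints "cause A; cause C" */
    return 0;
}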
src/H5Dmpio.c (1665 changes)

(File diff suppressed because it is too large.)
src/H5Dpkg.h

@@ -617,6 +617,9 @@ H5_DLL herr_t H5D__select_write(const H5D_io_info_t *io_info,
 H5_DLL herr_t H5D__scatter_mem(const void *_tscat_buf,
     const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
     const H5D_dxpl_cache_t *dxpl_cache, void *_buf);
+H5_DLL size_t H5D__gather_mem(const void *_buf,
+    const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
+    const H5D_dxpl_cache_t *dxpl_cache, void *_tgath_buf/*out*/);
 H5_DLL herr_t H5D__scatgath_read(const H5D_io_info_t *io_info,
     const H5D_type_info_t *type_info,
     hsize_t nelmts, const H5S_t *file_space, const H5S_t *mem_space);

@@ -666,6 +669,8 @@ H5_DLL herr_t H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id,
     const hsize_t *scaled, H5D_chunk_ud_t *udata);
 H5_DLL herr_t H5D__chunk_allocated(H5D_t *dset, hid_t dxpl_id, hsize_t *nbytes);
 H5_DLL herr_t H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_t old_dim[]);
+H5_DLL herr_t H5D__chunk_file_alloc(const H5D_chk_idx_info_t *idx_info, const H5F_block_t *old_chunk,
+    H5F_block_t *new_chunk, hbool_t *need_insert, hsize_t scaled[]);
 H5_DLL herr_t H5D__chunk_update_old_edge_chunks(H5D_t *dset, hid_t dxpl_id,
     hsize_t old_dim[]);
 H5_DLL herr_t H5D__chunk_prune_by_extent(H5D_t *dset, hid_t dxpl_id,

@@ -768,8 +773,7 @@ H5_DLL herr_t H5D__chunk_collective_write(H5D_io_info_t *io_info,
  * memory and the file */
 H5_DLL htri_t H5D__mpio_opt_possible(const H5D_io_info_t *io_info,
     const H5S_t *file_space, const H5S_t *mem_space,
-    const H5D_type_info_t *type_info, const H5D_chunk_map_t *fm,
-    H5P_genplist_t *dx_plist);
+    const H5D_type_info_t *type_info, H5P_genplist_t *dx_plist);
 
 #endif /* H5_HAVE_PARALLEL */

src/H5Dscatgath.c

@@ -47,9 +47,6 @@ static herr_t H5D__scatter_file(const H5D_io_info_t *io_info,
 static size_t H5D__gather_file(const H5D_io_info_t *io_info,
     const H5S_t *file_space, H5S_sel_iter_t *file_iter, size_t nelmts,
     void *buf);
-static size_t H5D__gather_mem(const void *_buf,
-    const H5S_t *space, H5S_sel_iter_t *iter, size_t nelmts,
-    const H5D_dxpl_cache_t *dxpl_cache, void *_tgath_buf/*out*/);
 static herr_t H5D__compound_opt_read(size_t nelmts, const H5S_t *mem_space,
     H5S_sel_iter_t *iter, const H5D_dxpl_cache_t *dxpl_cache,
     const H5D_type_info_t *type_info, void *user_buf/*out*/);

@@ -303,6 +300,7 @@ H5D__scatter_mem (const void *_tscat_buf, const H5S_t *space,
     HDassert(space);
     HDassert(iter);
     HDassert(nelmts > 0);
+    HDassert(dxpl_cache);
     HDassert(buf);
 
     /* Allocate the vector I/O arrays */

@@ -364,7 +362,7 @@ done:
  *
  *-------------------------------------------------------------------------
  */
-static size_t
+size_t
 H5D__gather_mem(const void *_buf, const H5S_t *space,
     H5S_sel_iter_t *iter, size_t nelmts, const H5D_dxpl_cache_t *dxpl_cache,
     void *_tgath_buf/*out*/)

@@ -387,6 +385,7 @@ H5D__gather_mem(const void *_buf, const H5S_t *space,
     HDassert(space);
     HDassert(iter);
     HDassert(nelmts > 0);
+    HDassert(dxpl_cache);
     HDassert(tgath_buf);
 
     /* Allocate the vector I/O arrays */

src/H5Dpublic.h

@@ -166,7 +166,7 @@ typedef enum H5D_mpio_no_collective_cause_t {
     H5D_MPIO_MPI_OPT_TYPES_ENV_VAR_DISABLED = 0x08,
     H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES = 0x10,
     H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET = 0x20,
-    H5D_MPIO_FILTERS = 0x40
+    H5D_MPIO_NO_COLLECTIVE_MAX_CAUSE = 0x40
 } H5D_mpio_no_collective_cause_t;
 
 /********************/

src/H5err.txt

@@ -243,6 +243,8 @@ MINOR, LINK, H5E_CANTSORT, Can't sort objects
 MINOR, MPI, H5E_MPI, Some MPI function failed
 MINOR, MPI, H5E_MPIERRSTR, MPI Error String
 MINOR, MPI, H5E_CANTRECV, Can't receive data
+MINOR, MPI, H5E_CANTGATHER, Can't gather data
+MINOR, MPI, H5E_NO_INDEPENDENT, Can't perform independent IO
 
 # Heap errors
 MINOR, HEAP, H5E_CANTRESTORE, Can't restore condition

src/H5trace.c

@@ -621,10 +621,6 @@ H5_trace(const double *returning, const char *func, const char *type, ...)
                         fprintf(out, "%sH5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET", flag_already_displayed ? " | " : "");
                         flag_already_displayed = TRUE;
                     } /* end if */
-                    if(nocol_cause_mode & H5D_MPIO_FILTERS) {
-                        fprintf(out, "%sH5D_MPIO_FILTERS", flag_already_displayed ? " | " : "");
-                        flag_already_displayed = TRUE;
-                    } /* end if */
 
                     /* Display '<none>' if there's no flags set */
                     if(!flag_already_displayed)

testpar/Makefile.am

@@ -23,7 +23,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
 
 # Test programs.  These are our main targets.
 #
-TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame
+TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel
 
 check_PROGRAMS = $(TEST_PROG_PARA)

testpar/t_dset.c

@@ -2651,11 +2651,8 @@ compress_readAll(void)
             nerrors++;
         }
 
-        /* Writing to the compressed, chunked dataset in parallel should fail */
-        H5E_BEGIN_TRY {
-            ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
-        } H5E_END_TRY;
-        VRFY((ret < 0), "H5Dwrite failed");
+        ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+        VRFY((ret >= 0), "H5Dwrite succeeded");
 
         ret = H5Pclose(xfer_plist);
         VRFY((ret >= 0), "H5Pclose succeeded");

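With the filter restriction lifted, the test now expects a collective write to a compressed, chunked dataset to succeed. A self-contained sketch of such a write (file and dataset names are hypothetical; error checking omitted for brevity):

#include <mpi.h>
#include "hdf5.h"

int main(int argc, char **argv)
{
    int     mpi_rank, mpi_size, value;
    hsize_t dims[1], chunk[1], start[1], count[1];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    hid_t file = H5Fcreate("example_filtered.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    dims[0]  = (hsize_t)mpi_size;
    chunk[0] = 1;
    hid_t space = H5Screate_simple(1, dims, NULL);
    hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, chunk);
    H5Pset_deflate(dcpl, 6); /* gzip filter */
    hid_t dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
                            H5P_DEFAULT, dcpl, H5P_DEFAULT);

    /* Each rank collectively writes its own element */
    start[0] = (hsize_t)mpi_rank;
    count[0] = 1;
    H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, NULL);
    hid_t mspace = H5Screate_simple(1, count, NULL);
    hid_t dxpl   = H5Pcreate(H5P_DATASET_XFER);
    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    value = mpi_rank;
    H5Dwrite(dset, H5T_NATIVE_INT, mspace, space, dxpl, &value);

    H5Pclose(dxpl); H5Sclose(mspace); H5Dclose(dset); H5Pclose(dcpl);
    H5Sclose(space); H5Fclose(file); H5Pclose(fapl);
    MPI_Finalize();
    return 0;
}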
testpar/t_filters_parallel.c (new file, 2475 lines)

(File diff suppressed because it is too large.)
testpar/t_filters_parallel.h (new file, 212 lines)

@@ -0,0 +1,212 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5. The full HDF5 copyright notice, including      *
 * terms governing use, modification, and redistribution, is contained in    *
 * the files COPYING and Copyright.html. COPYING can be found at the root    *
 * of the source code distribution tree; Copyright.html can be found at the  *
 * root level of an installed copy of the electronic HDF5 document set and   *
 * is linked from the top-level documents page. It can also be found at      *
 * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have           *
 * access to either file, you may request a copy from help@hdfgroup.org.     *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Programmer: Jordan Henderson
 *             01/31/2017
 *
 * This file contains #defines for tests of the use
 * of filters in parallel HDF5, implemented in
 * H5Dmpio.c
 */

#ifndef TEST_PARALLEL_FILTERS_H_
#define TEST_PARALLEL_FILTERS_H_

#include <string.h>
#include <stdlib.h>

#include "testpar.h"

/* Used to load filters other than GZIP */
/* #define DYNAMIC_FILTER */ /* Uncomment and define the fields below to use a dynamically loaded filter */
#define FILTER_NUM_CDVALUES 1
const unsigned int cd_values[FILTER_NUM_CDVALUES] = { 0 };
H5Z_filter_t filter_id;
unsigned int flags = 0;
size_t cd_nelmts = FILTER_NUM_CDVALUES;

/* Utility Macros */
#define STRINGIFY(type) #type

/* Common defines for all tests */
#define C_DATATYPE long
#define COMPOUND_C_DATATYPE cmpd_filtered_t
#define C_DATATYPE_STR(type) STRINGIFY(type)
#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG

#define GEN_DATA(i) INCREMENTAL_DATA(i) /* Given an index value i, generates test data based upon the selected mode */
#define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */

/* For experimental purposes only; will cause tests to fail the data verification phase - JTH */
/* #define GEN_DATA(i) RANK_DATA(i) */
#define RANK_DATA(i) (mpi_rank) /* Generates test data that visibly shows which rank wrote to which parts of the dataset */

#ifdef DYNAMIC_FILTER
#define SET_FILTER(dcpl) H5Pset_filter(dcpl, filter_id, flags, FILTER_NUM_CDVALUES, cd_values) /* Test another filter in parallel */
#else
#define SET_FILTER(dcpl) H5Pset_deflate(dcpl, 6) /* Test GZIP filter in parallel */
#endif
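A sketch of how the macros above are typically used: SET_FILTER() applies the selected filter to a dataset creation property list and GEN_DATA() fills a write buffer with rank-dependent values. The helper functions below are hypothetical, and mpi_rank is assumed to have been set with MPI_Comm_rank():

#include <stdlib.h>
#include "hdf5.h"
#include "t_filters_parallel.h"

int mpi_rank; /* needed by GEN_DATA; set after MPI_Comm_rank() in the real tests */

/* Hypothetical helper mirroring the tests: create a chunked DCPL with the
 * selected filter applied. */
static hid_t build_filtered_dcpl(int ndims, const hsize_t *chunk_dims)
{
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);

    if (dcpl < 0 || H5Pset_chunk(dcpl, ndims, chunk_dims) < 0)
        return -1;
    if (SET_FILTER(dcpl) < 0) /* expands to H5Pset_deflate() or H5Pset_filter() */
        return -1;
    return dcpl;
}

/* Hypothetical helper: fill a buffer with rank-dependent test data. */
static void fill_data(C_DATATYPE *data, size_t nelmts)
{
    size_t i;

    for (i = 0; i < nelmts; i++)
        data[i] = (C_DATATYPE) GEN_DATA(i);
}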
#define DIM0_SCALE_FACTOR 4
#define DIM1_SCALE_FACTOR 2

/* Defines for the one-chunk filtered dataset test */
#define ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset"
#define ONE_CHUNK_FILTERED_DATASET_DIMS 2
#define ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
#define ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an even multiple of the number of ranks to avoid issues */
#define ONE_CHUNK_FILTERED_DATASET_CH_NROWS ONE_CHUNK_FILTERED_DATASET_NROWS
#define ONE_CHUNK_FILTERED_DATASET_CH_NCOLS ONE_CHUNK_FILTERED_DATASET_NCOLS

/* Defines for the unshared filtered chunks write test */
#define UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks"
#define UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2
#define UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define UNSHARED_FILTERED_CHUNKS_CH_NROWS (UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
#define UNSHARED_FILTERED_CHUNKS_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)

/* Defines for the shared filtered chunks write test */
#define SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks"
#define SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
#define SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
#define SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
#define SHARED_FILTERED_CHUNKS_NROWS (SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
#define SHARED_FILTERED_CHUNKS_NCOLS (SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)

/* Defines for the filtered chunks write test where a process has no selection */
#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks"
#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
#define SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)

/* Defines for the filtered chunks write test where no process has a selection */
#define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks"
#define ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
#define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
#define ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
#define ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
#define ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)

/* Defines for the filtered chunks write test with a point selection */
#define POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks"
#define POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
#define POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
#define POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
#define POINT_SELECTION_FILTERED_CHUNKS_NROWS (POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
#define POINT_SELECTION_FILTERED_CHUNKS_NCOLS (POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)

/* Defines for the filtered dataset interleaved write test */
#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "interleaved_write_filtered_dataset"
#define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2
#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size)
#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
#define INTERLEAVED_WRITE_FILTERED_DATASET_NROWS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)

/* Defines for the 3D unshared filtered dataset separate page write test */
#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages"
#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
#define UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)

/* Defines for the 3D unshared filtered dataset same page write test */
#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages"
#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
#define UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)

/* Defines for the 3D shared filtered dataset write test */
#define SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks"
#define SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
#define SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
#define SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR)
#define SHARED_FILTERED_CHUNKS_3D_NROWS (SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR)
#define SHARED_FILTERED_CHUNKS_3D_NCOLS (SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR)
#define SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)

/* Struct type for the compound datatype filtered dataset tests */
typedef struct {
    short  field1;
    int    field2;
    long   field3;
    double field4;
} COMPOUND_C_DATATYPE;

/* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion"
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)

/* Defines for the compound datatype filtered dataset no conversion write test with shared chunks */
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion"
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
#define COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS

/* Defines for the compound datatype filtered dataset type conversion write test with unshared chunks */
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion"
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)

/* Defines for the compound datatype filtered dataset type conversion write test with shared chunks */
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion"
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
#define COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS

/* Defines for the write file serially/read in parallel test */
#define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel"
#define WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS 3
#define WRITE_SERIAL_READ_PARALLEL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define WRITE_SERIAL_READ_PARALLEL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define WRITE_SERIAL_READ_PARALLEL_DEPTH (mpi_size)
#define WRITE_SERIAL_READ_PARALLEL_CH_NROWS (WRITE_SERIAL_READ_PARALLEL_NROWS / mpi_size)
#define WRITE_SERIAL_READ_PARALLEL_CH_NCOLS (WRITE_SERIAL_READ_PARALLEL_NCOLS / mpi_size)

/* Defines for the write file in parallel/read serially test */
#define WRITE_PARALLEL_READ_SERIAL_DATASET_NAME "write_parallel_read_serial"
#define WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS 3
#define WRITE_PARALLEL_READ_SERIAL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
#define WRITE_PARALLEL_READ_SERIAL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
#define WRITE_PARALLEL_READ_SERIAL_DEPTH (mpi_size)
#define WRITE_PARALLEL_READ_SERIAL_CH_NROWS (WRITE_PARALLEL_READ_SERIAL_NROWS / mpi_size)
#define WRITE_PARALLEL_READ_SERIAL_CH_NCOLS (WRITE_PARALLEL_READ_SERIAL_NCOLS / mpi_size)

#endif /* TEST_PARALLEL_FILTERS_H_ */