/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the COPYING file, which can be found at the root of the source code *
* distribution tree, or in https://www.hdfgroup.org/licenses. *
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Purpose: Tests chunk query API functions
 *
 * Test structure:
 *      main()
 *          test_basic_query()
 *          test_get_chunk_info_highest_v18()
 *          test_get_chunk_info_v110()
 *              test_chunk_info_single_chunk()
 *              test_chunk_info_implicit()
 *              test_chunk_info_fixed_array()
 *              test_chunk_info_extensible_array()
 *              test_chunk_info_version2_btrees()
 *              test_failed_attempts()
 *          test_flt_msk_with_skip_compress()
 *
 * Helper functions:
 *      verify_idx_nchunks()
 *      verify_get_chunk_info()
 *      verify_get_chunk_info_by_coord()
 *      verify_empty_chunk_info()
 *      index_type_str()
 *
 */
#define H5D_FRIEND
#define H5D_TESTING /* to use H5D__ functions */
#include "H5Dpkg.h"

#include "testhdf5.h"
#ifdef H5_HAVE_FILTER_DEFLATE
#include "zlib.h"
#endif

/* Test file names, using H5F_libver_t as indices */
static const char *FILENAME[] = {"tchunk_info_earliest",
                                 "tchunk_info_v18",
                                 "tchunk_info_v110",
                                 "tchunk_info_v112",
                                 "tchunk_info_v114",
                                 "tchunk_info_v116",
                                 NULL};

/* File to be used in test_failed_attempts */
#define FILTERMASK_FILE "tflt_msk"
#define BASIC_FILE      "basic_query"

/* Parameters for testing chunk querying */
#define SIMPLE_CHUNKED_DSET_NAME    "Chunked Dataset"
#define CONTIGUOUS_DSET_NAME        "Contiguous Dataset"
#define EMPTY_DSET_NAME             "Empty Dataset"
#define EMPTY_EARLY_ALLOC_DSET_NAME "Empty Dataset with ALLOC_TIME_EARLY"
#define SINGLE_CHUNK_DSET_NAME      "Single Chunk Index Dataset"
#define IMPLICIT_INDEX_DSET_NAME    "Implicit Index Dataset"
#define FIXED_ARR_INDEX_DSET_NAME   "Fixed Array Index Dataset"
#define EXT_ARR_INDEX_DSET_NAME     "Extensible Array Index Dataset"
#define V2_BTREE_INDEX_DSET_NAME    "Version 2 B-Tree Index Dataset"
#define SKIP_FILTER_DSET_NAME       "Dataset with Skipping One Filter"
#define FILENAME_BUF_SIZE           256 /* Size for file names */
#define RANK                        2   /* Rank for datasets */

/* Dimension of the dataset */
#define NX 24
#define NY 16

/* Dimension of the chunk */
#define CHUNK_NX 6
#define CHUNK_NY 4

/* X/Y coords of first chunk written */
#define START_CHK_X 0
#define START_CHK_Y 2

/* X/Y coord of last chunk written */
#define END_CHK_X 2
#define END_CHK_Y 4

/* X and Y coords of an empty chunk */
#define EMPTY_CHK_X 0
#define EMPTY_CHK_Y 0

/* Size of a chunk when the entire dataset is a one single chunk */
#define SINGLE_CHK_SIZE (NX * NY * sizeof(int))

/* Size of a chunk, in bytes */
#define CHK_SIZE (CHUNK_NX * CHUNK_NY * sizeof(int))

/* Size of an empty chunk */
#define EMPTY_CHK_SIZE 0

/* Number of maximum chunks without extending */
#define NUM_CHUNKS ((NX / CHUNK_NX) * (NY / CHUNK_NY))

/* Number of chunks that have been written */
#define NUM_CHUNKS_WRITTEN 4
#define ONE_CHUNK_WRITTEN  1
#define TWO_CHUNKS_WRITTEN 2
#define NO_CHUNK_WRITTEN   0

/* For testing invalid arguments */
#define NONEXIST_CHK_INDEX   3
#define OUTOFRANGE_CHK_INDEX 5
#define INVALID_CHK_INDEX    5

/* For compressed data: worst-case deflate output size per zlib guidance
   (input size * 1.001 + 12 bytes) */
#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s)) * 1.001) + 12.0)

/* For use in error reporting */
#define MSG_CHK_ADDR "Chunk address should not be HADDR_UNDEF because of H5D_ALLOC_TIME_EARLY."
#define MSG_CHK_SIZE "Chunk size should not be 0 because of H5D_ALLOC_TIME_EARLY."

/* Utility function to initialize arguments */
void reinit_vars(unsigned *read_flt_msk, haddr_t *addr, hsize_t *size);

/* Helper function containing common code that verifies indexing type
   and number of chunks */
static int verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type,
                              hsize_t exp_num_chunks);
static int verify_get_chunk_info(hid_t dset, hid_t dspace, hsize_t chk_index, hsize_t exp_chk_size,
                                 const hsize_t *exp_offset, unsigned exp_flt_msk);
static int verify_get_chunk_info_by_coord(hid_t dset, hsize_t *offset, hsize_t exp_chk_size,
                                          unsigned exp_flt_msk);
static int verify_empty_chunk_info(hid_t dset, hsize_t *offset);
static const char *index_type_str(H5D_chunk_index_t idx_type);
/*-------------------------------------------------------------------------
* Function: reinit_vars (helper function)
*
* Purpose: Wipes out variables for the next use, used in various tests.
*
* Return: Won't fail
*
* Date: September 2018
*
*-------------------------------------------------------------------------
*/
2020-09-30 22:27:10 +08:00
void
reinit_vars(unsigned *read_flt_msk, haddr_t *addr, hsize_t *size)
{
2020-09-30 22:27:10 +08:00
if (read_flt_msk)
*read_flt_msk = 0;
2020-09-30 22:27:10 +08:00
if (addr)
*addr = 0;
2020-09-30 22:27:10 +08:00
if (size)
*size = 0;
}
/*-------------------------------------------------------------------------
* Function: verify_get_chunk_info (helper function)
*
* Purpose: Verifies that H5Dget_chunk_info returns correct
* values for a chunk.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Date: August 2019
*
*-------------------------------------------------------------------------
*/
static int
verify_get_chunk_info(hid_t dset, hid_t dspace, hsize_t chk_index, hsize_t exp_chk_size,
                      const hsize_t *exp_offset, unsigned exp_flt_msk)
{
    hsize_t  coords[2]   = {0, 0}; /* Retrieved logical offset of the chunk */
    unsigned filter_mask = 0;      /* Retrieved filter mask */
    haddr_t  chunk_addr  = 0;      /* Retrieved chunk address */
    hsize_t  chunk_size  = 0;      /* Retrieved chunk size, in bytes */

    /* Ask the library for everything it knows about the chunk at the
       given index */
    if (H5Dget_chunk_info(dset, dspace, chk_index, coords, &filter_mask, &chunk_addr, &chunk_size) < 0)
        TEST_ERROR;

    /* A written/allocated chunk must have a defined address */
    if (chunk_addr == HADDR_UNDEF)
        FAIL_PUTS_ERROR("address cannot be HADDR_UNDEF");

    /* Compare every retrieved value against the caller's expectations */
    if (chunk_size != exp_chk_size)
        FAIL_PUTS_ERROR("unexpected chunk size");
    if (filter_mask != exp_flt_msk)
        FAIL_PUTS_ERROR("unexpected filter mask");
    if (coords[0] != exp_offset[0])
        FAIL_PUTS_ERROR("unexpected offset[0]");
    if (coords[1] != exp_offset[1])
        FAIL_PUTS_ERROR("unexpected offset[1]");

    return SUCCEED;

error:
    return FAIL;
}
/*-------------------------------------------------------------------------
* Function: verify_get_chunk_info_by_coord (helper function)
*
* Purpose: Verifies that H5Dget_chunk_info_by_coord returns correct
* values for a chunk.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Date: August 2019
*
*-------------------------------------------------------------------------
*/
static int
verify_get_chunk_info_by_coord(hid_t dset, hsize_t *offset, hsize_t exp_chk_size, unsigned exp_flt_msk)
{
    unsigned filter_mask = 0; /* Retrieved filter mask */
    haddr_t  chunk_addr  = 0; /* Retrieved chunk address */
    hsize_t  chunk_size  = 0; /* Retrieved chunk size, in bytes */

    /* Query the chunk at the logical coordinates given in offset */
    if (H5Dget_chunk_info_by_coord(dset, offset, &filter_mask, &chunk_addr, &chunk_size) < 0)
        TEST_ERROR;

    /* A written/allocated chunk must have a defined address, and the
       size and filter mask must match the caller's expectations */
    if (chunk_addr == HADDR_UNDEF)
        FAIL_PUTS_ERROR("address cannot be HADDR_UNDEF");
    if (chunk_size != exp_chk_size)
        FAIL_PUTS_ERROR("unexpected chunk size");
    if (filter_mask != exp_flt_msk)
        FAIL_PUTS_ERROR("unexpected filter mask");

    return SUCCEED;

error:
    return FAIL;
}
/*-------------------------------------------------------------------------
* Function: verify_empty_chunk_info (helper function)
*
* Purpose: Verifies that H5Dget_chunk_info_by_coord returns correct
* values for an empty chunk.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Date: August 2018
*
*-------------------------------------------------------------------------
*/
static int
verify_empty_chunk_info(hid_t dset, hsize_t *offset)
{
    unsigned filter_mask = 0; /* Retrieved filter mask */
    haddr_t  chunk_addr  = 0; /* Retrieved chunk address */
    hsize_t  chunk_size  = 0; /* Retrieved chunk size, in bytes */

    /* Query the chunk at the logical coordinates given in offset */
    if (H5Dget_chunk_info_by_coord(dset, offset, &filter_mask, &chunk_addr, &chunk_size) < 0)
        TEST_ERROR;

    /* An empty (never-written) chunk must report an undefined address
       and a zero size */
    if (chunk_addr != HADDR_UNDEF)
        FAIL_PUTS_ERROR("address was not HADDR_UNDEF");
    if (chunk_size != EMPTY_CHK_SIZE)
        FAIL_PUTS_ERROR("size was not EMPTY_CHK_SIZE");

    return SUCCEED;

error:
    return FAIL;
}
/*-------------------------------------------------------------------------
* Function: index_type_str (helper function)
*
* Purpose: Returns the string containing the text associated with the
* given indexing scheme. For use in error messages.
*
* Return: Success: a valid indexing scheme string
* Failure: a note indicating the indexing type is invalid
*
* Date: August 2019
*
*-------------------------------------------------------------------------
*/
2020-09-30 22:27:10 +08:00
static const char *
index_type_str(H5D_chunk_index_t idx_type)
{
switch (idx_type) {
case H5D_CHUNK_IDX_SINGLE:
2020-09-30 22:27:10 +08:00
return ("Single Chunk index type");
case H5D_CHUNK_IDX_NONE:
2020-09-30 22:27:10 +08:00
return ("Implicit index type");
case H5D_CHUNK_IDX_FARRAY:
2020-09-30 22:27:10 +08:00
return ("Fixed Array index type");
case H5D_CHUNK_IDX_EARRAY:
2020-09-30 22:27:10 +08:00
return ("Extensible Array index type");
case H5D_CHUNK_IDX_BT2:
2020-09-30 22:27:10 +08:00
return ("Version 2 B-tree index type");
case H5D_CHUNK_IDX_BTREE:
2020-09-30 22:27:10 +08:00
return ("Version 1 B-tree index type (default)");
case H5D_CHUNK_IDX_NTYPES:
default:
2020-09-30 22:27:10 +08:00
return ("invalid index type");
}
} /* index_type_str */
/*-------------------------------------------------------------------------
* Function: verify_selected_chunks (helper function)
*
* Purpose: Reads the chunks within the boundary {start,end} and verify
* the values against the populated data.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Date: August 2019
*
*-------------------------------------------------------------------------
*/
static int
verify_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsize_t *end)
{
    int      read_buf[CHUNK_NX][CHUNK_NY];                 /* One chunk read back */
    int      expected_buf[NUM_CHUNKS][CHUNK_NX][CHUNK_NY]; /* Expected data */
    unsigned read_flt_msk = 0;                             /* Filter mask read back */
    hsize_t  offset[2]    = {0, 0};                        /* Offset coordinates of a chunk */
    hsize_t  chk_index;                                    /* Chunk index */
    hsize_t  ii, jj;                                       /* Array indices */
    int      n;

    memset(&read_buf, 0, sizeof(read_buf));

    /* Initialize the array of chunk data for all NUM_CHUNKS chunks, this is
       the same as the written data and will be used to verify the read data */
    for (n = 0; n < NUM_CHUNKS; n++)
        for (ii = 0; ii < CHUNK_NX; ii++)
            for (jj = 0; jj < CHUNK_NY; jj++)
                expected_buf[n][ii][jj] = (int)(ii * jj) + 1;

    /* Read each chunk within the boundary of {start,end} and verify the
       values against the expected data */
    chk_index = 0;
    for (ii = start[0]; ii < end[0]; ii++)
        for (jj = start[1]; jj < end[1]; jj++, chk_index++) {
            offset[0] = ii * CHUNK_NX;
            offset[1] = jj * CHUNK_NY;

            /* Read the current chunk */
            if (H5Dread_chunk(dset, plist, offset, &read_flt_msk, read_buf) < 0)
                TEST_ERROR;

            /* Verify that the read chunk is the same as the corresponding
               written one.  Compare the full chunk: CHK_SIZE is the byte
               count (CHUNK_NX * CHUNK_NY * sizeof(int)); comparing only
               CHUNK_NX * CHUNK_NY bytes would silently check just the
               first quarter of the int data. */
            if (memcmp(expected_buf[chk_index], read_buf, CHK_SIZE) != 0) {
                fprintf(stderr,
                        "Read chunk differs from written chunk at offset (%" PRIuHSIZE ",%" PRIuHSIZE ")\n",
                        offset[0], offset[1]);
                return FAIL;
            }
        }

    return SUCCEED;

error:
    return FAIL;
} /* verify_selected_chunks */
/*-------------------------------------------------------------------------
* Function: write_selected_chunks (helper function)
*
* Purpose: Verifies that chunk indexing scheme and number of chunks of
* the dataset matches the expected values, then write data to
* a subset of chunks. This function opens the dataset then
* closes it after writing.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Date: August 2019
*
*-------------------------------------------------------------------------
*/
static int
write_selected_chunks(hid_t dset, hid_t plist, const hsize_t *start, const hsize_t *end, unsigned flt_msk)
{
    int     chunk_data[NUM_CHUNKS][CHUNK_NX][CHUNK_NY]; /* Data in chunks */
    hsize_t coords[2];                                  /* Logical offset of the chunk being written */
    hsize_t chunk_idx;                                  /* Running chunk index */
    hsize_t row, col;                                   /* Array indices */
    int     k;

    /* Fill every chunk's buffer with the pattern (row * col) + 1, the same
       pattern verify_selected_chunks() later checks against */
    for (k = 0; k < NUM_CHUNKS; k++)
        for (row = 0; row < CHUNK_NX; row++)
            for (col = 0; col < CHUNK_NY; col++)
                chunk_data[k][row][col] = (int)(row * col) + 1;

    /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords:
       (0,2) (0,3) (1,2) (1,3) */
    chunk_idx = 0;
    for (row = start[0]; row < end[0]; row++)
        for (col = start[1]; col < end[1]; col++, chunk_idx++) {
            coords[0] = row * CHUNK_NX;
            coords[1] = col * CHUNK_NY;
            if (H5Dwrite_chunk(dset, plist, flt_msk, coords, CHK_SIZE, (void *)chunk_data[chunk_idx]) < 0)
                TEST_ERROR;
        }

    return SUCCEED;

error:
    return FAIL;
} /* write_selected_chunks */
/*-------------------------------------------------------------------------
* Function: verify_idx_nchunks (helper function)
*
* Purpose: Verifies that chunk indexing scheme and number of chunks of
* the dataset match the expected values.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Date: August 2019
*
*-------------------------------------------------------------------------
*/
static int
verify_idx_nchunks(hid_t dset, hid_t dspace, H5D_chunk_index_t exp_idx_type, hsize_t exp_num_chunks)
{
    H5D_chunk_index_t idx_type;    /* Dataset chunk index type */
    hsize_t           nchunks = 0; /* Number of chunks */

    /* Get the chunk indexing type of the dataset */
    if (H5Dget_chunk_index_type(dset, &idx_type) < 0)
        TEST_ERROR;

    /* Ensure the correct chunk indexing scheme is used.  On mismatch,
       report both the expected and the actual index type; printing only
       the actual type as the one that "should" be used is misleading. */
    if (idx_type != exp_idx_type) {
        char msg[256];
        HDsnprintf(msg, sizeof(msg), "Should be using %s but got %s.\n", index_type_str(exp_idx_type),
                   index_type_str(idx_type));
        FAIL_PUTS_ERROR(msg);
    }

    /* Get and verify the number of chunks */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (nchunks != exp_num_chunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    /* Get and verify the number of chunks again, passing in H5S_ALL */
    if (H5Dget_num_chunks(dset, H5S_ALL, &nchunks) < 0)
        TEST_ERROR;
    if (nchunks != exp_num_chunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    return SUCCEED;

error:
    return FAIL;
} /* verify_idx_nchunks */
/*-------------------------------------------------------------------------
* Function: test_get_chunk_info_highest_v18
*
* Purpose: Test getting various chunk information
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Note: Note that the dataspace argument in these new functions is
* currently not used. The functionality involved the dataspace
* will be implemented in the next version.
*
* Description:
* This function tests the new API functions added for EED-343:
* H5Dget_num_chunks, H5Dget_chunk_info, and
* H5Dget_chunk_info_by_coord for high bound up to 1.8.
*
* Date: September 2018
*
*-------------------------------------------------------------------------
*/
static herr_t
test_get_chunk_info_highest_v18(hid_t fapl)
{
    char    filename[FILENAME_BUF_SIZE];                 /* File name */
    hid_t   chunkfile     = H5I_INVALID_HID;             /* File ID */
    hid_t   dspace        = H5I_INVALID_HID;             /* Dataspace ID */
    hid_t   dset          = H5I_INVALID_HID;             /* Dataset ID */
    hid_t   cparms        = H5I_INVALID_HID;             /* Creation plist */
    hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};        /* Chunk dimensions */
    int     direct_buf[CHUNK_NX][CHUNK_NY];              /* Data chunk */
    hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* 2 unlimited dims */
    hsize_t  out_offset[2];                              /* Buffer to get offset coordinates */
    hsize_t  size    = 0;                                /* Size of an allocated/written chunk */
    hsize_t  nchunks = 0;                                /* Number of chunks */
    haddr_t  addr    = 0;                                /* Address of an allocated/written chunk */
    hsize_t  chk_index    = 0;                           /* Index of a chunk */
    hsize_t  dims[2]      = {NX, NY};                    /* Dataset dimensions */
    unsigned flt_msk      = 0;                           /* Filter mask */
    unsigned read_flt_msk = 0;                           /* Filter mask after direct read */
    int      fillvalue    = -1;                          /* Fill value */
    hsize_t  offset[2]    = {0, 0};                      /* Offset coordinates of a chunk */
#ifdef H5_HAVE_FILTER_DEFLATE
    int          aggression = 9; /* Compression aggression setting */
    const Bytef *z_src      = (const Bytef *)(direct_buf);
    Bytef       *z_dst; /* Destination buffer */
    uLongf       z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(CHK_SIZE);
    uLong        z_src_nbytes = (uLong)CHK_SIZE;
#endif
    void    *inbuf      = NULL;     /* Pointer to new buffer */
    hsize_t  chunk_size = CHK_SIZE; /* Size of a chunk, can be compressed or not */
    hsize_t  ii, jj;                /* Array indices */
    int      n;                     /* Used as chunk index, but int to avoid conversion warning */
    herr_t   ret;                   /* Temporary returned value for verifying failure */

    TESTING("getting chunk information in file with version prior to 1.10");

    /* Create the file */
    h5_fixname(FILENAME[H5F_LIBVER_V18], fapl, filename, sizeof filename);

    /* Set version bounds for creating the file.  High bound to V18 to test
       chunked datasets that use B-tree v1 structures to index chunks. */
    if (H5Pset_libver_bounds(fapl, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18) < 0)
        TEST_ERROR;

    chunkfile = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    if (chunkfile < 0)
        TEST_ERROR;

    /* Create the file and memory dataspace */
    if ((dspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
        TEST_ERROR;

    /* Set dset creation properties with chunking, compression, and fillvalue */
    if ((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(cparms, RANK, chunk_dims) < 0)
        TEST_ERROR;

#ifdef H5_HAVE_FILTER_DEFLATE
    if (H5Pset_deflate(cparms, (unsigned)aggression) < 0)
        TEST_ERROR;
#endif /* end H5_HAVE_FILTER_DEFLATE */

    /* Set fill value */
    if (H5Pset_fill_value(cparms, H5T_NATIVE_INT, &fillvalue) < 0)
        TEST_ERROR;

    /* Create a new dataset using cparms creation properties */
    dset = H5Dcreate2(chunkfile, SIMPLE_CHUNKED_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
                      H5P_DEFAULT);
    if (dset < 0)
        TEST_ERROR;

    /* Initialize a chunk of data */
    for (ii = 0; ii < CHUNK_NX; ii++)
        for (jj = 0; jj < CHUNK_NY; jj++)
            direct_buf[ii][jj] = (int)(ii * jj) + 1;

#ifdef H5_HAVE_FILTER_DEFLATE
    /* Allocate input (compressed) buffer; check the result before handing
       it to zlib (previously only the non-deflate branch checked, and a
       failed allocation would have passed NULL to compress2()) */
    if (NULL == (inbuf = calloc(1, z_dst_nbytes)))
        TEST_ERROR;

    /* zlib-friendly alias for the input buffer */
    z_dst = (Bytef *)inbuf;

    /* Perform compression from the source to the destination buffer */
    ret = compress2(z_dst, &z_dst_nbytes, z_src, z_src_nbytes, aggression);

    /* Set the chunk size to the compressed chunk size */
    chunk_size = (hsize_t)z_dst_nbytes;

    /* Check for various zlib errors */
    if (Z_BUF_ERROR == ret) {
        fprintf(stderr, "overflow");
        TEST_ERROR;
    }
    else if (Z_MEM_ERROR == ret) {
        fprintf(stderr, "deflate memory error");
        TEST_ERROR;
    }
    else if (Z_OK != ret) {
        fprintf(stderr, "other deflate error");
        TEST_ERROR;
    }
#else
    /* Allocate input (non-compressed) buffer */
    if (NULL == (inbuf = calloc(1, CHK_SIZE)))
        TEST_ERROR;
    memcpy(inbuf, direct_buf, CHK_SIZE);
#endif /* end H5_HAVE_FILTER_DEFLATE */

    /* Write only NUM_CHUNKS_WRITTEN chunks at the following logical coords:
       (0,2) (0,3) (1,2) (1,3) */
    n = 0;
    for (ii = START_CHK_X; ii < END_CHK_X; ii++)
        for (jj = START_CHK_Y; jj < END_CHK_Y; jj++, n++) {
            offset[0] = ii * CHUNK_NX;
            offset[1] = jj * CHUNK_NY;
            ret       = H5Dwrite_chunk(dset, H5P_DEFAULT, flt_msk, offset, chunk_size, (void *)inbuf);
            if (ret < 0)
                TEST_ERROR;
        }

    /* Free the input buffer (free(NULL) is a no-op, no guard needed) and
       clear the pointer so the error path cannot double-free it */
    free(inbuf);
    inbuf = NULL;

    /* Flush the file containing the dataset; H5Fflush accepts any object
       ID in the file */
    if (H5Fflush(dset, H5F_SCOPE_LOCAL) < 0)
        TEST_ERROR;

    /* Close the dataset */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* ...open it again to test the chunk query functions */
    if ((dset = H5Dopen2(chunkfile, SIMPLE_CHUNKED_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Get and verify the number of chunks written */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (NUM_CHUNKS_WRITTEN != nchunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    /* Get and verify info of the last written chunk again, passing in H5S_ALL
       this time */
    offset[0] = 6;
    offset[1] = 12;
    if (verify_get_chunk_info(dset, H5S_ALL, NUM_CHUNKS_WRITTEN - 1, chunk_size, offset, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n");

    /* Attempt to get info of a non-existing chunk, should fail */
    chk_index = OUTOFRANGE_CHK_INDEX;
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info(dset, H5S_ALL, chk_index, out_offset, &read_flt_msk, &addr, &size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR(" Attempt to get info of a non-existing chunk.");

    /* Attempt to get info of empty chunks, verify the returned addr and size */
    offset[0] = 0;
    offset[1] = 0;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    offset[0] = 3 * CHUNK_NX;
    offset[1] = 3 * CHUNK_NY;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    /* Go through all written chunks, get their info and verify the values */
    chk_index = 0;
    for (ii = START_CHK_X; ii < END_CHK_X; ii++)
        for (jj = START_CHK_Y; jj < END_CHK_Y; jj++, chk_index++) {
            offset[0] = ii * CHUNK_NX;
            offset[1] = jj * CHUNK_NY;
            if (verify_get_chunk_info(dset, dspace, chk_index, chunk_size, offset, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n");

            /* Use the same offset to pass into the next ...by_coord function */
            if (verify_get_chunk_info_by_coord(dset, offset, chunk_size, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");
        }

    /* Close the first dataset */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* Create an empty dataset and close it */
    dset = H5Dcreate2(chunkfile, EMPTY_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
    if (dset < 0)
        TEST_ERROR;
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* Reopen the empty dataset to verify the chunk query functions on it */
    if ((dset = H5Dopen2(chunkfile, EMPTY_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Verify that the number of chunks is 0 */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (NO_CHUNK_WRITTEN != nchunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    /* Attempt to get info of a chunk from an empty dataset, should fail */
    chk_index = OUTOFRANGE_CHK_INDEX;
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR(" Attempt to get info of a non-existing chunk.");

    /* Attempt to get info of a chunk given its coords from an empty dataset,
       should succeed with the returned address as HADDR_UNDEF and size as 0 */
    offset[0] = EMPTY_CHK_X;
    offset[1] = EMPTY_CHK_Y;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /************************************************************************
     * Test empty dataset with H5D_ALLOC_TIME_EARLY                         *
     ************************************************************************/

    /* Set space allocation to early so that chunk query functions will
       retrieve chunk information even though the dataset is empty */
    if (H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY) < 0)
        TEST_ERROR;

    /* Create an empty dataset and close it */
    dset = H5Dcreate2(chunkfile, EMPTY_EARLY_ALLOC_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
                      H5P_DEFAULT);
    if (dset < 0)
        TEST_ERROR;
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* Reopen the empty dataset to verify the chunk query functions on it */
    if ((dset = H5Dopen2(chunkfile, EMPTY_EARLY_ALLOC_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Verify that the number of chunks is NUM_CHUNKS */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (nchunks != NUM_CHUNKS)
        TEST_ERROR;

    /* Attempt to get info of a chunk from an empty dataset, verify the
       returned address and size in the case of H5D_ALLOC_TIME_EARLY */
    chk_index = NONEXIST_CHK_INDEX;
    reinit_vars(&read_flt_msk, &addr, &size);
    ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
    if (ret < 0)
        TEST_ERROR;

    /* Because of H5D_ALLOC_TIME_EARLY, addr cannot be HADDR_UNDEF and size not 0 */
    if (addr == HADDR_UNDEF)
        TEST_ERROR;
    if (size == EMPTY_CHK_SIZE)
        TEST_ERROR;

    chk_index = 10;
    reinit_vars(&read_flt_msk, &addr, &size);
    ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
    if (ret < 0)
        TEST_ERROR;

    /* Because of H5D_ALLOC_TIME_EARLY, addr cannot be HADDR_UNDEF and size not 0 */
    if (addr == HADDR_UNDEF)
        TEST_ERROR;
    if (size == EMPTY_CHK_SIZE)
        TEST_ERROR;

    /* Attempt to get info of a chunk given its coords from an empty dataset,
       verify the returned address and size */
    offset[0] = 0;
    offset[1] = 0;
    if (H5Dget_chunk_info_by_coord(dset, offset, &read_flt_msk, &addr, &size) < 0)
        TEST_ERROR;

    /* Because of H5D_ALLOC_TIME_EARLY, addr cannot be HADDR_UNDEF and size not 0 */
    if (addr == HADDR_UNDEF)
        TEST_ERROR;
    if (size == 0)
        TEST_ERROR;

    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* Close/release resources. */
    if (H5Sclose(dspace) < 0)
        TEST_ERROR;
    if (H5Pclose(cparms) < 0)
        TEST_ERROR;
    if (H5Fclose(chunkfile) < 0)
        TEST_ERROR;

    PASSED();
    return SUCCEED;

error:
    H5E_BEGIN_TRY
    {
        H5Dclose(dset);
        H5Sclose(dspace);
        H5Pclose(cparms);
        H5Fclose(chunkfile);
    }
    H5E_END_TRY

    /* Release the input buffer; it was previously leaked when an error
       occurred between its allocation and the post-write free.  On every
       other path inbuf is NULL, so this is always safe. */
    free(inbuf);

    H5_FAILED();
    return FAIL;
} /* test_get_chunk_info_highest_v18() */
/*-------------------------------------------------------------------------
* Function: test_chunk_info_single_chunk
*
* Purpose: Test getting various chunk information when Single Chunk
* index type is used
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Note: Note that the dataspace argument in these new functions are
* currently not used. The functionality involved the dataspace
* will be implemented in the next version.
*
* Date: November 2018
*
*-------------------------------------------------------------------------
*/
static herr_t
test_chunk_info_single_chunk(const char *filename, hid_t fapl)
{
    hid_t             chunkfile = H5I_INVALID_HID; /* File ID */
    hid_t             dspace    = H5I_INVALID_HID; /* Dataspace ID */
    hid_t             dset      = H5I_INVALID_HID; /* Dataset ID */
    hid_t             cparms    = H5I_INVALID_HID; /* Creation plist */
    hsize_t           dims[2]       = {NX, NY};    /* Dataset dimensions */
    hsize_t           chunk_dims[2] = {NX, NY};    /* Chunk dimensions: one chunk covers the dataset */
    int               data_buf[NX][NY];            /* Input buffer */
    H5D_chunk_index_t idx_type;                    /* Dataset chunk index type */
    unsigned          flt_msk      = 0;            /* Filter mask */
    unsigned          read_flt_msk = 0;            /* Filter mask after direct read */
    hsize_t           offset[2];                   /* Offset coordinates of a chunk */
    hsize_t           out_offset[2] = {0, 0};      /* Buffer to get offset coordinates */
    hsize_t           size    = 0;                 /* Size of an allocated/written chunk */
    hsize_t           nchunks = 0;                 /* Number of chunks */
    haddr_t           addr    = 0;                 /* Address of an allocated/written chunk */
    hsize_t           chk_index = 0;               /* Index of a chunk */
    hsize_t           ii, jj;                      /* Array indices */
    herr_t            ret;                         /* Temporary returned value for verifying failure */

    TESTING(" Single Chunk index");

    /* Open the file for reading/writing */
    if ((chunkfile = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
        TEST_ERROR;

    /* Create dataspace */
    if ((dspace = H5Screate_simple(RANK, dims, NULL)) < 0)
        TEST_ERROR;

    /* Enable chunking */
    if ((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(cparms, RANK, chunk_dims) < 0)
        TEST_ERROR;

    /* Create a new dataset using cparms creation properties */
    dset = H5Dcreate2(chunkfile, SINGLE_CHUNK_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
                      H5P_DEFAULT);
    if (dset < 0)
        TEST_ERROR;

    /* Close the dataset */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* ...open it again to test the chunk query functions on a single empty
       chunk */
    if ((dset = H5Dopen2(chunkfile, SINGLE_CHUNK_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Ensure the correct chunk indexing scheme is used */
    if (H5Dget_chunk_index_type(dset, &idx_type) < 0)
        TEST_ERROR;
    if (idx_type != H5D_CHUNK_IDX_SINGLE)
        FAIL_PUTS_ERROR("Should be using Single Chunk index type");

    /* Get the number of chunks and verify that no chunk has been written */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (nchunks != NO_CHUNK_WRITTEN)
        TEST_ERROR;

    /* Initialize the array of chunk data for the single chunk */
    for (ii = 0; ii < NX; ii++)
        for (jj = 0; jj < NY; jj++)
            data_buf[ii][jj] = (int)(ii * jj);

    /* Write the chunk */
    if (H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_buf) < 0)
        TEST_ERROR;

    /* Get and verify that one chunk had been written */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (nchunks != ONE_CHUNK_WRITTEN)
        TEST_ERROR;

    /* Offset of the only chunk */
    offset[0] = 0;
    offset[1] = 0;

    /* Get and verify info of the first and only chunk */
    if (verify_get_chunk_info(dset, H5S_ALL, 0, SINGLE_CHK_SIZE, offset, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Verification H5Dget_chunk_info failed\n");

    /* Get and verify info of the chunk at logical coordinates (0,0) */
    if (verify_get_chunk_info_by_coord(dset, offset, SINGLE_CHK_SIZE, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");

    /* Attempt to get chunk info given an invalid chunk index and verify
     * that failure occurs */
    chk_index = INVALID_CHK_INDEX;
    reinit_vars(&read_flt_msk, &addr, &size);
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        TEST_ERROR;

    /* Release resources */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;
    if (H5Sclose(dspace) < 0)
        TEST_ERROR;
    /* Close the creation property list too; it was previously leaked on the
       success path (only the error path released it) */
    if (H5Pclose(cparms) < 0)
        TEST_ERROR;
    if (H5Fclose(chunkfile) < 0)
        TEST_ERROR;

    PASSED();
    return SUCCEED;

error:
    H5E_BEGIN_TRY
    {
        H5Dclose(dset);
        H5Sclose(dspace);
        H5Pclose(cparms);
        H5Fclose(chunkfile);
    }
    H5E_END_TRY
    H5_FAILED();
    return FAIL;
} /* test_chunk_info_single_chunk() */
/*-------------------------------------------------------------------------
 * Function:    test_chunk_info_implicit
 *
 * Purpose:     Test getting various chunk information when Implicit
 *              index type is used
 *
 * Return:      Success: SUCCEED
 *              Failure: FAIL
 *
 * Note:        Note that the dataspace argument in these new functions are
 *              currently not used.  The functionality involved the dataspace
 *              will be implemented in the next version.
 *
 * Date:        November 2018
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_chunk_info_implicit(char *filename, hid_t fapl)
{
    hid_t    fid  = H5I_INVALID_HID;                /* File ID */
    hid_t    sid  = H5I_INVALID_HID;                /* Dataspace ID */
    hid_t    did  = H5I_INVALID_HID;                /* Dataset ID */
    hid_t    dcpl = H5I_INVALID_HID;                /* Dataset creation property list */
    hsize_t  dims[2]       = {NX, NY};              /* Dataset dimensions */
    hsize_t  chunk_dims[2] = {CHUNK_NX, CHUNK_NY};  /* Chunk dimensions */
    unsigned flt_msk       = 0;                     /* Filter mask */
    hsize_t  chk_idx       = 0;                     /* Linear index of a chunk */
    hsize_t  row, col;                              /* Chunk-grid coordinates */
    hsize_t  start[2] = {START_CHK_X, START_CHK_Y}; /* Start position */
    hsize_t  end[2]   = {END_CHK_X, END_CHK_Y};     /* End position */

    TESTING("    Implicit index");

    /* Open the test file in read-write mode */
    if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
        TEST_ERROR;

    /* Build the 2-D dataspace */
    if ((sid = H5Screate_simple(RANK, dims, NULL)) < 0)
        TEST_ERROR;

    /* Chunked layout + early allocation is what produces the Implicit index */
    if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(dcpl, RANK, chunk_dims) < 0)
        TEST_ERROR;
    if (H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
        TEST_ERROR;

    /* Create the dataset using the dcpl creation properties */
    if ((did = H5Dcreate2(fid, IMPLICIT_INDEX_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl,
                          H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Close, then re-open the dataset to exercise chunk-info queries */
    if (H5Dclose(did) < 0)
        TEST_ERROR;
    if ((did = H5Dopen2(fid, IMPLICIT_INDEX_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Verify chunk indexing scheme and number of chunks */
    if (verify_idx_nchunks(did, sid, H5D_CHUNK_IDX_NONE, NUM_CHUNKS) == FAIL)
        FAIL_PUTS_ERROR("Verification and write failed\n");

    /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords:
       (0,2) (0,3) (1,2) (1,3) */
    if (write_selected_chunks(did, H5P_DEFAULT, start, end, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Writing to selected chunks failed\n");

    /* Walk the whole chunk grid and verify each chunk's info.  Every chunk
       in this dataset is allocated because of H5D_ALLOC_TIME_EARLY. */
    chk_idx = 0;
    for (row = 0; row < NX / CHUNK_NX; row++) {
        for (col = 0; col < NY / CHUNK_NY; col++, chk_idx++) {
            hsize_t offset[2] = {row * CHUNK_NX, col * CHUNK_NY};

            if (verify_get_chunk_info(did, H5S_ALL, chk_idx, CHK_SIZE, offset, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n");
            if (verify_get_chunk_info_by_coord(did, offset, CHK_SIZE, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");
        }
    }

    /* Release resources */
    if (H5Dclose(did) < 0)
        TEST_ERROR;
    if (H5Sclose(sid) < 0)
        TEST_ERROR;
    if (H5Pclose(dcpl) < 0)
        TEST_ERROR;
    if (H5Fclose(fid) < 0)
        TEST_ERROR;

    PASSED();
    return SUCCEED;

error:
    H5E_BEGIN_TRY
    {
        H5Dclose(did);
        H5Sclose(sid);
        H5Pclose(dcpl);
        H5Fclose(fid);
    }
    H5E_END_TRY
    H5_FAILED();
    return FAIL;
} /* test_chunk_info_implicit() */
2019-09-26 21:43:41 +08:00
/*-------------------------------------------------------------------------
 * Function:    test_chunk_info_fixed_array
 *
 * Purpose:     Test getting various chunk information when Fixed Array
 *              index type is used
 *
 * Return:      Success: SUCCEED
 *              Failure: FAIL
 *
 * Note:        Note that the dataspace argument in these new functions are
 *              currently not used.  The functionality involved the dataspace
 *              will be implemented in the next version.
 *
 * Date:        November 2018
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_chunk_info_fixed_array(const char *filename, hid_t fapl)
{
    hid_t    chunkfile = H5I_INVALID_HID;            /* File ID */
    hid_t    dspace    = H5I_INVALID_HID;            /* Dataspace ID */
    hid_t    dset      = H5I_INVALID_HID;            /* Dataset ID */
    hid_t    cparms    = H5I_INVALID_HID;            /* Creation plist */
    hsize_t  dims[2]       = {NX, NY};               /* Dataset dimensions */
    hsize_t  chunk_dims[2] = {CHUNK_NX, CHUNK_NY};   /* Chunk dimensions */
    unsigned flt_msk       = 0;                      /* Filter mask */
    unsigned read_flt_msk  = 0;                      /* Filter mask after direct read */
    hsize_t  offset[2];                              /* Offset coordinates of a chunk */
    hsize_t  start[2] = {START_CHK_X, START_CHK_Y};  /* Start position */
    hsize_t  end[2]   = {END_CHK_X, END_CHK_Y};      /* End position */
    hsize_t  out_offset[2] = {0, 0};                 /* Buffer to get offset coordinates */
    hsize_t  size    = 0;                            /* Size of an allocated/written chunk */
    hsize_t  nchunks = 0;                            /* Number of chunks */
    haddr_t  addr    = 0;                            /* Address of an allocated/written chunk */
    hsize_t  chk_index = 0;                          /* Index of a chunk */
    hsize_t  ii, jj;                                 /* Array indices */
    herr_t   ret;                                    /* Temporary returned value for verifying failure */

    TESTING("    Fixed Array index");

    /* Open the file for reading/writing */
    if ((chunkfile = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
        TEST_ERROR;

    /* Create dataspace */
    if ((dspace = H5Screate_simple(RANK, dims, NULL)) < 0)
        TEST_ERROR;

    /* Enable chunking; fixed dims (no maxdims) yield the Fixed Array index */
    if ((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(cparms, RANK, chunk_dims) < 0)
        TEST_ERROR;

    /* Create a new dataset using cparms creation properties */
    dset = H5Dcreate2(chunkfile, FIXED_ARR_INDEX_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
                      H5P_DEFAULT);
    if (dset < 0)
        TEST_ERROR;

    /* Close the dataset */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* Open the dataset again to test getting chunk info */
    if ((dset = H5Dopen2(chunkfile, FIXED_ARR_INDEX_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Verify chunk indexing scheme and number of chunks */
    if (verify_idx_nchunks(dset, dspace, H5D_CHUNK_IDX_FARRAY, NO_CHUNK_WRITTEN) == FAIL)
        FAIL_PUTS_ERROR("Verification and write failed\n");

    /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords:
       (0,2) (0,3) (1,2) (1,3) */
    if (write_selected_chunks(dset, H5P_DEFAULT, start, end, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Writing to selected chunks failed\n");

    /* Get and verify the number of chunks written */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (NUM_CHUNKS_WRITTEN != nchunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    /* Get and verify info of each written chunk */
    chk_index = 0;
    for (ii = START_CHK_X; ii < END_CHK_X; ii++)
        for (jj = START_CHK_Y; jj < END_CHK_Y; jj++, chk_index++) {
            offset[0] = ii * CHUNK_NX;
            offset[1] = jj * CHUNK_NY;
            if (verify_get_chunk_info(dset, dspace, chk_index, CHK_SIZE, offset, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n");
        }

    /* Attempt to get info using an out-of-range index, chk_index is now > NUM_CHUNKS_WRITTEN. should fail */
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR("    Attempted to get info of a chunk using an out-of-range index.");

    /* Attempt to get info of empty chunks, verify the returned address and size */
    offset[0] = 0;
    offset[1] = 0;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    offset[0] = 3 * CHUNK_NX;
    offset[1] = 3 * CHUNK_NY;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    /* Read and verify values of selected chunks.
     * BUG FIX: the TEST_ERROR below was missing; the dangling "if" made the
     * subsequent H5Dclose() conditional on verification *failure*, leaking
     * the dataset handle on the success path and swallowing failures. */
    if (verify_selected_chunks(dset, H5P_DEFAULT, start, end) < 0)
        TEST_ERROR;

    /* Release resource (including cparms, which was previously leaked) */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;
    if (H5Sclose(dspace) < 0)
        TEST_ERROR;
    if (H5Pclose(cparms) < 0)
        TEST_ERROR;
    if (H5Fclose(chunkfile) < 0)
        TEST_ERROR;

    PASSED();
    return SUCCEED;

error:
    H5E_BEGIN_TRY
    {
        H5Dclose(dset);
        H5Sclose(dspace);
        H5Pclose(cparms);
        H5Fclose(chunkfile);
    }
    H5E_END_TRY
    H5_FAILED();
    return FAIL;
} /* test_chunk_info_fixed_array() */
2019-09-26 21:43:41 +08:00
/*-------------------------------------------------------------------------
 * Function:    test_chunk_info_extensible_array
 *
 * Purpose:     Test getting various chunk information when Extensible Array
 *              index type is used
 *
 * Return:      Success: SUCCEED
 *              Failure: FAIL
 *
 * Note:        Note that the dataspace argument in these new functions are
 *              currently not used.  The functionality involved the dataspace
 *              will be implemented in the next version.
 *
 * Date:        November 2018
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_chunk_info_extensible_array(const char *filename, hid_t fapl)
{
    hid_t    chunkfile = H5I_INVALID_HID;            /* File ID */
    hid_t    dspace    = H5I_INVALID_HID;            /* Dataspace ID */
    hid_t    dset      = H5I_INVALID_HID;            /* Dataset ID */
    hid_t    cparms    = H5I_INVALID_HID;            /* Creation plist */
    hsize_t  dims[2]       = {NX, NY};               /* Dataset dimensions */
    hsize_t  chunk_dims[2] = {CHUNK_NX, CHUNK_NY};   /* Chunk dimensions */
    hsize_t  maxdims[2]    = {H5S_UNLIMITED, NY};    /* One unlimited dimension */
    unsigned flt_msk       = 0;                      /* Filter mask */
    unsigned read_flt_msk  = 0;                      /* Filter mask after direct read */
    hsize_t  offset[2];                              /* Offset coordinates of a chunk */
    hsize_t  start[2] = {START_CHK_X, START_CHK_Y};  /* Start position */
    hsize_t  end[2]   = {END_CHK_X, END_CHK_Y};      /* End position */
    hsize_t  out_offset[2] = {0, 0};                 /* Buffer to get offset coordinates */
    hsize_t  size    = 0;                            /* Size of an allocated/written chunk */
    hsize_t  nchunks = 0;                            /* Number of chunks */
    haddr_t  addr    = 0;                            /* Address of an allocated/written chunk */
    hsize_t  chk_index = 0;                          /* Index of a chunk */
    hsize_t  ii, jj;                                 /* Array indices */
    herr_t   ret;                                    /* Temporary returned value for verifying failure */

    TESTING("    Extensible Array index");

    /* Open the file for reading/writing */
    if ((chunkfile = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
        TEST_ERROR;

    /* Create dataspace; one unlimited dimension yields the Extensible Array index */
    if ((dspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
        TEST_ERROR;

    /* Enable chunking */
    if ((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(cparms, RANK, chunk_dims) < 0)
        TEST_ERROR;

    /* Create a new dataset using cparms creation properties */
    dset = H5Dcreate2(chunkfile, EXT_ARR_INDEX_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
                      H5P_DEFAULT);
    if (dset < 0)
        TEST_ERROR;

    /* Close the dataset */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* Open the dataset again to test getting chunk info */
    if ((dset = H5Dopen2(chunkfile, EXT_ARR_INDEX_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Verify chunk indexing scheme and number of chunks */
    if (verify_idx_nchunks(dset, dspace, H5D_CHUNK_IDX_EARRAY, NO_CHUNK_WRITTEN) == FAIL)
        FAIL_PUTS_ERROR("Verification and write failed\n");

    /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords:
       (0,2) (0,3) (1,2) (1,3) */
    if (write_selected_chunks(dset, H5P_DEFAULT, start, end, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Writing to selected chunks failed\n");

    /* Get and verify the number of chunks written */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (NUM_CHUNKS_WRITTEN != nchunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    /* Get and verify info of each written chunk */
    chk_index = 0;
    for (ii = START_CHK_X; ii < END_CHK_X; ii++)
        for (jj = START_CHK_Y; jj < END_CHK_Y; jj++, chk_index++) {
            offset[0] = ii * CHUNK_NX;
            offset[1] = jj * CHUNK_NY;
            if (verify_get_chunk_info(dset, dspace, chk_index, CHK_SIZE, offset, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n");
            if (verify_get_chunk_info_by_coord(dset, offset, CHK_SIZE, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");
        }

    /* Attempt to get info using an out-of-range index, should fail */
    chk_index = OUTOFRANGE_CHK_INDEX;
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR("    Attempted to get info of a chunk using an out-of-range index.");

    /* Attempt to get info of empty chunks, verify the returned address and size */
    offset[0] = 0;
    offset[1] = 0;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    offset[0] = 3 * CHUNK_NX;
    offset[1] = 3 * CHUNK_NY;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    /* Read and verify values of selected chunks.
     * BUG FIX: the TEST_ERROR below was missing; the dangling "if" made the
     * subsequent H5Dclose() conditional on verification *failure*, leaking
     * the dataset handle on the success path and swallowing failures. */
    if (verify_selected_chunks(dset, H5P_DEFAULT, start, end) < 0)
        TEST_ERROR;

    /* Release resource (including cparms, which was previously leaked) */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;
    if (H5Sclose(dspace) < 0)
        TEST_ERROR;
    if (H5Pclose(cparms) < 0)
        TEST_ERROR;
    if (H5Fclose(chunkfile) < 0)
        TEST_ERROR;

    PASSED();
    return SUCCEED;

error:
    H5E_BEGIN_TRY
    {
        H5Dclose(dset);
        H5Sclose(dspace);
        H5Pclose(cparms);
        H5Fclose(chunkfile);
    }
    H5E_END_TRY
    H5_FAILED();
    return FAIL;
} /* test_chunk_info_extensible_array() */
/*-------------------------------------------------------------------------
 * Function:    test_chunk_info_version2_btrees
 *
 * Purpose:     Test getting various chunk information when Version 2 B-trees
 *              index type is used
 *
 * Return:      Success: SUCCEED
 *              Failure: FAIL
 *
 * Note:        Note that the dataspace argument in these new functions are
 *              currently not used.  The functionality involved the dataspace
 *              will be implemented in the next version.
 *
 * Date:        November 2018
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_chunk_info_version2_btrees(const char *filename, hid_t fapl)
{
    hid_t    chunkfile = H5I_INVALID_HID;                  /* File ID */
    hid_t    dspace    = H5I_INVALID_HID;                  /* Dataspace ID */
    hid_t    dset      = H5I_INVALID_HID;                  /* Dataset ID */
    hid_t    cparms    = H5I_INVALID_HID;                  /* Creation plist */
    hsize_t  dims[2]       = {NX, NY};                     /* Dataset dimensions */
    hsize_t  chunk_dims[2] = {CHUNK_NX, CHUNK_NY};         /* Chunk dimensions */
    hsize_t  maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};  /* Two unlimited dims */
    unsigned flt_msk       = 0;                            /* Filter mask */
    unsigned read_flt_msk  = 0;                            /* Filter mask after direct read */
    hsize_t  offset[2];                                    /* Offset coordinates of a chunk */
    hsize_t  start[2] = {START_CHK_X, START_CHK_Y};        /* Start position */
    hsize_t  end[2]   = {END_CHK_X, END_CHK_Y};            /* End position */
    hsize_t  out_offset[2] = {0, 0};                       /* Buffer to get offset coordinates */
    hsize_t  size    = 0;                                  /* Size of an allocated/written chunk */
    hsize_t  nchunks = 0;                                  /* Number of chunks */
    haddr_t  addr    = 0;                                  /* Address of an allocated/written chunk */
    hsize_t  chk_index = 0;                                /* Index of a chunk */
    hsize_t  ii, jj;                                       /* Array indices */
    herr_t   ret;                                          /* Temporary returned value for verifying failure */

    TESTING("    Version 2 B-trees index");

    /* Open the file for reading/writing */
    if ((chunkfile = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
        TEST_ERROR;

    /* Create dataspace; two unlimited dimensions yield the v2 B-tree index */
    if ((dspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
        TEST_ERROR;

    /* Enable chunking */
    if ((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(cparms, RANK, chunk_dims) < 0)
        TEST_ERROR;

    /* Create a new dataset using cparms creation properties */
    dset = H5Dcreate2(chunkfile, V2_BTREE_INDEX_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
                      H5P_DEFAULT);
    if (dset < 0)
        TEST_ERROR;

    /* Close the dataset */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;

    /* Open the dataset again to test getting chunk info */
    if ((dset = H5Dopen2(chunkfile, V2_BTREE_INDEX_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Verify chunk indexing scheme and number of chunks */
    if (verify_idx_nchunks(dset, dspace, H5D_CHUNK_IDX_BT2, NO_CHUNK_WRITTEN) == FAIL)
        FAIL_PUTS_ERROR("Verification and write failed\n");

    /* Write NUM_CHUNKS_WRITTEN chunks at the following logical coords:
       (0,2) (0,3) (1,2) (1,3) */
    if (write_selected_chunks(dset, H5P_DEFAULT, start, end, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Writing to selected chunks failed\n");

    /* Get and verify the number of chunks written */
    if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
        TEST_ERROR;
    if (NUM_CHUNKS_WRITTEN != nchunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    /* Go through all written chunks, get their info and verify the values */
    chk_index = 0;
    for (ii = START_CHK_X; ii < END_CHK_X; ii++)
        for (jj = START_CHK_Y; jj < END_CHK_Y; jj++, chk_index++) {
            offset[0] = ii * CHUNK_NX;
            offset[1] = jj * CHUNK_NY;
            if (verify_get_chunk_info(dset, dspace, chk_index, CHK_SIZE, offset, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n");
            if (verify_get_chunk_info_by_coord(dset, offset, CHK_SIZE, flt_msk) == FAIL)
                FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");
        }

    /* Attempt to provide out-of-range offsets, should fail */
    chk_index = OUTOFRANGE_CHK_INDEX;
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR("    Attempted to get info of a chunk using an out-of-range index.");

    /* Attempt to get info of empty chunks, verify the returned address and size */
    offset[0] = 0;
    offset[1] = 0;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    offset[0] = 3 * CHUNK_NX;
    offset[1] = 3 * CHUNK_NY;
    if (verify_empty_chunk_info(dset, offset) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");

    /* Read and verify values of selected chunks.
     * BUG FIX: the TEST_ERROR below was missing; the dangling "if" made the
     * subsequent H5Dclose() conditional on verification *failure*, leaking
     * the dataset handle on the success path and swallowing failures. */
    if (verify_selected_chunks(dset, H5P_DEFAULT, start, end) < 0)
        TEST_ERROR;

    /* Release resource (including cparms, which was previously leaked) */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;
    if (H5Sclose(dspace) < 0)
        TEST_ERROR;
    if (H5Pclose(cparms) < 0)
        TEST_ERROR;
    if (H5Fclose(chunkfile) < 0)
        TEST_ERROR;

    PASSED();
    return SUCCEED;

error:
    H5E_BEGIN_TRY
    {
        H5Dclose(dset);
        H5Sclose(dspace);
        H5Pclose(cparms);
        H5Fclose(chunkfile);
    }
    H5E_END_TRY
    H5_FAILED();
    return FAIL;
} /* test_chunk_info_version2_btrees() */
/* Information about a single chunk, recorded by the H5Dchunk_iter callback
 * (see iter_cb below) for later verification */
typedef struct chunk_iter_info_t {
    hsize_t  offset[2];   /* Logical offset of the chunk within the dataset */
    unsigned filter_mask; /* Filter mask as reported by the iterator */
    haddr_t  addr;        /* Address of the chunk in the file */
    hsize_t  size;        /* Size of the chunk, in bytes */
} chunk_iter_info_t;
/* User data passed through H5Dchunk_iter to iter_cb: an output array of
 * chunk records plus the index of the last slot filled */
typedef struct chunk_iter_udata_t {
    chunk_iter_info_t *chunk_info; /* Caller-owned array to fill, one entry per chunk */
    int                last_index; /* Index of the most recently filled entry; -1 when empty */
} chunk_iter_udata_t;
/* H5Dchunk_iter callback: appends the current chunk's offset, filter mask,
 * address, and size to the next free slot of the udata array and continues
 * iterating */
static int
iter_cb(const hsize_t *offset, unsigned filter_mask, haddr_t addr, hsize_t size, void *op_data)
{
    chunk_iter_udata_t *udata = (chunk_iter_udata_t *)op_data;
    chunk_iter_info_t  *entry;

    /* Advance the cursor, then fill the newly claimed slot */
    udata->last_index++;
    entry = &udata->chunk_info[udata->last_index];

    entry->offset[0]   = offset[0];
    entry->offset[1]   = offset[1];
    entry->filter_mask = filter_mask;
    entry->addr        = addr;
    entry->size        = size;

    return H5_ITER_CONT;
}
/* H5Dchunk_iter callback that bumps the caller's cursor once and then
 * requests early termination, for testing the H5_ITER_STOP path */
static int
iter_cb_stop(const hsize_t H5_ATTR_UNUSED *offset, unsigned H5_ATTR_UNUSED filter_mask,
             haddr_t H5_ATTR_UNUSED addr, hsize_t H5_ATTR_UNUSED size, void *op_data)
{
    chunk_iter_info_t **cursor = (chunk_iter_info_t **)op_data;

    *cursor = *cursor + 1;

    return H5_ITER_STOP;
}
/* H5Dchunk_iter callback that bumps the caller's cursor once and then
 * reports an error, for testing the H5_ITER_ERROR path */
static int
iter_cb_fail(const hsize_t H5_ATTR_UNUSED *offset, unsigned H5_ATTR_UNUSED filter_mask,
             haddr_t H5_ATTR_UNUSED addr, hsize_t H5_ATTR_UNUSED size, void *op_data)
{
    chunk_iter_info_t **cursor = (chunk_iter_info_t **)op_data;

    *cursor = *cursor + 1;

    return H5_ITER_ERROR;
}
/*-------------------------------------------------------------------------
* Function: test_basic_query
*
* Purpose: Tests basic operations to ensure the chunk query functions
* work properly.
*
* Return: Success: SUCCEED
* Failure: FAIL
*
* Note: Note that the dataspace argument in these new functions are
* currently not used. The functionality involved the dataspace
* will be implemented in the next version.
*
* Date: August 2019
*
*-------------------------------------------------------------------------
*/
static herr_t
test_basic_query(hid_t fapl)
{
char filename[FILENAME_BUF_SIZE]; /* File name */
hid_t basicfile = H5I_INVALID_HID; /* File ID */
hid_t dspace = H5I_INVALID_HID; /* Dataspace ID */
hid_t dset = H5I_INVALID_HID; /* Dataset ID */
hid_t cparms = H5I_INVALID_HID; /* Creation plist */
hsize_t dims[2] = {NX, NY}; /* Dataset dimensions */
hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY}; /* Chunk dimensions */
int direct_buf[CHUNK_NX][CHUNK_NY]; /* Data in chunks */
unsigned flt_msk = 0; /* Filter mask */
unsigned read_flt_msk = 0; /* Filter mask after direct read */
hsize_t offset[2]; /* Offset coordinates of a chunk */
hsize_t out_offset[2] = {0, 0}; /* Buffer to get offset coordinates */
hsize_t size = 0; /* Size of an allocated/written chunk */
hsize_t nchunks = 0; /* Number of chunks */
haddr_t addr = 0; /* Address of an allocated/written chunk */
hsize_t chk_index = 0; /* Index of a chunk */
hsize_t ii, jj; /* Array indices */
chunk_iter_info_t chunk_infos[2]; /* Chunk infos filled up by iterator */
chunk_iter_info_t *cptr; /* Pointer to array of chunks */
chunk_iter_udata_t udata; /* udata for iteration */
herr_t ret; /* Temporary returned value for verifying failure */
TESTING("basic operations");
/* Create the file */
h5_fixname(BASIC_FILE, fapl, filename, sizeof filename);
/* Create a new file. */
2020-09-30 22:27:10 +08:00
if ((basicfile = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
TEST_ERROR;
/* Create dataspace */
2020-09-30 22:27:10 +08:00
if ((dspace = H5Screate_simple(RANK, dims, NULL)) < 0)
TEST_ERROR;
/* Enable chunking */
2020-09-30 22:27:10 +08:00
if ((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Pset_chunk(cparms, RANK, chunk_dims) < 0)
TEST_ERROR;
/* Create a new dataset using cparms creation properties */
2020-09-30 22:27:10 +08:00
dset = H5Dcreate2(basicfile, SIMPLE_CHUNKED_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
H5P_DEFAULT);
if (dset < 0)
TEST_ERROR;
/* Get the number of chunks and verify that no chunk has been written */
2020-09-30 22:27:10 +08:00
if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
TEST_ERROR;
if (NO_CHUNK_WRITTEN != nchunks)
FAIL_PUTS_ERROR("unexpected number of chunks");
/* Initialize the array of chunk data for the single chunk */
2020-09-30 22:27:10 +08:00
for (ii = 0; ii < CHUNK_NX; ii++)
for (jj = 0; jj < CHUNK_NY; jj++)
direct_buf[ii][jj] = (int)(ii * jj);
/* Write the chunk of data */
offset[0] = CHUNK_NX;
offset[1] = CHUNK_NY;
2020-09-30 22:27:10 +08:00
if (H5Dwrite_chunk(dset, H5P_DEFAULT, flt_msk, offset, CHK_SIZE, direct_buf) < 0)
TEST_ERROR;
/* Get and verify that one chunk had been written */
2020-09-30 22:27:10 +08:00
if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
TEST_ERROR;
if (ONE_CHUNK_WRITTEN != nchunks)
FAIL_PUTS_ERROR("unexpected number of chunks");
/* Get and verify info of the first and only chunk */
2020-09-30 22:27:10 +08:00
if (verify_get_chunk_info(dset, H5S_ALL, 0, CHK_SIZE, offset, flt_msk) == FAIL)
FAIL_PUTS_ERROR("Verification H5Dget_chunk_info failed\n");
2019-09-26 21:43:41 +08:00
/* Get and verify info of the chunk at the offset (CHUNK_NX,CHUNK_NY) */
2020-09-30 22:27:10 +08:00
if (verify_get_chunk_info_by_coord(dset, offset, CHK_SIZE, flt_msk) == FAIL)
FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");
/* Attempt to get chunk info given an invalid chunk index and verify
* that failure occurs */
chk_index = INVALID_CHK_INDEX;
reinit_vars(&read_flt_msk, &addr, &size);
2020-09-30 22:27:10 +08:00
H5E_BEGIN_TRY
{
ret = H5Dget_chunk_info(dset, dspace, chk_index, out_offset, &read_flt_msk, &addr, &size);
2020-09-30 22:27:10 +08:00
}
H5E_END_TRY
2020-09-30 22:27:10 +08:00
if (ret != FAIL)
TEST_ERROR;
/* Write the chunk of data to another location */
offset[0] = 0;
offset[1] = 0;
2020-09-30 22:27:10 +08:00
if (H5Dwrite_chunk(dset, H5P_DEFAULT, flt_msk, offset, CHK_SIZE, direct_buf) < 0)
TEST_ERROR;
/* Get and verify that two chunks had been written */
2020-09-30 22:27:10 +08:00
if (H5Dget_num_chunks(dset, dspace, &nchunks) < 0)
TEST_ERROR;
if (TWO_CHUNKS_WRITTEN != nchunks)
FAIL_PUTS_ERROR("unexpected number of chunks");
/* Get and verify info of the first written chunk in the dataset, its
offset should be (0,0) */
2020-09-30 22:27:10 +08:00
if (verify_get_chunk_info(dset, H5S_ALL, 0, CHK_SIZE, offset, flt_msk) == FAIL)
FAIL_PUTS_ERROR("Verification H5Dget_chunk_info failed\n");
2019-09-26 21:43:41 +08:00
/* Get and verify info of the chunk at the offset (0,0) */
2020-09-30 22:27:10 +08:00
if (verify_get_chunk_info_by_coord(dset, offset, CHK_SIZE, flt_msk) == FAIL)
FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");
/* Get and verify info of the second written chunk in the dataset, its
offset should be (CHUNK_NX, CHUNK_NY) */
offset[0] = CHUNK_NX;
offset[1] = CHUNK_NY;
2020-09-30 22:27:10 +08:00
if (verify_get_chunk_info(dset, H5S_ALL, 1, CHK_SIZE, offset, flt_msk) == FAIL)
FAIL_PUTS_ERROR("Verification H5Dget_chunk_info failed\n");
2019-09-26 21:43:41 +08:00
/* Get and verify info of the chunk at the offset (CHUNK_NX, CHUNK_NY) */
2020-09-30 22:27:10 +08:00
if (verify_get_chunk_info_by_coord(dset, offset, CHK_SIZE, flt_msk) == FAIL)
FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");
2019-09-26 21:43:41 +08:00
/* Get and verify info of an empty chunk, at offset
(2*CHUNK_NX, 2*CHUNK_NY) */
2020-09-30 22:27:10 +08:00
offset[0] = 2 * CHUNK_NX;
offset[1] = 2 * CHUNK_NY;
/* Get and verify info of the chunk at the offset (CHUNK_NX, CHUNK_NY) */
2020-09-30 22:27:10 +08:00
if (verify_empty_chunk_info(dset, offset) == FAIL)
FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord on empty chunk failed\n");
/* Iterate over all chunks */
udata.chunk_info = chunk_infos;
udata.last_index = -1;
if (H5Dchunk_iter(dset, H5P_DEFAULT, &iter_cb, &udata) < 0)
TEST_ERROR;
if (udata.last_index != 1)
FAIL_PUTS_ERROR("Iterator did not iterate over all chunks");
if (chunk_infos[0].offset[0] != 0)
FAIL_PUTS_ERROR("offset[0] mismatch");
if (chunk_infos[0].offset[1] != 0)
FAIL_PUTS_ERROR("offset[1] mismatch");
if (chunk_infos[0].filter_mask != 0)
FAIL_PUTS_ERROR("filter mask mismatch");
if (chunk_infos[0].size != 96)
FAIL_PUTS_ERROR("size mismatch");
if (chunk_infos[1].offset[0] != CHUNK_NX)
FAIL_PUTS_ERROR("offset[0] mismatch");
if (chunk_infos[1].offset[1] != CHUNK_NY)
FAIL_PUTS_ERROR("offset[1] mismatch");
/* Iterate and stop after one iteration */
cptr = &(chunk_infos[0]);
Combo set of async and other changes (#161) * Update API tracing for new H5VL_request_status_t typedef * Finish converting internal event set operations to use list iterator callbacks, instead of directly accessing the list structure * Add H5VL_REQUEST_GET_ERR_STACK operation to request subclass, for retrieving a copy of the error stack for a failed asynchronous operation * Remove 'canceled' event status from Java constants * Be safer about releasing resources when inserting a newly opened/created object or file into an event set * Remove H5EStest, add H5ES_WAIT_NONE for 0 timeout, and revise parameters to H5ESwait, to make it more "aggregate". * Remove H5ES_STATUS_CANCELED from Java wrappers also * Apply patch for dynamically registering optional VOL operations * (a) Add async APIs for H5O module as listed in jira issue ID-283. (b) Remove verification of name parameter in async related routines for H55A and H5L modules because it is checked in H5VL_setup* routine. (c) Modify h5dump expected output due to the async changes. * Corrections based on PR feedback. * Further changes to make based on PR feedback. * Remove H5Dwait & H5Fwait (moved to the async connector). Added H5atclose routine. Updated 'optional op' operations. * Fix missed merge marker, and reformatted line * Update API tracing infrastructure for H5atclose callback * Clean up some warnings * Normalize against develop branch * Correct level of indirection * Add doxygen info for H5is_library_terminating and regression tests for it and H5atclose * Relocate prototype (and doxygen info) for H5Aclose * Align w/changes on develop * Move group package initialization code to H5Gint.c, and update tracing macros * Change non-static function declarations to be static * Correct GCC diagnostic macro * Ensure that H5TSpublic.h header gets installed (#129) * Finish moving API routines that invoke VOL framework to main source files. 
* Fix position of H5Fmount and H5Funmount * Add 'wrapper' versions of async calls, to allow language wrappers and layers on top of HDF5 to pass in their application information. * Add wrappers for dynamically registered optional operations * Fix typo * Update doxygen comment for H5atclose with additional detail. * Add H5VL\*_vararg versions of H5VL routines that use va_list parameters * Implement and test H5S_BLOCK * Switch H5Aexists\*_async and H5Lexists\*_async to use flag to return status, instead of return value. Make the corresponding changes through most of the v1 and v2 B-tree code. Clean up warnings in H5public.h and cmpd_dtransform.c. * Add H5Iregister_future routine and tests. * Correct return value for H5Lexists_async * Add H5_DLL macro to public H5ES API routines * Update supported -> flags parameter for introspect_query callback * Remove my email address. Update passthrough VOL connector ID. * Fix comment for post_open_api_common * Remove unused non-blocking VOL connector * Minor cleanup in async branch in preparation for merge to develop * Update CMake and the Autotools to use the new pass-through VOL ID * Finish another iteration on public H5ES routines, along with running the code reformatter * Another round of reformatting * Fix for SWMR daily test failures (#160) The H5I_register_using_existing_id() call did not initialize the future ID callbacks, causing the library to segfault when it tried to resolve those function pointers. * Added selective async APIs (#150) * Added selective async APIs Description: Added the following APIs: H5Ropen_attr_async H5Ropen_object_async H5Ropen_region_async H5Mcreate_async H5Mopen_async H5Mput_async H5Mget_async H5Mclose_async H5Tcommit_async H5Topen_async H5Tcopy_async H5Tclose_async - Updated an expected output file to include a new internal function in the error stack for the failure case. * Updated async APIs per reviews, including removing async version of H5Tcopy. 
* Removed statements that were added by mistake in the previous commit. * Fix compile issues in H5M and warnings elsewhere * Reformat code * Brings VOL_LIST changes from develop. (#163) * Remove H5Dwait and H5Fwait calls, which were incorrectly brought back in * Tiny cleanup of H5Lcreate_hard_async * Run source formatter * Allow for canceled operation in wait_cb * Attempt to fix switch on string value * Re-run source formatter * Add H5S_BLOCK testfile to CMake clean target * Add H5Pset_vol_async API routine and 'get_cap_flags' VOL introspection callback * Clean up warnings * Add H5P(set\|get)_vol_implicit_async API routines to allow \/ disallow implicit asynchronous operations (default is disallowed) * Run formatting script * Remove H5VL_REQUEST_WAIT\* * Warning cleanup * Eliminate strdup()s on statically allocated strings * Warning cleanup * Split H5VLrestore_lib_state into H5VLstart_lib_state and H5VLrestore_lib_state, and rename H5VLreset_lib_state to H5VLfinish_lib_state. * Duplicate strings when building err_info to return to applicatin * Move connector author routines into seperate header files, all included in the new hdf5dev.h header * Run bin/trace to add TRACE macros * Allow H5ES_NONE as a valid, but no-op, parameter to all H5ES API routines that accept an event set ID * Clean up formatting * Remove H5Pset/get_vol_implicit_async * Clean up warning * Remove H5Pget_vol_async and replace with more generic H5Pget_vol_cap_flags * Clean up warnings * Add H5ESfree_err_info convenience routine * Fix typo * Correct matching for cached VOL plugins * Add developer header file * Update for C99 compatibility * Add missing trace macro * Stop clang-format from messing with the trace macros. 
Don't set up VOL wrappers for 'infrastructure' objects like requests and blobs * Fix warning about formatting a directory * Clean up formatting for H5E_BEGIN_TRY / H5E_END_TRY * Reduce scope of H5ES__close * Enable CMake checks for various types on MacOS * Clean up properly when H5CX_retrieve_state() fails. Also clean up many compiler warnings. * Committing clang-format changes * Merge from develop * Fix mis-placed assert * Remove commented-out code * Re-add macro for unsetenv on Windows (I think it accidentally was merged out) * Strengthen sanity check from error report to assertion * Committing clang-format changes * Add units to the comments for a few fields * Switch 'get execution time' operation for async request tokens to be an optional operation and query if connector supports operation before retrieving it. * Committing clang-format changes * Remove H5ESget_time_estimate * Committing clang-format changes * Create developer header for datatype routines and move type conversion register/unregister routines there. * Simplify internal H5VL_setup_name_args and H5VL_setup_idx_args routines * Add H5VLlink_optional_op, allowing dynamicly registered optional operations for the link VOL subclass, also added H5VL_loc_params argument to the link 'optional' callback to allow them to work correctly. * Run bin/format_source on current code * Add H5VLobject_optional_op, allowing dynamicly registered optional operations for the object VOL subclass, also added H5VL_loc_params argument to the object 'optional' callback to allow them to work correctly. * Run bin/format_source on current code * Committing clang-format changes * Revert "Switch 'get execution time' operation for async request tokens to be an optional operation and query if connector supports operation before retrieving it." This reverts commit 5ac92014da2682bdba62d7a2524b8d90e38f6b19. 
* Committing clang-format changes * Convert attribute 'get' operation to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Update tracing macros * Convert attribute 'specific' operation to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert dataset 'get' and 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also, minor tweaks to attribute 'get' and 'specific' operation parameters. * Convert datatype 'get' and 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also, minor tweaks to H5O_refresh_metadata arguments. * Reduce warnings * Reduce warnings * Track change to datatype 'get' callback * Fix bug with file pointer getting invalidated when object closed * Reformat source * Convert file and group VOL classes 'get' and 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also small cleanup to the attribute get name operation. Also moved 'mount' and 'unmount' operations to be group specific operations, instead of file specific, to better align with their behavior (mounted files are on groups, so a group is what is operated on). * Remove remainder of merge conflict marking * Convert link VOL class 'create' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Remove some unused local variables * Convert link VOL class 'get' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also refactor 'get name by idx' routines to return actual length of name with a parameter instead of the return value, and move some callback context structs for the link interface from the private header file into the source code module, to reduce their visibility scope. 
* Update tracing macros * Convert link VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert object VOL class 'get' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert object VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also refactor H5G_loc_exists, et al, to return 'exists' flag in a parameter and errors with the function return value, instead of overloading both into the return value. And, corrected logic error in test/links.c around non-existant objects in a file. * Convert request VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert blob VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also removes the H5VL_BLOB_GETSIZE operation, as it's unused in the library and the blob ID size for a container is now returned with H5VL_FILE_GET_CONT_INFO. * Add 'const' to several parameters that are only queried. * Convert all VOL classes' 'optional' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Convert several 'get' routines to return the length of an array in a parameter instead of combining it into the return value. Move several routines to be in less public namespace. Correct direct_chunk test to verify that parameters aren't modified on error. * Switch get/specific/optional VOL callback argument structures to be 'async-friendly'. Also other minor cleanups and bug-fixes. 
* Add H5Pset_dataset_io_hyperslab_selection / H5S_PLIST feature, to allow skipping H5Dget_space + H5Sselect_hyperslab for async operation * Add dynamic optional operations for request objects * Update dynamic operation test for optional request operations * Update a comment for an operation argument * Run trace and format_source scripts * Committing clang-format changes * Committing clang-format changes Co-authored-by: vchoi <vchoi@jelly.ad.hdfgroup.org> Co-authored-by: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Co-authored-by: jhendersonHDF <jhenderson@hdfgroup.org> Co-authored-by: Dana Robinson <derobins@hdfgroup.org> Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
2021-06-03 04:29:46 +08:00
if (H5Dchunk_iter(dset, H5P_DEFAULT, &iter_cb_stop, &cptr) < 0)
TEST_ERROR;
if (cptr != &(chunk_infos[1]))
FAIL_PUTS_ERROR("Verification of halted iterator failed");
/* Iterate and fail after one iteration */
cptr = &(chunk_infos[0]);
H5E_BEGIN_TRY
{
Combo set of async and other changes (#161) * Update API tracing for new H5VL_request_status_t typedef * Finish converting internal event set operations to use list iterator callbacks, instead of directly accessing the list structure * Add H5VL_REQUEST_GET_ERR_STACK operation to request subclass, for retrieving a copy of the error stack for a failed asynchronous operation * Remove 'canceled' event status from Java constants * Be safer about releasing resources when inserting a newly opened/created object or file into an event set * Remove H5EStest, add H5ES_WAIT_NONE for 0 timeout, and revise parameters to H5ESwait, to make it more "aggregate". * Remove H5ES_STATUS_CANCELED from Java wrappers also * Apply patch for dynamically registering optional VOL operations * (a) Add async APIs for H5O module as listed in jira issue ID-283. (b) Remove verification of name parameter in async related routines for H55A and H5L modules because it is checked in H5VL_setup* routine. (c) Modify h5dump expected output due to the async changes. * Corrections based on PR feedback. * Further changes to make based on PR feedback. * Remove H5Dwait & H5Fwait (moved to the async connector). Added H5atclose routine. Updated 'optional op' operations. * Fix missed merge marker, and reformatted line * Update API tracing infrastructure for H5atclose callback * Clean up some warnings * Normalize against develop branch * Correct level of indirection * Add doxygen info for H5is_library_terminating and regression tests for it and H5atclose * Relocate prototype (and doxygen info) for H5Aclose * Align w/changes on develop * Move group package initialization code to H5Gint.c, and update tracing macros * Change non-static function declarations to be static * Correct GCC diagnostic macro * Ensure that H5TSpublic.h header gets installed (#129) * Finish moving API routines that invoke VOL framework to main source files. 
* Fix position of H5Fmount and H5Funmount * Add 'wrapper' versions of async calls, to allow language wrappers and layers on top of HDF5 to pass in their application information. * Add wrappers for dynamically registered optional operations * Fix typo * Update doxygen comment for H5atclose with additional detail. * Add H5VL\*_vararg versions of H5VL routines that use va_list parameters * Implement and test H5S_BLOCK * Switch H5Aexists\*_async and H5Lexists\*_async to use flag to return status, instead of return value. Make the corresponding changes through most of the v1 and v2 B-tree code. Clean up warnings in H5public.h and cmpd_dtransform.c. * Add H5Iregister_future routine and tests. * Correct return value for H5Lexists_async * Add H5_DLL macro to public H5ES API routines * Update supported -> flags parameter for introspect_query callback * Remove my email address. Update passthrough VOL connector ID. * Fix comment for post_open_api_common * Remove unused non-blocking VOL connector * Minor cleanup in async branch in preparation for merge to develop * Update CMake and the Autotools to use the new pass-through VOL ID * Finish another iteration on public H5ES routines, along with running the code reformatter * Another round of reformatting * Fix for SWMR daily test failures (#160) The H5I_register_using_existing_id() call did not initialize the future ID callbacks, causing the library to segfault when it tried to resolve those function pointers. * Added selective async APIs (#150) * Added selective async APIs Description: Added the following APIs: H5Ropen_attr_async H5Ropen_object_async H5Ropen_region_async H5Mcreate_async H5Mopen_async H5Mput_async H5Mget_async H5Mclose_async H5Tcommit_async H5Topen_async H5Tcopy_async H5Tclose_async - Updated an expected output file to include a new internal function in the error stack for the failure case. * Updated async APIs per reviews, including removing async version of H5Tcopy. 
* Removed statements that were added by mistake in the previous commit. * Fix compile issues in H5M and warnings elsewhere * Reformat code * Brings VOL_LIST changes from develop. (#163) * Remove H5Dwait and H5Fwait calls, which were incorrectly brought back in * Tiny cleanup of H5Lcreate_hard_async * Run source formatter * Allow for canceled operation in wait_cb * Attempt to fix switch on string value * Re-run source formatter * Add H5S_BLOCK testfile to CMake clean target * Add H5Pset_vol_async API routine and 'get_cap_flags' VOL introspection callback * Clean up warnings * Add H5P(set\|get)_vol_implicit_async API routines to allow \/ disallow implicit asynchronous operations (default is disallowed) * Run formatting script * Remove H5VL_REQUEST_WAIT\* * Warning cleanup * Eliminate strdup()s on statically allocated strings * Warning cleanup * Split H5VLrestore_lib_state into H5VLstart_lib_state and H5VLrestore_lib_state, and rename H5VLreset_lib_state to H5VLfinish_lib_state. * Duplicate strings when building err_info to return to applicatin * Move connector author routines into seperate header files, all included in the new hdf5dev.h header * Run bin/trace to add TRACE macros * Allow H5ES_NONE as a valid, but no-op, parameter to all H5ES API routines that accept an event set ID * Clean up formatting * Remove H5Pset/get_vol_implicit_async * Clean up warning * Remove H5Pget_vol_async and replace with more generic H5Pget_vol_cap_flags * Clean up warnings * Add H5ESfree_err_info convenience routine * Fix typo * Correct matching for cached VOL plugins * Add developer header file * Update for C99 compatibility * Add missing trace macro * Stop clang-format from messing with the trace macros. 
Don't set up VOL wrappers for 'infrastructure' objects like requests and blobs * Fix warning about formatting a directory * Clean up formatting for H5E_BEGIN_TRY / H5E_END_TRY * Reduce scope of H5ES__close * Enable CMake checks for various types on MacOS * Clean up properly when H5CX_retrieve_state() fails. Also clean up many compiler warnings. * Committing clang-format changes * Merge from develop * Fix mis-placed assert * Remove commented-out code * Re-add macro for unsetenv on Windows (I think it accidentally was merged out) * Strengthen sanity check from error report to assertion * Committing clang-format changes * Add units to the comments for a few fields * Switch 'get execution time' operation for async request tokens to be an optional operation and query if connector supports operation before retrieving it. * Committing clang-format changes * Remove H5ESget_time_estimate * Committing clang-format changes * Create developer header for datatype routines and move type conversion register/unregister routines there. * Simplify internal H5VL_setup_name_args and H5VL_setup_idx_args routines * Add H5VLlink_optional_op, allowing dynamicly registered optional operations for the link VOL subclass, also added H5VL_loc_params argument to the link 'optional' callback to allow them to work correctly. * Run bin/format_source on current code * Add H5VLobject_optional_op, allowing dynamicly registered optional operations for the object VOL subclass, also added H5VL_loc_params argument to the object 'optional' callback to allow them to work correctly. * Run bin/format_source on current code * Committing clang-format changes * Revert "Switch 'get execution time' operation for async request tokens to be an optional operation and query if connector supports operation before retrieving it." This reverts commit 5ac92014da2682bdba62d7a2524b8d90e38f6b19. 
* Committing clang-format changes * Convert attribute 'get' operation to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Update tracing macros * Convert attribute 'specific' operation to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert dataset 'get' and 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also, minor tweaks to attribute 'get' and 'specific' operation parameters. * Convert datatype 'get' and 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also, minor tweaks to H5O_refresh_metadata arguments. * Reduce warnings * Reduce warnings * Track change to datatype 'get' callback * Fix bug with file pointer getting invalidated when object closed * Reformat source * Convert file and group VOL classes 'get' and 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also small cleanup to the attribute get name operation. Also moved 'mount' and 'unmount' operations to be group specific operations, instead of file specific, to better align with their behavior (mounted files are on groups, so a group is what is operated on). * Remove remainder of merge conflict marking * Convert link VOL class 'create' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Remove some unused local variables * Convert link VOL class 'get' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also refactor 'get name by idx' routines to return actual length of name with a parameter instead of the return value, and move some callback context structs for the link interface from the private header file into the source code module, to reduce their visibility scope. 
* Update tracing macros * Convert link VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert object VOL class 'get' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert object VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also refactor H5G_loc_exists, et al, to return 'exists' flag in a parameter and errors with the function return value, instead of overloading both into the return value. And, corrected logic error in test/links.c around non-existant objects in a file. * Convert request VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. * Convert blob VOL class 'specific' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Also removes the H5VL_BLOB_GETSIZE operation, as it's unused in the library and the blob ID size for a container is now returned with H5VL_FILE_GET_CONT_INFO. * Add 'const' to several parameters that are only queried. * Convert all VOL classes' 'optional' operations to use struct-of-tagged-union pattern for VOL callback arguments, instead of using varargs. Convert several 'get' routines to return the length of an array in a parameter instead of combining it into the return value. Move several routines to be in less public namespace. Correct direct_chunk test to verify that parameters aren't modified on error. * Switch get/specific/optional VOL callback argument structures to be 'async-friendly'. Also other minor cleanups and bug-fixes. 
* Add H5Pset_dataset_io_hyperslab_selection / H5S_PLIST feature, to allow skipping H5Dget_space + H5Sselect_hyperslab for async operation * Add dynamic optional operations for request objects * Update dynamic operation test for optional request operations * Update a comment for an operation argument * Run trace and format_source scripts * Committing clang-format changes * Committing clang-format changes Co-authored-by: vchoi <vchoi@jelly.ad.hdfgroup.org> Co-authored-by: vchoi-hdfgroup <55293060+vchoi-hdfgroup@users.noreply.github.com> Co-authored-by: jhendersonHDF <jhenderson@hdfgroup.org> Co-authored-by: Dana Robinson <derobins@hdfgroup.org> Co-authored-by: Dana Robinson <43805+derobins@users.noreply.github.com> Co-authored-by: bmribler <39579120+bmribler@users.noreply.github.com> Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
2021-06-03 04:29:46 +08:00
ret = H5Dchunk_iter(dset, H5P_DEFAULT, &iter_cb_fail, &cptr);
}
H5E_END_TRY
if (ret >= 0)
TEST_ERROR;
if (cptr != &(chunk_infos[1]))
FAIL_PUTS_ERROR("Verification of halted iterator failed");
/* Release resource */
2020-09-30 22:27:10 +08:00
if (H5Dclose(dset) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Sclose(dspace) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Pclose(cparms) < 0)
TEST_ERROR;
2020-09-30 22:27:10 +08:00
if (H5Fclose(basicfile) < 0)
TEST_ERROR;
/* Remove the test file */
HDremove(filename);
PASSED();
return SUCCEED;
error:
2020-09-30 22:27:10 +08:00
H5E_BEGIN_TRY
{
H5Dclose(dset);
H5Sclose(dspace);
H5Pclose(cparms);
H5Fclose(basicfile);
2020-09-30 22:27:10 +08:00
}
H5E_END_TRY
H5_FAILED();
return FAIL;
} /* test_basic_query() */
/*-------------------------------------------------------------------------
 * Function:    test_failed_attempts
 *
 * Purpose:     Verify that the chunk query API functions fail cleanly
 *              when applied to a contiguous (non-chunked) dataset.
 *
 * Return:      Success: SUCCEED
 *              Failure: FAIL
 *
 * Note:        Note that the dataspace argument in these new functions is
 *              currently not used.  The functionality involving the
 *              dataspace will be implemented in the next version.
 *
 * Date:        August 2019
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_failed_attempts(const char *filename, hid_t fapl)
{
    hid_t    fid = H5I_INVALID_HID;  /* File ID */
    hid_t    sid = H5I_INVALID_HID;  /* Dataspace ID */
    hid_t    did = H5I_INVALID_HID;  /* Dataset ID */
    hsize_t  dims[2] = {NX, NY};     /* Dataset dimensions */
    int      wbuf[NX][NY];           /* Data to write to the dataset */
    unsigned filter_mask = 0;        /* Filter mask returned by a query */
    hsize_t  coords[2];              /* Logical coordinates of a chunk */
    hsize_t  offset_out[2] = {0, 0}; /* Buffer for a returned chunk offset */
    hsize_t  chunk_size = 0;         /* Size of an allocated/written chunk */
    hsize_t  num_chunks = 0;         /* Number of chunks */
    haddr_t  chunk_addr = 0;         /* Address of an allocated/written chunk */
    hsize_t  chk_idx = 0;            /* Index of a chunk */
    hsize_t  row, col;               /* Loop counters */
    herr_t   ret;                    /* Return value used to verify failure */

    TESTING(" Invalid Operations");

    /* Open the test file for reading and writing */
    if ((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
        TEST_ERROR;

    /* Create the dataspace */
    if ((sid = H5Screate_simple(RANK, dims, NULL)) < 0)
        TEST_ERROR;

    /* Create a dataset with the default (contiguous) layout */
    if ((did = H5Dcreate2(fid, CONTIGUOUS_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT,
                          H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Fill the write buffer */
    for (row = 0; row < NX; row++)
        for (col = 0; col < NY; col++)
            wbuf[row][col] = (int)(row * col);

    /* Write the buffer out to the dataset */
    if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
        TEST_ERROR;

    /* Close, then re-open the dataset to exercise the chunk query
       functions on a contiguous dataset */
    if (H5Dclose(did) < 0)
        TEST_ERROR;
    if ((did = H5Dopen2(fid, CONTIGUOUS_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* H5Dget_num_chunks on a contiguous dataset must fail */
    H5E_BEGIN_TRY
    {
        ret = H5Dget_num_chunks(did, sid, &num_chunks);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR(" Attempt a chunk query function on a contiguous dataset.");

    /* H5Dget_chunk_info on a contiguous dataset must fail */
    chk_idx = 0;
    reinit_vars(&filter_mask, &chunk_addr, &chunk_size);
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info(did, sid, chk_idx, offset_out, &filter_mask, &chunk_addr, &chunk_size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR(" Attempt a chunk query function on a contiguous dataset.");

    /* H5Dget_chunk_info_by_coord at logical coordinates (0,0) on a
       contiguous dataset must fail */
    coords[0] = 0;
    coords[1] = 0;
    H5E_BEGIN_TRY
    {
        ret = H5Dget_chunk_info_by_coord(did, coords, &filter_mask, &chunk_addr, &chunk_size);
    }
    H5E_END_TRY
    if (ret != FAIL)
        FAIL_PUTS_ERROR(" Attempt a chunk query function on a contiguous dataset.");

    /* Release resources */
    if (H5Dclose(did) < 0)
        TEST_ERROR;
    if (H5Sclose(sid) < 0)
        TEST_ERROR;
    if (H5Fclose(fid) < 0)
        TEST_ERROR;

    PASSED();
    return SUCCEED;

error:
    H5E_BEGIN_TRY
    {
        H5Dclose(did);
        H5Sclose(sid);
        H5Fclose(fid);
    }
    H5E_END_TRY
    H5_FAILED();
    return FAIL;
} /* test_failed_attempts() */
/*-------------------------------------------------------------------------
 * Function:    test_get_chunk_info_v110
 *
 * Purpose:     Test getting various chunk information in version 1.10.
 *
 * Return:      Success: SUCCEED
 *              Failure: FAIL
 *
 * Note:        Note that the dataspace argument in these new functions is
 *              currently not used.  The functionality involving the
 *              dataspace will be implemented in the next version.
 *
 * Description:
 *              This function tests the new API functions added for
 *              HDFFV-10677: H5Dget_num_chunks, H5Dget_chunk_info, and
 *              H5Dget_chunk_info_by_coord for low bound beyond 1.8.
 *
 * Date:        October 2018
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_get_chunk_info_v110(hid_t fapl)
{
    char         filename[FILENAME_BUF_SIZE]; /* File name */
    hid_t        chunkfile = H5I_INVALID_HID; /* File ID */
    H5F_libver_t low, high;                   /* File format bounds */

    TESTING("getting chunk information in file with versions 1.10 and later");
    printf("\n"); /* to list sub-tests */

    /* Set high bound to the current latest version */
    high = H5F_LIBVER_LATEST;

    /* Test getting info of chunked datasets in version combo up to 1.10 */
    for (low = H5F_LIBVER_V110; low <= H5F_LIBVER_LATEST; low++) {
        /* Set version bounds for creating file */
        if (H5Pset_libver_bounds(fapl, low, high) < 0)
            TEST_ERROR;

        /* Create the file */
        h5_fixname(FILENAME[low], fapl, filename, sizeof filename);
        if ((chunkfile = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
            TEST_ERROR;

        /* Close the file, individual tests will re-open the file with different
           libvers via the fapl */
        if (H5Fclose(chunkfile) < 0)
            TEST_ERROR;
        /* Mark the handle closed so the error path doesn't double-close it */
        chunkfile = H5I_INVALID_HID;

        /* Test getting chunk info when Single Chunk index type is used */
        if (test_chunk_info_single_chunk(filename, fapl) < 0)
            TEST_ERROR;

        /* Test getting chunk info when Implicit index type is used */
        if (test_chunk_info_implicit(filename, fapl) < 0)
            TEST_ERROR;

        /* Test getting chunk info when Fixed Array index type is used */
        if (test_chunk_info_fixed_array(filename, fapl) < 0)
            TEST_ERROR;

        /* Test getting chunk info when Extensible Array index type is used */
        if (test_chunk_info_extensible_array(filename, fapl) < 0)
            TEST_ERROR;

        /* Test getting chunk info when Version 2 B-trees index type is used */
        if (test_chunk_info_version2_btrees(filename, fapl) < 0)
            TEST_ERROR;

        /* Test various attempts to use the functions incorrectly */
        if (test_failed_attempts(filename, fapl) < 0)
            TEST_ERROR;
    } /* for low libver bound */

    return SUCCEED;

error:
    /* Close the file handle if it is still open; previously it was leaked
       when a failure occurred between H5Fcreate and H5Fclose */
    H5E_BEGIN_TRY
    {
        H5Fclose(chunkfile);
    }
    H5E_END_TRY
    H5_FAILED();
    return FAIL;
} /* test_get_chunk_info_v110() */
/*-------------------------------------------------------------------------
 * Function:    test_flt_msk_with_skip_compress
 *
 * Purpose:     Test getting chunk info when compression filter is skipped.
 *
 *              Writes one chunk of raw (uncompressed) data via
 *              H5Dwrite_chunk() with the deflate filter masked out, then
 *              verifies that the data, the chunk storage size, and the
 *              filter mask reported by the chunk-query API all reflect the
 *              skipped filter.
 *
 * Return:      Success: SUCCEED
 *              Failure: FAIL
 *
 * Date:        August 2019 (based on direct_chunk.c/test_skip_compress_write1)
 *
 *-------------------------------------------------------------------------
 */
static herr_t
test_flt_msk_with_skip_compress(hid_t fapl)
{
    char    filename[FILENAME_BUF_SIZE];                  /* File name */
    hid_t   filter_file = H5I_INVALID_HID;                /* File ID for filter mask */
    hid_t   dspace      = H5I_INVALID_HID;                /* Dataspace ID */
    hid_t   mem_space   = H5I_INVALID_HID;                /* Dataspace ID */
    hid_t   dset        = H5I_INVALID_HID;                /* Dataset ID */
    hid_t   cparms      = H5I_INVALID_HID;                /* Creation plist */
    hid_t   dxpl        = H5I_INVALID_HID;                /* Transfer plist */
    hsize_t dims[2]     = {NX, NY};                       /* Dataset dimensions */
    hsize_t maxdims[2]  = {H5S_UNLIMITED, H5S_UNLIMITED}; /* 2 unlimited dims */
    hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};         /* Chunk dimensions */
    int     direct_buf[CHUNK_NX][CHUNK_NY];               /* One chunk of data */
    int     check_chunk[CHUNK_NX][CHUNK_NY];              /* Buffer to read data in */
    int     read_direct_buf[CHUNK_NX][CHUNK_NY];          /* Buffer to read a chunk */
    hsize_t read_buf_size = 0;                            /* buf size */
    unsigned flt_msk      = 0;                            /* Filter mask */
    unsigned read_flt_msk = 0;                            /* Filter mask after direct read */
    hsize_t offset[2]     = {0, 0};                       /* Offset coordinates of a chunk */
    hsize_t nchunks       = 0;                            /* Number of chunks */
    hsize_t chk_index     = 0;                            /* Index of a chunk */
    int     aggression    = 9;                            /* Compression aggression setting */
    hsize_t start[2];                                     /* Start of hyperslab */
    hsize_t stride[2];                                    /* Stride of hyperslab */
    hsize_t count[2];                                     /* Block count */
    hsize_t block[2];                                     /* Block sizes */
    int     ii, jj;                                       /* Array indices */

    TESTING("getting filter mask when compression filter is skipped");

    /* Create the file */
    h5_fixname(FILTERMASK_FILE, fapl, filename, sizeof filename);

    /* Create a new file. */
    if ((filter_file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Create file data space with unlimited dimensions. */
    if ((dspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
        TEST_ERROR;

    /* Create memory data space sized for exactly one chunk. */
    if ((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
        TEST_ERROR;

    /* Create dataset create property list with chunking and compression
       enabled. */
    if ((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        TEST_ERROR;
    if (H5Pset_chunk(cparms, RANK, chunk_dims) < 0)
        TEST_ERROR;
    if (H5Pset_deflate(cparms, (unsigned)aggression) < 0)
        TEST_ERROR;

    /* Create a new dataset using cparms creation properties. */
    if ((dset = H5Dcreate2(filter_file, SKIP_FILTER_DSET_NAME, H5T_NATIVE_INT, dspace, H5P_DEFAULT, cparms,
                           H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Create transfer property list for writing */
    if ((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
        TEST_ERROR;

    /* Initialize data for one chunk */
    for (ii = 0; ii < CHUNK_NX; ii++)
        for (jj = 0; jj < CHUNK_NY; jj++)
            direct_buf[ii][jj] = (int)(ii * jj);

    /* Indicate the compression filter is to be skipped. */
    flt_msk = 0x00000001;

    /* Write a chunk of uncompressed data, using the transfer plist created
       above (previously H5P_DEFAULT was passed and dxpl went unused). */
    offset[0] = CHUNK_NX;
    offset[1] = CHUNK_NY;
    if (H5Dwrite_chunk(dset, dxpl, flt_msk, offset, CHK_SIZE, direct_buf) < 0)
        TEST_ERROR;

    if (H5Fflush(dset, H5F_SCOPE_LOCAL) < 0)
        TEST_ERROR;

    /* Close and re-open the dataset */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;
    if ((dset = H5Dopen2(filter_file, SKIP_FILTER_DSET_NAME, H5P_DEFAULT)) < 0)
        TEST_ERROR;

    /* Select hyperslab for the chunk just written in the file */
    start[0]  = CHUNK_NX;
    start[1]  = CHUNK_NY;
    stride[0] = 1;
    stride[1] = 1;
    count[0]  = 1;
    count[1]  = 1;
    block[0]  = CHUNK_NX;
    block[1]  = CHUNK_NY;
    if (H5Sselect_hyperslab(dspace, H5S_SELECT_SET, start, stride, count, block) < 0)
        TEST_ERROR;

    /* Read the chunk back */
    if (H5Dread(dset, H5T_NATIVE_INT, mem_space, dspace, H5P_DEFAULT, check_chunk) < 0)
        TEST_ERROR;

    /* Check that the values read are the same as the values written */
    for (ii = 0; ii < CHUNK_NX; ii++)
        for (jj = 0; jj < CHUNK_NY; jj++)
            if (direct_buf[ii][jj] != check_chunk[ii][jj]) {
                printf("    1. Read different values than written.");
                printf("    At index %d,%d\n", ii, jj);
                printf("    direct_buf=%d, check_chunk=%d\n", direct_buf[ii][jj], check_chunk[ii][jj]);
                TEST_ERROR;
            }

    /* Query chunk storage size; it must equal the raw chunk size because the
       compression filter was skipped on write. */
    if (H5Dget_chunk_storage_size(dset, offset, &read_buf_size) < 0)
        TEST_ERROR;
    if (read_buf_size != CHK_SIZE)
        TEST_ERROR;

    /* Read the raw chunk back with H5Dread_chunk */
    memset(&read_direct_buf, 0, sizeof(read_direct_buf));
    if (H5Dread_chunk(dset, H5P_DEFAULT, offset, &read_flt_msk, read_direct_buf) < 0)
        TEST_ERROR;

    /* The filter mask read back must match the mask used on write */
    if (read_flt_msk != flt_msk)
        TEST_ERROR;

    /* Check that the direct chunk read is the same as the chunk written
       (numbered "2." to distinguish from the H5Dread check above) */
    for (ii = 0; ii < CHUNK_NX; ii++)
        for (jj = 0; jj < CHUNK_NY; jj++)
            if (direct_buf[ii][jj] != read_direct_buf[ii][jj]) {
                printf("    2. Read different values than written.");
                printf("    At index %d,%d\n", ii, jj);
                printf("    direct_buf=%d, read_direct_buf=%d\n", direct_buf[ii][jj],
                       read_direct_buf[ii][jj]);
                TEST_ERROR;
            }

    /* Get and verify the number of chunks written */
    if (H5Dget_num_chunks(dset, H5S_ALL, &nchunks) < 0)
        TEST_ERROR;
    if (ONE_CHUNK_WRITTEN != nchunks)
        FAIL_PUTS_ERROR("unexpected number of chunks");

    /* Get and verify info of the first and only chunk */
    chk_index = 0;
    offset[0] = CHUNK_NX;
    offset[1] = CHUNK_NY;
    if (verify_get_chunk_info(dset, H5S_ALL, chk_index, CHK_SIZE, offset, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info failed\n");

    /* Get info of the chunk at the specified offsets and verify its info */
    if (verify_get_chunk_info_by_coord(dset, offset, CHK_SIZE, flt_msk) == FAIL)
        FAIL_PUTS_ERROR("Verification of H5Dget_chunk_info_by_coord failed\n");

    /* Release resource */
    if (H5Dclose(dset) < 0)
        TEST_ERROR;
    if (H5Sclose(mem_space) < 0)
        TEST_ERROR;
    if (H5Sclose(dspace) < 0)
        TEST_ERROR;
    if (H5Pclose(cparms) < 0)
        TEST_ERROR;
    if (H5Pclose(dxpl) < 0)
        TEST_ERROR;
    if (H5Fclose(filter_file) < 0)
        TEST_ERROR;

    /* Remove the test file */
    HDremove(filename);

    PASSED();
    return SUCCEED;

error:
    /* Best-effort cleanup; errors during cleanup are suppressed */
    H5E_BEGIN_TRY
    {
        H5Dclose(dset);
        H5Sclose(mem_space);
        H5Sclose(dspace);
        H5Pclose(cparms);
        H5Pclose(dxpl);
        H5Fclose(filter_file);
    }
    H5E_END_TRY

    H5_FAILED();
    return FAIL;
} /* test_flt_msk_with_skip_compress() */
/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Tests functions related to chunk information
*
2020-08-01 02:40:29 +08:00
* Return: EXIT_SUCCESS/EXIT_FAILURE
*
*-------------------------------------------------------------------------
*/
int
main(void)
{
2020-09-30 22:27:10 +08:00
hid_t fapl = H5I_INVALID_HID; /* File access property list */
int nerrors = 0; /* Number of errors so far */
h5_reset();
/* Create a copy of file access property list */
2020-09-30 22:27:10 +08:00
if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
TEST_ERROR;
/* Test basic operations on the chunk query functions */
nerrors += test_basic_query(fapl) < 0 ? 1 : 0;
/* Tests getting chunk information of version 1.8 and prior */
nerrors += test_get_chunk_info_highest_v18(fapl) < 0 ? 1 : 0;
/* Tests getting chunk information of version 1.10 */
nerrors += test_get_chunk_info_v110(fapl) < 0 ? 1 : 0;
/* Tests getting filter mask when compression filter is skipped */
nerrors += test_flt_msk_with_skip_compress(fapl) < 0 ? 1 : 0;
2020-09-30 22:27:10 +08:00
if (nerrors)
2020-08-01 02:40:29 +08:00
goto error;
printf("All chunk query tests passed.\n");
h5_cleanup(FILENAME, fapl);
2020-08-01 02:40:29 +08:00
return EXIT_SUCCESS;
error:
nerrors = MAX(1, nerrors);
printf("***** %d QUERY CHUNK INFO TEST%s FAILED! *****\n", nerrors, 1 == nerrors ? "" : "S");
2020-08-01 02:40:29 +08:00
return EXIT_FAILURE;
}
/****************************************************************************
Additional tests to be added:
- do the query when extending the dataset (shrink or expand)
- verify that invalid input parameters are handled properly
****************************************************************************/