mirror of
https://github.com/HDFGroup/hdf5.git
synced 2024-12-15 07:40:23 +08:00
Fixes the chunk overwrite bug (HDFFV-10187) and adds a new H5DO HL test.
This commit is contained in:
parent
1a4bda1bd8
commit
3d8416251d
#define DATASETNAME4 "data_conv"        /* dataset for the datatype-conversion test */
#define DATASETNAME5 "contiguous_dset"  /* dataset with contiguous (non-chunked) layout */
#define DATASETNAME6 "invalid_argue"    /* dataset used for invalid-argument checks */
#define DATASETNAME7 "overwrite_chunk"  /* dataset for the chunk-overwrite test (HDFFV-10187) */

#define RANK 2                          /* rank of the 2-D test datasets */
#define NX 16                           /* dataset dimension sizes for the 2-D tests */
#define NY 16
#define ADD_ON 7  /* constant used by a bogus test filter — NOTE(review): usage not visible here, confirm */
#define FACTOR 3  /* constant used by a bogus test filter — NOTE(review): usage not visible here, confirm */

/* Constants for the overwrite test */
#define OVERWRITE_NDIMS 3     /* rank: one unlimited dimension plus a 2-D chunk plane */
#define OVERWRITE_CHUNK_NX 3  /* chunk extent in the fastest-changing dimension */
#define OVERWRITE_CHUNK_2NX 6 /* dataset extent: two chunks side by side (2 * NX) */
#define OVERWRITE_CHUNK_NY 2  /* chunk (and dataset) extent in the middle dimension */
#define OVERWRITE_VALUE 42    /* value written when overwriting the first chunk */

/* Local prototypes for filter functions */
static size_t filter_bogus1(unsigned int flags, size_t cd_nelmts,
                            const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
||||
@ -498,6 +506,111 @@ error:
|
||||
}
|
||||
#endif /* H5_HAVE_FILTER_DEFLATE */
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
* Function: test_direct_chunk_overwrite_data
|
||||
*
|
||||
* Purpose: Test overwriting a chunk with new data.
|
||||
*
|
||||
* Return: Success: 0
|
||||
* Failure: 1
|
||||
*
|
||||
* Programmer: Dana Robinson
|
||||
* Spring 2017
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
static int
|
||||
test_direct_chunk_overwrite_data(hid_t fid)
|
||||
{
|
||||
size_t buf_size = OVERWRITE_CHUNK_NX * OVERWRITE_CHUNK_NY * sizeof(int16_t);
|
||||
int16_t data_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_NX];
|
||||
int16_t overwrite_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_NX];
|
||||
uint32_t filter_mask = 0;
|
||||
hid_t tid = H5T_NATIVE_UINT16;
|
||||
hid_t dcpl_id = -1;
|
||||
hid_t sid = -1;
|
||||
hid_t did = -1;
|
||||
uint16_t fill_value = 0;
|
||||
hsize_t dset_dims[] = {1, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_2NX};
|
||||
hsize_t dset_max_dims[] = {H5S_UNLIMITED, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_2NX};
|
||||
hsize_t chunk_dims[] = {1, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_NX};
|
||||
hsize_t offset[] = {0, 0, 0};
|
||||
hsize_t i, j;
|
||||
int16_t n;
|
||||
int16_t read_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_2NX];
|
||||
|
||||
TESTING("overwriting existing data with H5DOwrite_chunk");
|
||||
|
||||
/* Create the dataset's data space */
|
||||
if ((sid = H5Screate_simple(OVERWRITE_NDIMS, dset_dims, dset_max_dims)) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
/* Set chunk size and filll value */
|
||||
if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
if (H5Pset_fill_value(dcpl_id, tid, &fill_value) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
if (H5Pset_chunk(dcpl_id, OVERWRITE_NDIMS, chunk_dims) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
/* Create dataset */
|
||||
if ((did = H5Dcreate2(fid, DATASETNAME7, tid, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
/* Initialize data buffers */
|
||||
n = 0;
|
||||
for (i = 0; i < OVERWRITE_CHUNK_NY; i++) {
|
||||
for (j = 0; j < OVERWRITE_CHUNK_NX; j++) {
|
||||
data_buf[i][j] = n++;
|
||||
overwrite_buf[i][j] = OVERWRITE_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write chunk data using the direct write function. */
|
||||
if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, data_buf) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
/* Write second chunk. */
|
||||
offset[2] = OVERWRITE_CHUNK_NX;
|
||||
if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, data_buf) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
/* Overwrite first chunk. */
|
||||
offset[2] = 0;
|
||||
if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, overwrite_buf) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
/* Read the data back out */
|
||||
if (H5Dread(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
/* Ensure that the data are correct in chunk 1 */
|
||||
for (i = 0; i < OVERWRITE_CHUNK_NY; i++)
|
||||
for (j = 0; j < OVERWRITE_CHUNK_NX; j++) {
|
||||
if (read_buf[i][j] != OVERWRITE_VALUE)
|
||||
TEST_ERROR
|
||||
}
|
||||
|
||||
if (H5Pclose(dcpl_id) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
if (H5Sclose(sid) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
if (H5Dclose(did) < 0)
|
||||
FAIL_STACK_ERROR
|
||||
|
||||
PASSED();
|
||||
return 0;
|
||||
|
||||
error:
|
||||
H5E_BEGIN_TRY {
|
||||
H5Pclose(dcpl_id);
|
||||
H5Sclose(sid);
|
||||
H5Dclose(did);
|
||||
} H5E_END_TRY;
|
||||
|
||||
return 1;
|
||||
} /* end test_direct_chunk_overwrite_data() */
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
* Function: test_skip_compress_write1
|
||||
*
|
||||
@ -1409,6 +1522,7 @@ int main( void )
|
||||
#ifdef H5_HAVE_FILTER_DEFLATE
|
||||
nerrors += test_direct_chunk_write(file_id);
|
||||
#endif /* H5_HAVE_FILTER_DEFLATE */
|
||||
nerrors += test_direct_chunk_overwrite_data(file_id);
|
||||
nerrors += test_skip_compress_write1(file_id);
|
||||
nerrors += test_skip_compress_write2(file_id);
|
||||
nerrors += test_data_conv(file_id);
|
||||
|
@ -445,11 +445,18 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
|
||||
/* Set up the size of chunk for user data */
|
||||
udata.chunk_block.length = data_size;
|
||||
|
||||
/* Create the chunk if it doesn't exist, or reallocate the chunk
 * if its size changed.
 */
|
||||
if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, scaled) < 0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
|
||||
if (0 == idx_info.pline->nused && H5F_addr_defined(old_chunk.offset)) {
|
||||
/* If there are no filters and we are overwriting the chunk we can just set values */
|
||||
udata.chunk_block.offset = old_chunk.offset;
|
||||
need_insert = FALSE;
|
||||
}
|
||||
else {
|
||||
/* Otherwise, create the chunk if it doesn't exist, or reallocate the
 * chunk if its size has changed.
 */
|
||||
if (H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, scaled) < 0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
|
||||
}
|
||||
|
||||
/* Make sure the address of the chunk is returned. */
|
||||
if(!H5F_addr_defined(udata.chunk_block.offset))
|
||||
|
Loading…
Reference in New Issue
Block a user