Mirror of https://github.com/HDFGroup/hdf5.git
[svn-r8867]

Purpose:
    Bug fix

Description:
    Fix error in chunked dataset I/O where data written out wasn't read
    correctly from a chunked, extendible dataset after the dataset was
    extended.  Also, fix parallel I/O tests to gather error results from
    all processes, in order to detect errors that only occur on one process.

Solution:
    Bypass chunk cache for reads as well as writes, if parallel I/O
    driver is used and file is opened for writing.

Platforms tested:
    FreeBSD 4.10 (sleipnir) w/parallel
    Too minor to require h5committest
commit 803bb3e532 (parent 0a8d8c54b2)
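The failure mode is easiest to see as an access pattern: write to a chunked, extendible dataset, extend it, then read the original elements back. Below is a minimal serial sketch of that pattern, not taken from the commit; the file name "extend.h5", the dataset name "data", and the sizes are hypothetical, and it assumes the 1.6-era signatures of H5Dcreate, H5Dextend, and H5Sselect_hyperslab that the diff itself uses.

    #include "hdf5.h"
    #include <stdio.h>

    int main(void)
    {
        hsize_t dims = 10, new_dims = 20, chunk = 16384, max = H5S_UNLIMITED;
        hsize_t start = 0, count = 10;
        int     written[10], retrieved[10], i;
        hid_t   file, fspace, mspace, dcpl, dset;

        /* create a chunked, extendible 1-D dataset */
        file   = H5Fcreate("extend.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
        dcpl   = H5Pcreate(H5P_DATASET_CREATE);
        H5Pset_chunk(dcpl, 1, &chunk);
        fspace = H5Screate_simple(1, &dims, &max);
        dset   = H5Dcreate(file, "data", H5T_NATIVE_INT, fspace, dcpl);

        /* write the initial ten elements */
        for (i = 0; i < 10; i++)
            written[i] = i;
        H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, written);

        /* extend the dataset, then re-read the original elements */
        H5Dextend(dset, &new_dims);
        H5Sclose(fspace);
        fspace = H5Dget_space(dset);
        H5Sselect_hyperslab(fspace, H5S_SELECT_SET, (hssize_t *)&start, NULL, &count, NULL);
        mspace = H5Screate_simple(1, &count, NULL);
        H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, retrieved);

        /* before this fix, under parallel I/O the data read back here
         * could differ from what was written */
        for (i = 0; i < 10; i++)
            if (written[i] != retrieved[i])
                printf("mismatch at %d: %d != %d\n", i, written[i], retrieved[i]);

        H5Sclose(mspace); H5Sclose(fspace); H5Dclose(dset);
        H5Pclose(dcpl);   H5Fclose(file);
        return 0;
    }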
@@ -142,18 +142,20 @@ Bug Fixes since HDF5-1.6.0 release
 
 Library
 -------
+    - Fixed bug where incorrect data could be read from a chunked dataset
+      after it was extended. QAK - 2004/07/12
     - Fixed failure to read data back from file of compound type with
      variable-length string as field. SLU - 2004/06/10
     - Fixed potential file corruption bug when a block of metadata could
      overlap the end of the internal metadata accumulator buffer and
      the buffer would be extended correctly, but would incorrectly
      change it's starting address. QAK - 2004/06/09
     - Opaque datatype with no tag failed for some operations.  Fixed.
      SLU - 2004/6/3
     - Fixed potential file corruption bug where dimensions that were
      too large (a value greater than could be represented in 32-bits)
      could cause the incorrect amount of space to be allocated in a
      file for the raw data for the dataset. QAK - 2004/06/01
     - Fixed dtypes "sw long double -> double" failure in QSC class
      machines. AKC - 2004/4/16
     - Fixed problem with fletcher32 filter when converting data of different
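One context entry above is worth a concrete illustration: a dimension value that exceeds 32 bits silently wraps if it is ever narrowed to a 32-bit type, which is exactly how an allocation size can come out too small. A hypothetical sketch of that failure class (illustrative only, not HDF5 code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* a dataset dimension too large to represent in 32 bits */
        uint64_t dim = 5000000000ULL;       /* 5 * 10^9 > 2^32 */
        uint32_t narrowed = (uint32_t)dim;  /* silently wraps modulo 2^32 */

        printf("64-bit value: %llu\n", (unsigned long long)dim);
        printf("after 32-bit narrowing: %u\n", narrowed);  /* 705032704 */
        return 0;
    }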
@@ -1860,10 +1860,20 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
      * If the chunk is too large to load into the cache and it has no
      * filters in the pipeline (i.e. not compressed) and if the address
      * for the chunk has been defined, then don't load the chunk into the
-     * cache, just write the data to it directly.
+     * cache, just read the data from it directly.
+     *
+     * If MPI based VFD is used, must bypass the
+     * chunk-cache scheme because other MPI processes could be
+     * writing to other elements in the same chunk.  Do a direct
+     * read-through of only the elements requested.
      */
-    if (dset->layout.u.chunk.size>dset->cache.chunk.nbytes && dset->dcpl_cache.pline.nused==0 &&
-            chunk_addr!=HADDR_UNDEF) {
+    if ((dset->layout.u.chunk.size>dset->cache.chunk.nbytes && dset->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
+            || (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & H5F_get_intent(f)))) {
+#ifdef H5_HAVE_PARALLEL
+        /* Additional sanity check when operating in parallel */
+        if (chunk_addr==HADDR_UNDEF || dset->dcpl_cache.pline.nused>0)
+            HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
+#endif /* H5_HAVE_PARALLEL */
         if ((ret_value=H5D_contig_readvv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
             HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file");
     } /* end if */
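Read as a predicate, the new condition bypasses the chunk cache in two independent cases: an oversized, unfiltered chunk with a known file address, or an MPI-based VFD on a file opened read-write. A hypothetical standalone restatement of that logic (the function and parameter names are illustrative stand-ins, not HDF5 API):

    #include <stddef.h>

    /* Illustrative restatement of the bypass test above; all names are
     * hypothetical stand-ins for the fields and macros used in the diff. */
    static int bypass_chunk_cache(size_t chunk_size, size_t cache_nbytes,
                                  size_t n_filters, int addr_defined,
                                  int is_mpi_vfd, int opened_rdwr)
    {
        /* Case 1: chunk exceeds the cache, is unfiltered, and has a
         * defined file address, so it can be accessed directly. */
        if (chunk_size > cache_nbytes && n_filters == 0 && addr_defined)
            return 1;

        /* Case 2: MPI-based VFD with the file open for writing; other
         * processes may touch other elements of the same chunk, so the
         * cache must be bypassed for reads as well as writes. */
        if (is_mpi_vfd && opened_rdwr)
            return 1;

        return 0;
    }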
testpar/t_dset.c (170 changed lines)
@@ -12,8 +12,6 @@
  * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-/* $Id$ */
-
 /*
  * Parallel tests for datasets
  */
@@ -1301,6 +1299,174 @@ extend_writeInd(void)
     if (data_array1) free(data_array1);
 }
 
+/*
+ * Example of using the parallel HDF5 library to create an extendable dataset
+ * and perform I/O on it in a way that verifies that the chunk cache is
+ * bypassed for parallel I/O.
+ */
+
+void
+extend_writeInd2(void)
+{
+    char *filename;
+    hid_t fid;                  /* HDF5 file ID */
+    hid_t fapl;                 /* File access templates */
+    hid_t fs;                   /* File dataspace ID */
+    hid_t ms;                   /* Memory dataspace ID */
+    hid_t dataset;              /* Dataset ID */
+    hbool_t use_gpfs = FALSE;   /* Use GPFS hints */
+    hsize_t orig_size=10;       /* Original dataset dim size */
+    hsize_t new_size=20;        /* Extended dataset dim size */
+    hsize_t one=1;
+    hsize_t max_size = H5S_UNLIMITED;   /* dataset maximum dim size */
+    hsize_t chunk_size = 16384; /* chunk size */
+    hid_t dcpl;                 /* dataset create prop. list */
+    int written[10],            /* Data to write */
+        retrieved[10];          /* Data read in */
+    int mpi_size, mpi_rank;     /* MPI settings */
+    int i;                      /* Local index variable */
+    herr_t ret;                 /* Generic return value */
+
+    filename = (char *) GetTestParameters();
+    if (VERBOSE_MED)
+        printf("Extend independent write test #2 on file %s\n", filename);
+
+    /* set up MPI parameters */
+    MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+    MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+
+    /* -------------------
+     * START AN HDF5 FILE
+     * -------------------*/
+    /* setup file access template */
+    fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type, use_gpfs);
+    VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+    /* create the file collectively */
+    fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,fapl);
+    VRFY((fid >= 0), "H5Fcreate succeeded");
+
+    /* Release file-access template */
+    ret=H5Pclose(fapl);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+    /* --------------------------------------------------------------
+     * Define the dimensions of the overall datasets and create them.
+     * ------------------------------------------------------------- */
+
+    /* set up dataset storage chunk sizes and creation property list */
+    dcpl = H5Pcreate(H5P_DATASET_CREATE);
+    VRFY((dcpl >= 0), "H5Pcreate succeeded");
+    ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+    VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+    /* setup dimensionality object */
+    fs = H5Screate_simple (1, &orig_size, &max_size);
+    VRFY((fs >= 0), "H5Screate_simple succeeded");
+
+    /* create an extendible dataset collectively */
+    dataset = H5Dcreate(fid, DATASETNAME1, H5T_NATIVE_INT, fs, dcpl);
+    VRFY((dataset >= 0), "H5Dcreate succeeded");
+
+    /* release resource */
+    ret=H5Pclose(dcpl);
+    VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+    /* -------------------------
+     * Test writing to dataset
+     * -------------------------*/
+    /* create a memory dataspace independently */
+    ms = H5Screate_simple(1, &orig_size, &max_size);
+    VRFY((ms >= 0), "H5Screate_simple succeeded");
+
+    /* put some trivial data in the data_array */
+    for (i=0; i<(int)orig_size; i++)
+        written[i] = i;
+    MESG("data array initialized");
+    if (VERBOSE_MED) {
+        MESG("writing at offset zero: ");
+        for (i=0; i<(int)orig_size; i++)
+            printf("%s%d", i?", ":"", written[i]);
+        printf("\n");
+    }
+    ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+    VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* -------------------------
+     * Read initial data from dataset.
+     * -------------------------*/
+    ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+    VRFY((ret >= 0), "H5Dread succeeded");
+    for (i=0; i<(int)orig_size; i++)
+        if(written[i]!=retrieved[i]) {
+            printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+                i,written[i], i,retrieved[i]);
+            nerrors++;
+        }
+    if (VERBOSE_MED){
+        MESG("read at offset zero: ");
+        for (i=0; i<(int)orig_size; i++)
+            printf("%s%d", i?", ":"", retrieved[i]);
+        printf("\n");
+    }
+
+    /* -------------------------
+     * Extend the dataset & retrieve new dataspace
+     * -------------------------*/
+    ret =H5Dextend(dataset, &new_size);
+    VRFY((ret >= 0), "H5Dextend succeeded");
+    ret=H5Sclose(fs);
+    VRFY((ret >= 0), "H5Sclose succeeded");
+    fs = H5Dget_space(dataset);
+    VRFY((fs >= 0), "H5Dget_space succeeded");
+
+    /* -------------------------
+     * Write to the second half of the dataset
+     * -------------------------*/
+    for (i=0; i<(int)orig_size; i++)
+        written[i] = orig_size + i;
+    MESG("data array re-initialized");
+    if (VERBOSE_MED) {
+        MESG("writing at offset 10: ");
+        for (i=0; i<(int)orig_size; i++)
+            printf("%s%d", i?", ":"", written[i]);
+        printf("\n");
+    }
+    ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, (hssize_t *)&orig_size, NULL, &one, &orig_size);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+    ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+    VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* -------------------------
+     * Read the new data
+     * -------------------------*/
+    ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+    VRFY((ret >= 0), "H5Dread succeeded");
+    for (i=0; i<(int)orig_size; i++)
+        if(written[i]!=retrieved[i]) {
+            printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+                i,written[i], i,retrieved[i]);
+            nerrors++;
+        }
+    if (VERBOSE_MED){
+        MESG("read at offset 10: ");
+        for (i=0; i<(int)orig_size; i++)
+            printf("%s%d", i?", ":"", retrieved[i]);
+        printf("\n");
+    }
+
+
+    /* Close dataset collectively */
+    ret=H5Dclose(dataset);
+    VRFY((ret >= 0), "H5Dclose succeeded");
+
+    /* Close the file collectively */
+    ret = H5Fclose(fid);
+    VRFY((ret >= 0), "H5Fclose succeeded");
+}
+
 /* Example of using the parallel HDF5 library to read an extendible dataset */
 void
 extend_readInd(void)
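The second-half write in the test above selects elements with H5Sselect_hyperslab using start = orig_size, count = 1, and block = orig_size, with the NULL stride defaulting to 1. A small self-contained sketch of the 1-D selection arithmetic this implies (illustrative only, not HDF5 code):

    #include <stdio.h>

    /* Enumerate the indices a 1-D hyperslab (start, stride, count, block)
     * selects; NULL stride and block in HDF5 default to 1. */
    static void print_hyperslab_1d(unsigned long start, unsigned long stride,
                                   unsigned long count, unsigned long block)
    {
        unsigned long c, b;
        for (c = 0; c < count; c++)
            for (b = 0; b < block; b++)
                printf("%lu ", start + c * stride + b);
        printf("\n");
    }

    int main(void)
    {
        /* the selection used by extend_writeInd2: one block of 10
         * elements starting at offset 10 */
        print_hyperslab_1d(10, 1, 1, 10);
        return 0;
    }

Run against the values in the test, this prints indices 10 through 19, i.e. the newly extended half of the dataset.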
@@ -12,8 +12,6 @@
  * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-/* $Id$ */
-
 /*
  * Main driver of the Parallel HDF5 tests
  */
@@ -434,6 +432,8 @@ int main(int argc, char **argv)
                 "dataset collective write", filenames[1]);
     AddTest("indwriteext", extend_writeInd, NULL,
                 "extendible dataset independent write", filenames[2]);
+    AddTest("indwriteext2", extend_writeInd2, NULL,
+                "extendible dataset independent write #2", filenames[2]);
     AddTest("collwriteext", extend_writeAll, NULL,
                 "extendible dataset collective write", filenames[2]);
 
@@ -482,7 +482,7 @@ int main(int argc, char **argv)
     if (CleanUp && !getenv("HDF5_NOCLEANUP"))
         TestCleanup();
 
-    nerrors = GetTestNumErrs();
+    nerrors += GetTestNumErrs();
 
 }
 
@@ -526,6 +526,9 @@ int main(int argc, char **argv)
     MPI_BANNER("extendible dataset independent write...");
     extend_writeInd(filenames[2]);
 
+    MPI_BANNER("extendible dataset independent write #2...");
+    extend_writeInd2(filenames[2]);
+
     MPI_BANNER("extendible dataset collective write...");
     extend_writeAll(filenames[2]);
     }
@@ -600,22 +603,28 @@ finish:
      * and exit.
      */
     MPI_Barrier(MPI_COMM_WORLD);
+
+    /* Gather errors from all processes */
+    {
+        int temp;
+        MPI_Reduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
+        if(mpi_rank==0)
+            nerrors=temp;
+    }
+
     if (MAINPROCESS){           /* only process 0 reports */
         printf("===================================\n");
-        if (nerrors){
+        if (nerrors)
             printf("***PHDF5 tests detected %d errors***\n", nerrors);
-        }
-        else{
+        else
             printf("PHDF5 tests finished with no errors\n");
-        }
         printf("===================================\n");
     }
-    if (dowrite){
+    if (dowrite)
         h5_cleanup(FILENAME, fapl);
-    } else {
+    else
         /* h5_cleanup would have closed fapl. Now must do it explicitedly */
         H5Pclose(fapl);
-    }
 
     /* close HDF5 library */
     H5close();
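The MPI_Reduce with MPI_MAX above is the substance of the test-harness fix: an error count that is nonzero on any rank becomes visible to rank 0, which does the reporting. A minimal standalone sketch of the same pattern (the simulated failure on rank 1 is hypothetical):

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        int mpi_rank, nerrors = 0, max_errors = 0;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

        /* pretend rank 1 detected a failure that rank 0 did not see */
        if (mpi_rank == 1)
            nerrors = 1;

        /* MPI_MAX makes an error on any one process visible to rank 0 */
        MPI_Reduce(&nerrors, &max_errors, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);

        if (mpi_rank == 0)
            printf("errors detected across all ranks: %d\n", max_errors);

        MPI_Finalize();
        return 0;
    }

Using MPI_MAX rather than MPI_SUM also keeps an error that every rank reports from being counted once per process.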
@@ -12,8 +12,6 @@
  * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-/* $Id$ */
-
 #ifndef PHDF5TEST_H
 #define PHDF5TEST_H
 
@@ -153,6 +151,7 @@ void test_split_comm_access(void);
 void dataset_writeInd(void);
 void dataset_writeAll(void);
 void extend_writeInd(void);
+void extend_writeInd2(void);
 void extend_writeAll(void);
 void dataset_readInd(void);
 void dataset_readAll(void);