Mirror of https://github.com/HDFGroup/hdf5.git
[svn-r11312] Purpose:
Bug #281

Description:
A dataset created in serial mode with the H5D_ALLOC_TIME_INCR allocation setting was not extendible, either explicitly by H5Dextend or implicitly by writing to unallocated chunks. This was because parallel mode expects the allocation mode to be H5D_ALLOC_TIME_EARLY only.

Solution:
Modified the library to allocate more space when needed, or when directed, if the file is opened in parallel mode, independent of the dataset's allocation mode.

Platforms tested:
Heping pp.
This commit is contained in:
parent bd4312d049
commit 3182a8a040
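For context, here is a minimal sketch (not part of this commit) of the serial setup the bug report describes: a chunked, extendible dataset whose creation property list requests incremental allocation. H5D_ALLOC_TIME_INCR is already the serial default for chunked datasets; it is set explicitly here only to mirror the report. The helper name and file/dataset names are illustrative, and the 1.6-era H5Dcreate signature is used to match the test code below.

#include "hdf5.h"

int
make_incr_dataset(const char *filename)    /* hypothetical helper, not in the commit */
{
    hsize_t dims[1]    = {1000};
    hsize_t maxdims[1] = {H5S_UNLIMITED};
    hsize_t chunk[1]   = {1000};
    hid_t   space, dcpl, file, dset;

    space = H5Screate_simple(1, dims, maxdims);
    dcpl  = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, chunk);
    /* Incremental allocation: chunk space is allocated only when written. */
    H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR);

    file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    dset = H5Dcreate(file, "ExtendibleArray", H5T_NATIVE_INT, space, dcpl);

    /* Before this fix, reopening such a file for parallel access and then
     * extending the dataset (or writing to unallocated chunks) could fail. */
    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}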
@@ -31,7 +31,7 @@ check_PROGRAMS = $(TEST_PROG_PARA)
 check_SCRIPTS= $(TEST_SCRIPT)
 
 testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_mdset.c t_ph5basic.c \
-        t_coll_chunk.c t_span_tree.c
+        t_coll_chunk.c t_span_tree.c t_chunk_alloc.c
 
 # The tests all depend on the hdf5 library and the test library
 LDADD = $(LIBHDF5) $(LIBH5TEST)
@@ -78,7 +78,8 @@ t_mpi_LDADD = $(LDADD)
 t_mpi_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_2)
 am_testphdf5_OBJECTS = testphdf5.$(OBJEXT) t_dset.$(OBJEXT) \
         t_file.$(OBJEXT) t_mdset.$(OBJEXT) t_ph5basic.$(OBJEXT) \
-        t_coll_chunk.$(OBJEXT) t_span_tree.$(OBJEXT)
+        t_coll_chunk.$(OBJEXT) t_span_tree.$(OBJEXT) \
+        t_chunk_alloc.$(OBJEXT)
 testphdf5_OBJECTS = $(am_testphdf5_OBJECTS)
 testphdf5_LDADD = $(LDADD)
 testphdf5_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_2)
@@ -321,7 +322,7 @@ TEST_PROG_PARA = t_mpi t_fphdf5 testphdf5
 TEST_SCRIPT_PARA = testph5.sh
 check_SCRIPTS = $(TEST_SCRIPT)
 testphdf5_SOURCES = testphdf5.c t_dset.c t_file.c t_mdset.c t_ph5basic.c \
-        t_coll_chunk.c t_span_tree.c
+        t_coll_chunk.c t_span_tree.c t_chunk_alloc.c
 
 
 # The tests all depend on the hdf5 library and the test library
@@ -403,6 +404,7 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_chunk_alloc.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_coll_chunk.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_dset.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/t_file.Po@am__quote@
testpar/t_chunk_alloc.c (new file, 194 lines)
@@ -0,0 +1,194 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5. The full HDF5 copyright notice, including      *
 * terms governing use, modification, and redistribution, is contained in    *
 * the files COPYING and Copyright.html. COPYING can be found at the root    *
 * of the source code distribution tree; Copyright.html can be found at the  *
 * root level of an installed copy of the electronic HDF5 document set and   *
 * is linked from the top-level documents page. It can also be found at      *
 * http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have      *
 * access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * This verifies whether the storage space allocation methods are compatible
 * between serial and parallel modes.
 *
 * Created by: Christian Chilan and Albert Cheng
 * Date: 2005/07/05
 */

#include "testphdf5.h"

#define DATASETNAME "ExtendibleArray"
#define ELEMS       20000000

/*
 * This creates a dataset serially with 20000 chunks, each of 1000
 * elements. It does not perform any writing on it. Another routine
 * will open this in parallel for an extension test.
 */
void
create_chunked_dataset(const char *filename)
{
    hid_t   file;                          /* handles */
    hid_t   dataspace, dataset;
    hid_t   cparms;
    int     mpi_size, mpi_rank;
    hsize_t dims[1] = {20*1000000};        /* dataset dimensions at creation time */
    hsize_t maxdims[1] = {H5S_UNLIMITED};
    herr_t  hrc;

    /* Variables used in reading data back */
    hsize_t chunk_dims[1] = {1000};

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* Create the data space with unlimited dimensions. */
    dataspace = H5Screate_simple(1, dims, maxdims);
    VRFY((dataspace >= 0), "");

    /* Create a new file. If the file exists, its contents will be overwritten. */
    file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    VRFY((file >= 0), "");

    /* Modify dataset creation properties, i.e. enable chunking. */
    cparms = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((cparms >= 0), "");

    hrc = H5Pset_chunk(cparms, 1, chunk_dims);
    VRFY((hrc >= 0), "");

    /* Create a new dataset within the file using the cparms creation properties. */
    dataset = H5Dcreate(file, DATASETNAME, H5T_NATIVE_INT, dataspace, cparms);
    VRFY((dataset >= 0), "");

    /* Close resources */
    hrc = H5Dclose(dataset);
    VRFY((hrc >= 0), "");

    hrc = H5Fclose(file);
    VRFY((hrc >= 0), "");
}
/*
 * This program extends the size of a small dataset to 20000 chunks (each of
 * 1000 elements) in parallel. The dataset was initially created in a serial
 * environment. A correct extend operation should increase the file size
 * considerably.
 */
void
extend_chunked_dataset(const char *filename)
{
    /* HDF5 gubbins */
    hid_t  file_id, dataset;     /* HDF5 file and dataset identifiers */
    hid_t  access_plist;         /* HDF5 ID for file access property list */
    herr_t hrc;                  /* HDF5 return code */

    hsize_t size[1];
    int     mpi_size, mpi_rank;

    long int lb;

    /* MPI gubbins */
    MPI_Comm   comm;
    MPI_Info   info;
    MPI_File   thefile;
    MPI_Offset filesize;

    int mpierr, ret;

    printf("extend_chunked_dataset: filename=%s\n", filename);

    /* Initialize MPI */
    comm = MPI_COMM_WORLD;
    info = MPI_INFO_NULL;

    /* set up MPI parameters */
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    /* Set up the HDF5 file access property list for MPI-IO access */
    access_plist = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((access_plist >= 0), "");

    hrc = H5Pset_fapl_mpio(access_plist, comm, info);
    VRFY((hrc >= 0), "");

    /* Open the file */
    file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
    VRFY((file_id >= 0), "");

    /* Can close some plists */
    hrc = H5Pclose(access_plist);
    VRFY((hrc >= 0), "");

    /* Open the dataset */
    dataset = H5Dopen(file_id, DATASETNAME);
    VRFY((dataset >= 0), "");

    size[0] = ELEMS;

    /* Extend the dataset */
    hrc = H5Dextend(dataset, size);
    VRFY((hrc >= 0), "");

    /* Close up */
    hrc = H5Dclose(dataset);
    VRFY((hrc >= 0), "");

    hrc = H5Fclose(file_id);
    VRFY((hrc >= 0), "");

    /* Reopen the file with plain MPI-IO to check its physical size */
    mpierr = MPI_File_open(comm, (char *)filename, MPI_MODE_RDONLY, info, &thefile);
    VRFY((mpierr == MPI_SUCCESS), "");

    mpierr = MPI_File_get_size(thefile, &filesize);
    VRFY((mpierr == MPI_SUCCESS), "");

    /* Lower bound on the extended file size: ELEMS native ints.
     * (sizeof(int), not sizeof(H5T_NATIVE_INT): the latter is only the
     * size of an hid_t handle.) */
    lb = ELEMS*sizeof(int);

    /* Print the size of the file. */
    printf("File size: %ld\n", (long int)filesize);

    if (filesize >= lb) {
        printf("Test passed\n");
        ret = 0;
    } else {
        printf("Test failed\n");
        ret = 1;
    }

    mpierr = MPI_File_close(&thefile);
    VRFY((mpierr == MPI_SUCCESS), "");

    hrc = H5close();
    VRFY((hrc >= 0), "");

    /* Finalize */
    mpierr = MPI_Finalize();
    VRFY((mpierr == MPI_SUCCESS), "");
}
void
test_chunk_alloc(void)
{
    const char *filename;

    filename = GetTestParameters();
    if (VERBOSE_MED)
        printf("Extend Chunked allocation test on file %s\n", filename);

    /* create the datafile first */
    create_chunked_dataset(filename);
    /* reopen it in parallel and extend it. */
    /*extend_chunked_dataset(filename); */
}
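The test above exercises only the explicit H5Dextend path. Below is a minimal sketch (not part of this commit, helper name hypothetical) of the other failure mode named in the commit message: writing to a chunk whose space was never allocated by the serial creator, which must trigger allocation under parallel access. It assumes the dataset created by create_chunked_dataset above and uses the same 1.6-era API.

#include "hdf5.h"
#include <mpi.h>

void
write_unallocated_chunk(const char *filename)    /* hypothetical helper, not in the commit */
{
    hid_t   fapl, file, dset, fspace, mspace;
    hsize_t start[1] = {0}, count[1] = {1000};
    int     buf[1000], i;

    for (i = 0; i < 1000; i++)
        buf[i] = i;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

    file = H5Fopen(filename, H5F_ACC_RDWR, fapl);
    dset = H5Dopen(file, "ExtendibleArray");     /* 1.6-era two-argument H5Dopen */

    /* Select the first chunk's worth of elements in the file dataspace. */
    fspace = H5Dget_space(dset);
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
    mspace = H5Screate_simple(1, count, NULL);

    /* Implicit allocation: this write targets a chunk with no file space
     * yet, so the library must allocate it while open for parallel access. */
    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);

    H5Sclose(mspace);
    H5Sclose(fspace);
    H5Dclose(dset);
    H5Fclose(file);
    H5Pclose(fapl);
}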
@@ -381,6 +381,8 @@ int main(int argc, char **argv)
 	        "extendible dataset collective read", PARATESTFILE);
     AddTest("eidsetw2", extend_writeInd2, NULL,
 	        "extendible dataset independent write #2", PARATESTFILE);
+    AddTest("calloc", test_chunk_alloc, NULL,
+	        "parallel extend Chunked allocation on serial file", PARATESTFILE);
 
 #ifdef H5_HAVE_FILTER_DEFLATE
     AddTest("cmpdsetr", compress_readAll, NULL,
@@ -187,6 +187,7 @@ void dataset_readInd(void);
 void dataset_readAll(void);
 void extend_readInd(void);
 void extend_readAll(void);
+void test_chunk_alloc(void);
 void compact_dataset(void);
 void null_dataset(void);
 void big_dataset(void);