[svn-r8590] Purpose:

Code optimization & bug fix

Description:
    When dimension information is stored in the storage layout message
on disk, it is stored as 32-bit quantities, which can truncate the dimension
information if a dimension's size does not fit in 32 bits.

Solution:
    Fix the storage layout message problem by revising the file format so that
it no longer stores dimension information, since that information is already
available in the dataspace.

    Also revise the storage layout data structures so that the information for
contiguous, chunked, and compact storage is more compartmentalized.
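
    As a reference for the changes below, a rough sketch of the
compartmentalized layout structure follows.  The member names mirror the
u.contig.*, u.chunk.* and u.compact.* accesses in the diff; the exact types,
the H5O_LAYOUT_NDIMS limit and the enum values are assumptions, not the
authoritative H5Oprivate.h definition.

    /* Sketch only -- not the actual H5Oprivate.h definition */
    #include <stddef.h>
    #include <stdint.h>

    #define H5O_LAYOUT_NDIMS 33                     /* assumed: max rank + 1 */
    typedef uint64_t haddr_t;                       /* assumed file address type */
    typedef uint64_t hsize_t;                       /* assumed size type */
    typedef enum { H5D_COMPACT, H5D_CONTIGUOUS, H5D_CHUNKED } H5D_layout_t;

    typedef struct H5O_layout_t {
        H5D_layout_t type;                          /* storage class */
        unsigned version;                           /* message version (1, 2 or 3) */
        union {
            struct {                                /* contiguous storage */
                haddr_t addr;                       /* file address of the data */
                hsize_t size;                       /* size of the data, in bytes */
            } contig;
            struct {                                /* chunked storage */
                unsigned ndims;                     /* rank + 1; last dim is element size */
                hsize_t dim[H5O_LAYOUT_NDIMS];      /* chunk dimensions */
                hsize_t size;                       /* bytes in one chunk */
                haddr_t addr;                       /* address of the chunk B-tree */
            } chunk;
            struct {                                /* compact storage */
                size_t size;                        /* size of the raw data, in bytes */
                void *buf;                          /* raw data, kept in the object header */
                unsigned dirty;                     /* buffer modified since last flush? */
            } compact;
        } u;
    } H5O_layout_t;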

Platforms tested:
    FreeBSD 4.9 (sleipnir) w/parallel
    Solaris 2.7 (arabica)
    h5committest
This commit is contained in:
Quincey Koziol 2004-05-27 15:24:08 -05:00
parent 71fba25e6e
commit 2ce06c3912
32 changed files with 1363 additions and 596 deletions

View File

@ -782,6 +782,7 @@
./src/H5Dprivate.h
./src/H5Dpublic.h
./src/H5Dpkg.h
./src/H5Dtest.c
./src/H5Ztrans.c
./src/H5E.c
./src/H5Edefin.h
@ -997,6 +998,7 @@
./test/gen_old_array.c _DO_NOT_DISTRIBUTE_
./test/gen_new_array.c _DO_NOT_DISTRIBUTE_
./test/gen_new_fill.c _DO_NOT_DISTRIBUTE_
./test/gen_old_layout.c _DO_NOT_DISTRIBUTE_
./test/gen_old_mtime.c _DO_NOT_DISTRIBUTE_
./test/gen_new_mtime.c _DO_NOT_DISTRIBUTE_
./test/gen_new_super.c _DO_NOT_DISTRIBUTE_
@ -1022,6 +1024,7 @@
./test/th5s.h5
./test/theap.c
./test/titerate.c
./test/tlayouto.h5
./test/tmeta.c
./test/tmisc.c
./test/tmtimen.h5

View File

@ -130,8 +130,7 @@ TABLE.list TD { border:none; }
<li><a href="#AttributeMessage">Name: Attribute</a> <!-- 0x000c -->
<li><a href="#CommentMessage">Name: Object Comment</a> <!-- 0x000d -->
<li><a href="#OldModifiedMessage">Name: Object Modification Date and Time (Old)</a> <!-- 0x000e -->
<!-- <li><a href="#SharedMessage">Name: Shared Object Message</a> --> <!-- 0x000f -->
<li><a href="#ReservedMessage_000F">Name: Reserved - not assigned yet</a> <!-- 0x000f -->
<li><a href="#SharedMessage">Name: Shared Object Message</a> <!-- 0x000f -->
<li><a href="#ContinuationMessage">Name: Object Header Continuation</a> <!-- 0x0010 -->
<li><a href="#SymbolTableMessage">Name: Group Message</a> <!-- 0x0011 -->
<li><a href="#ModifiedMessage">Name: Object Modification Date and Time</a> <!-- 0x0012 -->
@ -4729,14 +4728,17 @@ TABLE.list TD { border:none; }
addresses.
<li>The array can be stored in one contiguous block, as part of
this object header message.
this object header message (this is called "compact" storage below).
</ol>
<P>Version 3 of this message re-structured the format into specific
properties that are required for each layout class.
<p>
<center>
<table border cellpadding=4 width="80%">
<caption align=top>
<B>Data Layout Message</B>
<B>Data Layout Message, Versions 1 and 2</B>
</caption>
<tr align=center>
@ -4850,6 +4852,233 @@ TABLE.list TD { border:none; }
</table>
</center>
<p>
<center>
<table border cellpadding=4 width="80%">
<caption align=top>
<B>Data Layout Message, Version 3</B>
</caption>
<tr align=center>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
</tr>
<tr align=center>
<td>Version</td>
<td>Layout Class</td>
</tr>
<tr align=center>
<td colspan=4>Properties</td>
</tr>
</table>
</center>
<p>
<center>
<table align=center width="80%">
<tr align=left>
<th width="30%"><U><font size=+1>Field Name</font></U></th>
<th><U><font size=+1>Description</font></U></th>
</tr>
<tr valign=top>
<td>Version</td>
<td>A version number for the layout message. This value can be
either 1, 2 or 3.</td>
</tr>
<tr valign=top>
<td>Layout Class</td>
<td>The layout class specifies how the other fields of the
layout message are to be interpreted. A value of one
indicates contiguous storage, a value of two
indicates chunked storage,
while a value of three
indicates compact storage.</td>
</tr>
<tr valign=top>
<td>Properties</td>
<td>This variable-sized field encodes information specific to each
layout class and is described below. If there is no property
information specified for a layout class, the size of this field
is zero bytes.</td>
</tr>
</table>
</center>
<P>Class-specific information for contiguous layout (Class 0):
<p>
<center>
<table border cellpadding=4 width="80%">
<caption align=top>
<B>Property Descriptions</B>
</caption>
<tr align=center>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
</tr>
<tr align=center>
<td colspan=4><br>Address<br><br></td>
</tr>
<tr align=center>
<td colspan=4><br>Size<br><br></td>
</tr>
</table>
</center>
<p>
<center>
<table align=center width="80%">
<tr align=left>
<th width="30%"><U><font size=+1>Field Name</font></U></th>
<th><U><font size=+1>Description</font></U></th>
</tr>
<tr valign=top>
<td>Address</td>
<td>This is the address of the first byte of raw data storage.
The address may have the "undefined address" value, to indicate
that storage has not yet been allocated for this array.</td>
</tr>
<tr valign=top>
<td>Size</td>
<td>This field contains the size allocated to store the raw data.</td>
</table>
</center>
<P>Class-specific information for chunked layout (Class 1):
<p>
<center>
<table border cellpadding=4 width="80%">
<caption align=top>
<B>Property Descriptions</B>
</caption>
<tr align=center>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
</tr>
<tr align=center>
<td>Dimensionality</td>
</tr>
<tr align=center>
<td colspan=4><br>Address<br><br></td>
</tr>
<tr align=center>
<td colspan=4>Dimension 0 (4-bytes)</td>
</tr>
<tr align=center>
<td colspan=4>Dimension 1 (4-bytes)</td>
</tr>
<tr align=center>
<td colspan=4>...</td>
</tr>
</table>
</center>
<p>
<center>
<table align=center width="80%">
<tr align=left>
<th width="30%"><U><font size=+1>Field Name</font></U></th>
<th><U><font size=+1>Description</font></U></th>
</tr>
<tr valign=top>
<td>Dimensionality</td>
<td>A chunk has a fixed dimensionality. This field
specifies the number of dimension size fields later in the
message.</td>
</tr>
<tr valign=top>
<td>Address</td>
<td>This is the address
of the B-tree that is used to look up the addresses of the
chunks.
The address
may have the "undefined address" value, to indicate that
storage has not yet been allocated for this array.</td>
</tr>
<tr valign=top>
<td>Dimensions</td>
<td>The dimension sizes define the size of a single chunk.</td>
</tr>
</table>
</center>
<P>Class-specific information for compact layout (Class 2):
<p>
<center>
<table border cellpadding=4 width="80%">
<caption align=top>
<B>Property Descriptions</B>
</caption>
<tr align=center>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
</tr>
<tr align=center>
<td colspan=2>Size</td>
</tr>
<tr align=center>
<td colspan=4>Raw Data</td>
</tr>
<tr align=center>
<td colspan=4>...</td>
</tr>
</table>
</center>
<p>
<center>
<table align=center width="80%">
<tr align=left>
<th width="30%"><U><font size=+1>Field Name</font></U></th>
<th><U><font size=+1>Description</font></U></th>
</tr>
<tr valign=top>
<td>Size</td>
<td>This field contains the size of the raw data for the dataset array.</td>
<tr valign=top>
<td>Raw Data</td>
<td>This field contains the raw data for the dataset array.</td>
</tr>
</table>
</center>
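
As a non-normative illustration of the property encodings described above, the
following sketch parses each class's property block.  It is not the actual
H5O_layout_decode() routine; the little-endian helper and the 8-byte file
address and length widths are assumptions (the real widths come from the
file's superblock).

/* Sketch only: per-class property parsing for the version 3 layout message */
#include <stdint.h>
#include <stddef.h>

/* Read an n-byte little-endian unsigned value and advance the pointer (assumed helper) */
static uint64_t get_le(const uint8_t **pp, size_t nbytes)
{
    uint64_t v = 0;
    for (size_t i = 0; i < nbytes; i++)
        v |= (uint64_t)(*pp)[i] << (8 * i);
    *pp += nbytes;
    return v;
}

/* Contiguous: address of the raw data, then its allocated size */
static void parse_contig_props(const uint8_t **pp, uint64_t *addr, uint64_t *size)
{
    *addr = get_le(pp, 8);                      /* "Address" field */
    *size = get_le(pp, 8);                      /* "Size" field */
}

/* Chunked: dimensionality, B-tree address, then 4-byte chunk dimensions */
static unsigned parse_chunk_props(const uint8_t **pp, uint64_t *btree_addr, uint32_t *dim)
{
    unsigned ndims = (unsigned)get_le(pp, 1);   /* "Dimensionality" field */
    *btree_addr = get_le(pp, 8);                /* "Address" of the chunk B-tree */
    for (unsigned u = 0; u < ndims; u++)
        dim[u] = (uint32_t)get_le(pp, 4);       /* one 4-byte size per dimension */
    return ndims;
}

/* Compact: 2-byte size followed by the raw data stored in the message itself */
static const uint8_t *parse_compact_props(const uint8_t **pp, uint16_t *size)
{
    const uint8_t *raw;
    *size = (uint16_t)get_le(pp, 2);            /* "Size" field */
    raw = *pp;                                  /* "Raw Data" field */
    *pp += *size;
    return raw;
}
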
<hr>
<h4><a name="ReservedMessage_0009">Name: Reserved - Not Assigned Yet</a></h4>
@ -5348,7 +5577,6 @@ TABLE.list TD { border:none; }
</table>
</center>
<!--
<hr>
<h4><a name="SharedMessage">Name: Shared Object Message</a></h4>
<b>Header Message Type:</b> 0x000F<br>
@ -5444,20 +5672,8 @@ TABLE.list TD { border:none; }
</tr>
</table>
</center>
-->
<hr>
<h4><a name="ReservedMessage_000F">Name: Reserved - Not Assigned Yet</a></h4>
<b>Header Message Type:</b> 0x000F<BR>
<b>Length:</b> N/A<BR>
<b>Status:</b> N/A<BR>
<b>Format of Data:</b> N/A<BR>
<p><b>Purpose and Description:</b> This message type was skipped during
the initial specification of the file format and may be used in a
future expansion to the format.
<hr>
<h4><a name="ContinuationMessage">Name: Object Header Continuation</a></h4>
<b>Header Message Type:</b> 0x0010<BR>

View File

@ -269,7 +269,7 @@ H5A_create(const H5G_entry_t *ent, const char *name, const H5T_t *type,
assert(attr->dt_size>0);
attr->ds_size=H5O_raw_size(H5O_SDSPACE_ID,attr->ent.file,&(space->extent));
assert(attr->ds_size>0);
H5_ASSIGN_OVERFLOW(attr->data_size,H5S_get_simple_extent_npoints(attr->ds)*H5T_get_size(attr->dt),hssize_t,size_t);
H5_ASSIGN_OVERFLOW(attr->data_size,H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds)*H5T_get_size(attr->dt),hssize_t,size_t);
/* Hold the symbol table entry (and file) open */
if (H5O_open(&(attr->ent)) < 0)
@ -629,7 +629,7 @@ H5A_write(H5A_t *attr, const H5T_t *mem_type, const void *buf, hid_t dxpl_id)
assert(buf);
/* Create buffer for data to store on disk */
if((snelmts=H5S_get_simple_extent_npoints (attr->ds))<0)
if((snelmts=H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds))<0)
HGOTO_ERROR (H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
nelmts=(hsize_t)snelmts;
@ -778,7 +778,7 @@ H5A_read(const H5A_t *attr, const H5T_t *mem_type, void *buf, hid_t dxpl_id)
assert(buf);
/* Create buffer for data to store on disk */
if((snelmts=H5S_get_simple_extent_npoints (attr->ds))<0)
if((snelmts=H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds))<0)
HGOTO_ERROR (H5E_ATTR, H5E_CANTCOUNT, FAIL, "dataspace is invalid")
nelmts=(hsize_t)snelmts;

242
src/H5D.c
View File

@ -189,7 +189,7 @@ H5D_init_interface(void)
H5P_genclass_t *crt_pclass;
H5D_layout_t layout = H5D_CRT_LAYOUT_DEF;
unsigned chunk_ndims = H5D_CRT_CHUNK_DIM_DEF;
hsize_t chunk_size[H5O_LAYOUT_NDIMS] = H5D_CRT_CHUNK_SIZE_DEF;
size_t chunk_size[H5O_LAYOUT_NDIMS] = H5D_CRT_CHUNK_SIZE_DEF;
H5O_fill_t fill = H5D_CRT_FILL_VALUE_DEF;
H5D_alloc_time_t alloc_time = H5D_CRT_ALLOC_TIME_DEF;
H5D_fill_time_t fill_time = H5D_CRT_FILL_TIME_DEF;
@ -1443,7 +1443,7 @@ H5D_get_space_status(const H5D_t *dset, H5D_space_status_t *allocation, hid_t dx
assert(space);
/* Get the total number of elements in dataset's dataspace */
if((total_elem=H5S_get_simple_extent_npoints(space))<0)
if((total_elem=H5S_GET_SIMPLE_EXTENT_NPOINTS(space))<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTCOUNT, FAIL, "unable to get # of dataspace elements")
/* Get the size of the dataset's datatype */
@ -1788,7 +1788,7 @@ H5D_update_entry_info(H5F_t *file, hid_t dxpl_id, H5D_t *dset, H5P_genplist_t *p
/* Add the dataset's raw data size to the size of the header, if the raw data will be stored as compact */
if (layout->type == H5D_COMPACT)
ohdr_size += layout->size;
ohdr_size += layout->u.compact.size;
/* Create (open for write access) an object header */
if (H5O_create(file, dxpl_id, ohdr_size, ent) < 0)
@ -2031,10 +2031,8 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
H5D_t *new_dset = NULL;
int i, ndims;
unsigned u;
hsize_t max_dim[H5O_LAYOUT_NDIMS]={0};
H5F_t *file=NULL;
unsigned chunk_ndims = 0; /* Dimensionality of chunk */
hsize_t chunk_size[H5O_LAYOUT_NDIMS]={0};
H5P_genplist_t *dc_plist=NULL; /* New Property list */
hbool_t has_vl_type=FALSE; /* Flag to indicate a VL-type for dataset */
H5D_t *ret_value; /* Return value */
@ -2159,80 +2157,88 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
/* Check if this dataset is going into a parallel file and set space allocation time */
if(IS_H5FD_MPI(file))
new_dset->alloc_time=H5D_ALLOC_TIME_EARLY;
/* Set up layout information */
if((ndims=H5S_GET_SIMPLE_EXTENT_NDIMS(new_dset->space))<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "unable to get rank")
new_dset->layout.ndims = (unsigned)ndims + 1;
assert((unsigned)(new_dset->layout.ndims) <= NELMTS(new_dset->layout.dim));
new_dset->layout.dim[new_dset->layout.ndims-1] = H5T_get_size(new_dset->type);
new_dset->layout.addr = HADDR_UNDEF; /* Initialize to no address */
switch (new_dset->layout.type) {
case H5D_CONTIGUOUS:
/*
* The maximum size of the dataset cannot exceed the storage size.
* Also, only the slowest varying dimension of a simple data space
* can be extendible.
*/
if ((ndims=H5S_get_simple_extent_dims(new_dset->space, new_dset->layout.dim, max_dim))<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize contiguous storage")
for (i=1; i<ndims; i++) {
if (max_dim[i]>new_dset->layout.dim[i])
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "only the first dimension can be extendible")
}
if (new_dset->efl.nused>0) {
hsize_t max_points = H5S_get_npoints_max (new_dset->space);
hsize_t max_storage = H5O_efl_total_size (&new_dset->efl);
{
hssize_t tmp_size; /* Temporary holder for raw data size */
hsize_t dim[H5O_LAYOUT_NDIMS]; /* Current size of data in elements */
hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
if (H5S_UNLIMITED==max_points) {
if (H5O_EFL_UNLIMITED!=max_storage)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "unlimited data space but finite storage")
} else if (max_points * H5T_get_size (type) < max_points) {
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space * type size overflowed")
} else if (max_points * H5T_get_size (type) > max_storage) {
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space size exceeds external storage size")
/*
* The maximum size of the dataset cannot exceed the storage size.
* Also, only the slowest varying dimension of a simple data space
* can be extendible (currently only for external data storage).
*/
new_dset->layout.u.contig.addr = HADDR_UNDEF; /* Initialize to no address */
if ((ndims=H5S_get_simple_extent_dims(new_dset->space, dim, max_dim))<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize contiguous storage")
for (i=1; i<ndims; i++)
if (max_dim[i]>dim[i])
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "only the first dimension can be extendible")
if (new_dset->efl.nused>0) {
hsize_t max_points = H5S_get_npoints_max (new_dset->space);
hsize_t max_storage = H5O_efl_total_size (&new_dset->efl);
if (H5S_UNLIMITED==max_points) {
if (H5O_EFL_UNLIMITED!=max_storage)
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "unlimited data space but finite storage")
} else if (max_points * H5T_get_size (type) < max_points) {
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space * type size overflowed")
} else if (max_points * H5T_get_size (type) > max_storage) {
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "data space size exceeds external storage size")
}
} else if (ndims>0 && max_dim[0]>dim[0]) {
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset")
}
} else if (ndims>0 && max_dim[0]>new_dset->layout.dim[0]) {
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL, "extendible contiguous non-external dataset")
}
/* Compute the total size of a chunk */
for (u=1, new_dset->layout.chunk_size=new_dset->layout.dim[0]; u<new_dset->layout.ndims; u++)
new_dset->layout.chunk_size *= new_dset->layout.dim[u];
/* Compute the total size of a chunk */
tmp_size = H5S_GET_SIMPLE_EXTENT_NPOINTS(new_dset->space) *
H5T_get_size(new_dset->type);
H5_ASSIGN_OVERFLOW(new_dset->layout.u.contig.size,tmp_size,hssize_t,hsize_t);
} /* end case */
break;
case H5D_CHUNKED:
/*
* Chunked storage allows any type of data space extension, so we
* don't even bother checking.
*/
if(chunk_ndims != (unsigned)H5S_GET_SIMPLE_EXTENT_NDIMS(new_dset->space))
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "dimensionality of chunks doesn't match the data space")
if (new_dset->efl.nused>0)
HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, NULL, "external storage not supported with chunked layout")
{
hsize_t max_dim[H5O_LAYOUT_NDIMS]; /* Maximum size of data in elements */
/*
* The chunk size of a dimension with a fixed size cannot exceed
* the maximum dimension size
*/
if(H5P_get(dc_plist, H5D_CRT_CHUNK_SIZE_NAME, chunk_size) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve chunk size")
/* Set up layout information */
if((ndims=H5S_GET_SIMPLE_EXTENT_NDIMS(new_dset->space))<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, NULL, "unable to get rank")
new_dset->layout.u.chunk.ndims = (unsigned)ndims + 1;
assert((unsigned)(new_dset->layout.u.chunk.ndims) <= NELMTS(new_dset->layout.u.chunk.dim));
if (H5S_get_simple_extent_dims(new_dset->space, NULL, max_dim)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to query maximum dimensions")
for (u=0; u<new_dset->layout.ndims-1; u++) {
if(max_dim[u] != H5S_UNLIMITED && max_dim[u] < chunk_size[u])
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be <= maximum dimension size for fixed-sized dimensions")
}
new_dset->layout.u.chunk.addr = HADDR_UNDEF; /* Initialize to no address */
/*
* Chunked storage allows any type of data space extension, so we
* don't even bother checking.
*/
if(chunk_ndims != ndims)
HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, NULL, "dimensionality of chunks doesn't match the data space")
if (new_dset->efl.nused>0)
HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, NULL, "external storage not supported with chunked layout")
/* Set the dataset's chunk sizes from the property list's chunk sizes */
for (u=0; u<new_dset->layout.ndims-1; u++)
new_dset->layout.dim[u] = chunk_size[u];
/*
* The chunk size of a dimension with a fixed size cannot exceed
* the maximum dimension size
*/
if(H5P_get(dc_plist, H5D_CRT_CHUNK_SIZE_NAME, new_dset->layout.u.chunk.dim) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't retrieve chunk size")
new_dset->layout.u.chunk.dim[new_dset->layout.u.chunk.ndims-1] = H5T_get_size(new_dset->type);
/* Compute the total size of a chunk */
for (u=1, new_dset->layout.chunk_size=new_dset->layout.dim[0]; u<new_dset->layout.ndims; u++)
new_dset->layout.chunk_size *= new_dset->layout.dim[u];
if (H5S_get_simple_extent_dims(new_dset->space, NULL, max_dim)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to query maximum dimensions")
for (u=0; u<new_dset->layout.u.chunk.ndims-1; u++)
if(max_dim[u] != H5S_UNLIMITED && max_dim[u] < new_dset->layout.u.chunk.dim[u])
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL, "chunk size must be <= maximum dimension size for fixed-sized dimensions")
/* Compute the total size of a chunk */
for (u=1, new_dset->layout.u.chunk.size=new_dset->layout.u.chunk.dim[0]; u<new_dset->layout.u.chunk.ndims; u++)
new_dset->layout.u.chunk.size *= new_dset->layout.u.chunk.dim[u];
} /* end case */
break;
case H5D_COMPACT:
@ -2244,19 +2250,17 @@ H5D_create(H5G_entry_t *loc, const char *name, hid_t type_id, const H5S_t *space
* Compact dataset is stored in dataset object header message of
* layout.
*/
tmp_size = H5S_get_simple_extent_npoints(space) *
tmp_size = H5S_GET_SIMPLE_EXTENT_NPOINTS(space) *
H5T_get_size(new_dset->type);
H5_ASSIGN_OVERFLOW(new_dset->layout.size,tmp_size,hssize_t,size_t);
H5_ASSIGN_OVERFLOW(new_dset->layout.u.compact.size,tmp_size,hssize_t,size_t);
/* Verify data size is smaller than maximum header message size
* (64KB) minus other layout message fields.
*/
comp_data_size=H5O_MAX_SIZE-H5O_layout_meta_size(file, &(new_dset->layout));
if(new_dset->layout.size > comp_data_size)
if(new_dset->layout.u.compact.size > comp_data_size)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "compact dataset size is bigger than header message maximum size")
if (H5S_get_simple_extent_dims(space, new_dset->layout.dim, NULL)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize dimension size of compact dataset storage");
}
} /* end case */
break;
default:
@ -2464,7 +2468,6 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
H5O_fill_new_t fill = {NULL, 0, NULL, H5D_ALLOC_TIME_LATE, H5D_CRT_FILL_TIME_DEF, TRUE};
H5O_fill_t *fill_prop; /* Pointer to dataset's fill value area */
H5O_pline_t pline; /* I/O pipeline information */
H5D_layout_t layout; /* Dataset layout */
H5P_genplist_t *plist; /* Property list */
H5D_t *ret_value = NULL; /*return value */
@ -2517,11 +2520,21 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
*/
if (NULL==H5O_read(&(dataset->ent), H5O_LAYOUT_ID, 0, &(dataset->layout), dxpl_id))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to read data layout message")
if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &dataset->layout.type) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout")
switch (dataset->layout.type) {
case H5D_CONTIGUOUS:
layout = H5D_CONTIGUOUS;
if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout")
/* Compute the size of the contiguous storage for versions of the
* layout message less than version 3 because versions 1 & 2 would
* truncate the dimension sizes to 32-bits of information. - QAK 5/26/04
*/
if(dataset->layout.version<3) {
hssize_t tmp_size; /* Temporary holder for raw data size */
tmp_size = H5S_GET_SIMPLE_EXTENT_NPOINTS(dataset->space) *
H5T_get_size(dataset->type);
H5_ASSIGN_OVERFLOW(dataset->layout.u.contig.size,tmp_size,hssize_t,hsize_t);
} /* end if */
break;
case H5D_CHUNKED:
@ -2533,23 +2546,18 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
{
unsigned chunk_ndims; /* Dimensionality of chunk */
layout = H5D_CHUNKED;
chunk_ndims = dataset->layout.ndims - 1;
chunk_ndims = dataset->layout.u.chunk.ndims - 1;
if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout")
if(H5P_set(plist, H5D_CRT_CHUNK_DIM_NAME, &chunk_ndims) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set chunk dimensions")
if(H5P_set(plist, H5D_CRT_CHUNK_SIZE_NAME, dataset->layout.dim) < 0)
if(H5P_set(plist, H5D_CRT_CHUNK_SIZE_NAME, dataset->layout.u.chunk.dim) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set chunk size")
}
break;
case H5D_COMPACT:
layout = H5D_COMPACT;
if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set layout")
break;
default:
HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, NULL, "not implemented yet")
} /* end switch */
@ -2617,19 +2625,22 @@ H5D_open_oid(const H5G_entry_t *ent, hid_t dxpl_id)
/* Get the external file list message, which might not exist. Space is
* also undefined when space allocate time is H5D_ALLOC_TIME_LATE. */
if( !H5F_addr_defined(dataset->layout.addr)) {
if((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr))) {
HDmemset(&dataset->efl,0,sizeof(H5O_efl_t));
if(NULL != H5O_read(&(dataset->ent), H5O_EFL_ID, 0, &dataset->efl, dxpl_id))
if(H5P_set(plist, H5D_CRT_EXT_FILE_LIST_NAME, &dataset->efl) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, NULL, "can't set external file list")
}
/*
* Make sure all storage is properly initialized.
* This is important only for parallel I/O where the space must
* be fully allocated before I/O can happen.
*/
if ((H5F_get_intent(dataset->ent.file) & H5F_ACC_RDWR)
&& (dataset->layout.type!=H5D_COMPACT && dataset->layout.addr==HADDR_UNDEF)
&& ((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr)))
&& IS_H5FD_MPI(dataset->ent.file)) {
if (H5D_alloc_storage(dataset->ent.file, dxpl_id, dataset,H5D_ALLOC_OPEN, TRUE, FALSE)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, NULL, "unable to initialize file storage")
@ -2708,10 +2719,10 @@ H5D_close(H5D_t *dataset)
H5I_dec_ref(dataset->dcpl_id) < 0);
/* Update header message of layout for compact dataset. */
if(dataset->layout.type==H5D_COMPACT && dataset->layout.dirty) {
if(dataset->layout.type==H5D_COMPACT && dataset->layout.u.compact.dirty) {
if(H5O_modify(&(dataset->ent), H5O_LAYOUT_ID, 0, 0, 1, &(dataset->layout), H5AC_dxpl_id)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to update layout message")
dataset->layout.dirty = FALSE;
dataset->layout.u.compact.dirty = FALSE;
} /* end if */
/* Remove the dataset from the list of opened objects in the file */
@ -2732,7 +2743,7 @@ H5D_close(H5D_t *dataset)
dataset->ent.file = NULL;
/* Free the buffer for the raw data for compact datasets */
if(dataset->layout.type==H5D_COMPACT)
dataset->layout.buf=H5MM_xfree(dataset->layout.buf);
dataset->layout.u.compact.buf=H5MM_xfree(dataset->layout.u.compact.buf);
H5FL_FREE(H5D_t,dataset);
if (free_failed)
@ -2924,13 +2935,13 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo
* We assume that external storage is already
* allocated by the caller, or at least will be before I/O is performed.
*/
if(dset->efl.nused==0) {
if(!(H5S_NULL == H5S_GET_SIMPLE_EXTENT_TYPE(dset->space) || dset->efl.nused>0)) {
/* Get a pointer to the dataset's layout information */
layout=&(dset->layout);
switch (layout->type) {
case H5D_CONTIGUOUS:
if(layout->addr==HADDR_UNDEF) {
if(layout->u.contig.addr==HADDR_UNDEF) {
/* Reserve space in the file for the entire array */
if (H5F_contig_create (f, dxpl_id, layout/*out*/)<0)
HGOTO_ERROR (H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize contiguous storage")
@ -2944,7 +2955,7 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo
break;
case H5D_CHUNKED:
if(layout->addr==HADDR_UNDEF) {
if(layout->u.chunk.addr==HADDR_UNDEF) {
/* Create the root of the B-tree that describes chunked storage */
if (H5F_istore_create (f, dxpl_id, layout/*out*/)<0)
HGOTO_ERROR (H5E_IO, H5E_CANTINIT, FAIL, "unable to initialize chunked storage")
@ -2967,12 +2978,12 @@ H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset/*in,out*/, H5D_time_allo
case H5D_COMPACT:
/* Check if space is already allocated */
if(layout->buf==NULL) {
if(layout->u.compact.buf==NULL) {
/* Reserve space in layout header message for the entire array. */
assert(layout->size>0);
if (NULL==(layout->buf=H5MM_malloc(layout->size)))
assert(layout->u.compact.size>0);
if (NULL==(layout->u.compact.buf=H5MM_malloc(layout->u.compact.size)))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate memory for compact dataset")
layout->dirty = TRUE;
layout->u.compact.dirty = TRUE;
/* Indicate that we set the storage addr */
addr_set=1;
@ -3073,7 +3084,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id)
space=dset->space;
/* Get the number of elements in the dataset's dataspace */
snpoints = H5S_get_simple_extent_npoints(space);
snpoints = H5S_GET_SIMPLE_EXTENT_NPOINTS(space);
assert(snpoints>=0);
H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t);
@ -3084,9 +3095,9 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id)
/* If the fill value is defined, initialize the data buffer with it */
if(dset->fill.buf)
/* Initialize the cached data buffer with the fill value */
H5V_array_fill(dset->layout.buf, dset->fill.buf, dset->fill.size, npoints);
H5V_array_fill(dset->layout.u.compact.buf, dset->fill.buf, dset->fill.size, npoints);
else /* If the fill value is default, zero set data buf. */
HDmemset(dset->layout.buf, 0, dset->layout.size);
HDmemset(dset->layout.u.compact.buf, 0, dset->layout.u.compact.size);
} /* end if */
break;
@ -3117,7 +3128,7 @@ H5D_init_storage(H5D_t *dset, hbool_t full_overwrite, hid_t dxpl_id)
/* We only handle simple data spaces so far */
if ((ndims=H5S_get_simple_extent_dims(space, dim, NULL))<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple data space info")
dim[ndims] = dset->layout.dim[ndims];
dim[ndims] = dset->layout.u.chunk.dim[ndims];
if (H5F_istore_allocate(dset->ent.file, dxpl_id, &(dset->layout), dim, plist, full_overwrite)<0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to allocate all chunks of dataset")
@ -3204,25 +3215,23 @@ H5D_get_storage_size(const H5D_t *dset, hid_t dxpl_id)
switch(dset->layout.type) {
case H5D_CHUNKED:
if(dset->layout.addr == HADDR_UNDEF)
if(dset->layout.u.chunk.addr == HADDR_UNDEF)
ret_value=0;
else
ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, dset->layout.ndims,
dset->layout.addr);
ret_value = H5F_istore_allocated(dset->ent.file, dxpl_id, dset->layout.u.chunk.ndims,
dset->layout.u.chunk.addr);
break;
case H5D_CONTIGUOUS:
/* Datasets which are not allocated yet are using no space on disk */
if(dset->layout.addr == HADDR_UNDEF)
if(dset->layout.u.contig.addr == HADDR_UNDEF)
ret_value=0;
else {
for (u=0, ret_value=1; u<dset->layout.ndims; u++)
ret_value *= dset->layout.dim[u];
} /* end else */
else
ret_value=dset->layout.u.contig.size;
break;
case H5D_COMPACT:
ret_value = dset->layout.size;
ret_value = dset->layout.u.compact.size;
break;
default:
@ -3313,9 +3322,9 @@ H5D_get_offset(const H5D_t *dset)
/* If there's user block in file, returns the absolute dataset offset
* from the beginning of file. */
if(base_addr!=HADDR_UNDEF)
ret_value = dset->layout.addr + base_addr;
ret_value = dset->layout.u.contig.addr + base_addr;
else
ret_value = dset->layout.addr;
ret_value = dset->layout.u.contig.addr;
break;
default:
@ -3879,10 +3888,11 @@ H5D_flush(const H5F_t *f, hid_t dxpl_id)
for(j=0; j<num_dsets; j++) {
if(NULL==(dataset=H5I_object_verify(id_list[j], H5I_DATASET)))
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to get dataset object")
if(dataset->layout.type==H5D_COMPACT && dataset->layout.dirty)
if(dataset->layout.type==H5D_COMPACT && dataset->layout.u.compact.dirty) {
if(H5O_modify(&(dataset->ent), H5O_LAYOUT_ID, 0, 0, 1, &(dataset->layout), dxpl_id)<0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update layout message")
dataset->layout.dirty = FALSE;
dataset->layout.u.compact.dirty = FALSE;
} /* end if */
}
} /* end if */
@ -3925,9 +3935,9 @@ H5Ddebug(hid_t dset_id)
/* Print B-tree information */
if (H5D_CHUNKED==dset->layout.type) {
(void)H5F_istore_dump_btree(dset->ent.file, H5AC_dxpl_id, stdout, dset->layout.ndims, dset->layout.addr);
(void)H5F_istore_dump_btree(dset->ent.file, H5AC_dxpl_id, stdout, dset->layout.u.chunk.ndims, dset->layout.u.chunk.addr);
} else if (H5D_CONTIGUOUS==dset->layout.type) {
HDfprintf(stdout, " %-10s %a\n", "Address:", dset->layout.addr);
HDfprintf(stdout, " %-10s %a\n", "Address:", dset->layout.u.contig.addr);
}
done:
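
The chunked branch of H5D_create() above sets up the chunk dimensions with one
extra, fastest-varying dimension holding the datatype size, then takes the
product of all chunk dimensions to get the chunk size in bytes.  The following
is a minimal stand-alone sketch of that arithmetic (simplified types; not the
real H5D code).

/* Stand-alone sketch of the chunked-layout setup arithmetic (assumptions noted) */
#include <stdio.h>

typedef unsigned long long hsize_t;         /* assumed */

/* Copy the user's chunk dims, append the datatype size as the fastest-varying
 * dimension, and return the total size of one chunk in bytes. */
static hsize_t setup_chunk(unsigned rank, const hsize_t *chunk_dims,
                           hsize_t type_size, hsize_t *dim /* out: rank+1 entries */)
{
    hsize_t size;
    unsigned u;

    for (u = 0; u < rank; u++)
        dim[u] = chunk_dims[u];
    dim[rank] = type_size;                  /* extra "element size" dimension */

    for (u = 0, size = 1; u <= rank; u++)
        size *= dim[u];                     /* bytes per chunk */
    return size;
}

int main(void)
{
    hsize_t chunk_dims[2] = {10, 20};       /* hypothetical 10x20 chunk of doubles */
    hsize_t dim[3];
    hsize_t nbytes = setup_chunk(2, chunk_dims, sizeof(double), dim);

    printf("chunk is %llu bytes\n", nbytes); /* 10 * 20 * 8 = 1600 */
    return 0;
}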

View File

@ -70,7 +70,7 @@ H5F_compact_readvv(H5F_t UNUSED *f, const H5O_layout_t *layout,
FUNC_ENTER_NOAPI(H5F_compact_readvv, FAIL);
/* Use the vectorized memory copy routine to do actual work */
if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
done:
@ -112,10 +112,10 @@ H5F_compact_writevv(H5F_t UNUSED *f, H5O_layout_t *layout,
FUNC_ENTER_NOAPI(H5F_compact_writevv, FAIL);
/* Use the vectorized memory copy routine to do actual work */
if((ret_value=H5V_memcpyvv(layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
if((ret_value=H5V_memcpyvv(layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
layout->dirty = TRUE;
layout->u.compact.dirty = TRUE;
done:
FUNC_LEAVE_NOAPI(ret_value);
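
H5F_compact_writevv() above copies the caller's data straight into the layout
message's buffer and marks it dirty, so the object header is re-written when
the dataset is flushed or closed (see the H5O_modify() calls in H5D_close()
and H5D_flush() above).  A minimal sketch of that idea, ignoring the
vectorized offset/length handling done by H5V_memcpyvv():

/* Sketch only: compact storage write = memcpy into the in-header buffer + dirty flag */
#include <string.h>

struct compact_storage {
    unsigned char *buf;    /* raw data kept in the object header message */
    size_t         size;   /* size of buf, in bytes */
    int            dirty;  /* header message needs to be re-written */
};

static int compact_write(struct compact_storage *cs, size_t offset,
                         const void *data, size_t len)
{
    if (offset + len > cs->size)           /* stay inside the message buffer */
        return -1;
    memcpy(cs->buf + offset, data, len);
    cs->dirty = 1;                         /* re-written via H5O_modify() on flush/close */
    return 0;
}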

View File

@ -75,8 +75,6 @@ H5FL_BLK_DEFINE_STATIC(zero_fill);
herr_t
H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout)
{
hsize_t size; /* Size of contiguous block of data */
unsigned u; /* Local index variable */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_contig_create, FAIL);
@ -85,14 +83,8 @@ H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout)
assert(f);
assert(layout);
/* Compute size */
size=layout->dim[0];
for (u = 1; u < layout->ndims; u++)
size *= layout->dim[u];
assert (size>0);
/* Allocate space for the contiguous data */
if (HADDR_UNDEF==(layout->addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, size)))
if (HADDR_UNDEF==(layout->u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.size)))
HGOTO_ERROR (H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space");
done:
@ -145,8 +137,8 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout,
assert(f);
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
assert(layout && H5D_CONTIGUOUS==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(H5F_addr_defined(layout->u.contig.addr));
assert(layout->u.contig.size>0);
assert(space);
assert(elmt_size>0);
@ -167,7 +159,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout,
#endif /* H5_HAVE_PARALLEL */
/* Get the number of elements in the dataset's dataspace */
snpoints = H5S_get_simple_extent_npoints(space);
snpoints = H5S_GET_SIMPLE_EXTENT_NPOINTS(space);
assert(snpoints>=0);
H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t);
@ -214,7 +206,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout,
} /* end else */
/* Start at the beginning of the dataset */
addr = layout->addr;
addr = layout->u.contig.addr;
/* Loop through writing the fill value to the dataset */
while (npoints>0) {
@ -290,8 +282,6 @@ done:
herr_t
H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
{
hsize_t size; /* Size of contiguous block of data */
unsigned u; /* Local index variable */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_contig_delete, FAIL);
@ -300,17 +290,12 @@ H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
assert(f);
assert(layout);
/* Compute size */
size=layout->dim[0];
for (u = 1; u < layout->ndims; u++)
size *= layout->dim[u];
/* Check for overlap with the sieve buffer and reset it */
if (H5F_sieve_overlap_clear(f, dxpl_id, layout->addr, size)<0)
if (H5F_sieve_overlap_clear(f, dxpl_id, layout->u.contig.addr, layout->u.contig.size)<0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to clear sieve buffer");
/* Free the file space for the chunk */
if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->addr, size)<0)
if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.addr, layout->u.contig.size)<0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free object header");
done:

View File

@ -63,6 +63,7 @@ typedef struct fm_map {
H5S_sel_iter_t mem_iter; /* Iterator for elements in memory selection */
unsigned m_ndims; /* Number of dimensions for memory dataspace */
hsize_t chunks[H5O_LAYOUT_NDIMS]; /* Number of chunks in each dimension */
hsize_t chunk_dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in each dimension */
hsize_t down_chunks[H5O_LAYOUT_NDIMS]; /* "down" size of number of chunks in each dimension */
H5O_layout_t *layout; /* Dataset layout information*/
H5S_sel_type msel_type; /* Selection type in memory */
@ -694,8 +695,9 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
* fill time is NEVER, there is no way to tell whether part of data
* has been overwritten. So just proceed in reading.
*/
if(nelmts > 0 && dataset->efl.nused==0 && dataset->layout.type!=H5D_COMPACT
&& dataset->layout.addr==HADDR_UNDEF) {
if(nelmts > 0 && dataset->efl.nused==0 &&
((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr)))) {
H5D_fill_value_t fill_status; /* Whether/How the fill value is defined */
/* Retrieve dataset's fill-value properties */
@ -912,13 +914,14 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
/* <none needed currently> */
/* Allocate data space and initialize it if it hasn't been. */
if(nelmts > 0 && dataset->efl.nused==0 && dataset->layout.type!=H5D_COMPACT
&& dataset->layout.addr==HADDR_UNDEF) {
if(nelmts > 0 && dataset->efl.nused==0 &&
((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr)))) {
hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */
hbool_t full_overwrite; /* Whether we are over-writing all the elements */
/* Get the number of elements in file dataset's dataspace */
if((file_nelmts=H5S_get_simple_extent_npoints(file_space))<0)
if((file_nelmts=H5S_GET_SIMPLE_EXTENT_NPOINTS(file_space))<0)
HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, FAIL, "can't retrieve number of elements in file dataset")
/* Always allow fill values to be written if the dataset has a VL datatype */
@ -1081,7 +1084,9 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type,
H5_timer_begin(&timer);
#endif
/* Sanity check dataset, then read it */
assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 ||
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|| dataset->efl.nused>0 ||
H5S_NULL == H5S_GET_SIMPLE_EXTENT_TYPE(file_space) ||
dataset->layout.type==H5D_COMPACT);
H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t);
@ -1178,7 +1183,9 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset, const H5T_t *mem_type,
H5_timer_begin(&timer);
#endif
/* Sanity check that space is allocated, then read data from it */
assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 ||
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|| dataset->efl.nused>0 ||
dataset->layout.type==H5D_COMPACT);
n = H5S_select_fgath(dataset->ent.file, &(dataset->layout),
&dataset->dcpl_cache, (H5D_storage_t *)&(dataset->efl), file_space,
@ -1580,8 +1587,9 @@ UNUSED
H5_timer_begin(&timer);
#endif
/* Sanity check dataset, then read it */
assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 ||
dataset->layout.type==H5D_COMPACT);
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|| dataset->efl.nused>0 || dataset->layout.type==H5D_COMPACT);
/* Get first node in chunk tree */
chunk_node=H5TB_first(fm.fsel->root);
@ -1709,8 +1717,9 @@ UNUSED
H5_timer_begin(&timer);
#endif
/* Sanity check that space is allocated, then read data from it */
assert(dataset->layout.addr!=HADDR_UNDEF || dataset->efl.nused>0 ||
dataset->layout.type==H5D_COMPACT);
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|| dataset->efl.nused>0 || dataset->layout.type==H5D_COMPACT);
n = H5S_select_fgath(dataset->ent.file, &(dataset->layout),
&dataset->dcpl_cache, &store, chunk_info->fspace,
&file_iter, smine_nelmts, dxpl_cache, dxpl_id, tconv_buf/*out*/);
@ -2324,16 +2333,16 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
hsize_t dims[H5O_LAYOUT_NDIMS]; /* Temporary dimension information */
/* Set up "equivalent" n-dimensional dataspace with size '1' in each dimension */
for(u=0; u<dataset->layout.ndims-1; u++)
for(u=0; u<dataset->layout.u.chunk.ndims-1; u++)
dims[u]=1;
if((equiv_mspace = H5S_create_simple(dataset->layout.ndims-1,dims,NULL))==NULL)
if((equiv_mspace = H5S_create_simple(dataset->layout.u.chunk.ndims-1,dims,NULL))==NULL)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create equivalent dataspace for scalar space")
/* Indicate that this space needs to be released */
equiv_mspace_init=1;
/* Set the number of dimensions for the memory dataspace */
fm->m_ndims=dataset->layout.ndims-1;
fm->m_ndims=dataset->layout.u.chunk.ndims-1;
} /* end else */
else {
equiv_mspace=(H5S_t *)mem_space; /* Casting away 'const' OK... */
@ -2343,7 +2352,7 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
} /* end else */
/* Get dim number and dimensionality for each dataspace */
fm->f_ndims=f_ndims=dataset->layout.ndims-1;
fm->f_ndims=f_ndims=dataset->layout.u.chunk.ndims-1;
if(H5S_get_simple_extent_dims(file_space, fm->f_dims, NULL)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
@ -2352,8 +2361,11 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
last_nchunks=0;
nchunks=1;
for(u=0; u<f_ndims; u++) {
/* Keep the size of the chunk dimensions as hsize_t for various routines */
fm->chunk_dim[u]=fm->layout->u.chunk.dim[u];
/* Round up to the next integer # of chunks, to accommodate partial chunks */
fm->chunks[u] = ((fm->f_dims[u]+dataset->layout.dim[u])-1) / dataset->layout.dim[u];
fm->chunks[u] = ((fm->f_dims[u]+dataset->layout.u.chunk.dim[u])-1) / dataset->layout.u.chunk.dim[u];
/* Track total number of chunks in dataset */
nchunks *= fm->chunks[u];
@ -2653,7 +2665,6 @@ H5D_create_chunk_file_map_hyper(fm_map *fm)
hssize_t sel_end[H5O_LAYOUT_NDIMS]; /* Offset of high bound of file selection */
hssize_t start_coords[H5O_LAYOUT_NDIMS]; /* Starting coordinates of selection */
hssize_t coords[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */
hsize_t count[H5O_LAYOUT_NDIMS]; /* Hyperslab count information */
hssize_t end[H5O_LAYOUT_NDIMS]; /* Current coordinates of chunk */
hsize_t chunk_index; /* Index of chunk */
int curr_dim; /* Current dimension to increment */
@ -2675,21 +2686,21 @@ H5D_create_chunk_file_map_hyper(fm_map *fm)
/* Set initial chunk location & hyperslab size */
for(u=0; u<fm->f_ndims; u++) {
H5_CHECK_OVERFLOW(fm->layout->dim[u],hsize_t,hssize_t);
start_coords[u]=(sel_start[u]/(hssize_t)fm->layout->dim[u])*(hssize_t)fm->layout->dim[u];
H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u],hsize_t,hssize_t);
start_coords[u]=(sel_start[u]/(hssize_t)fm->layout->u.chunk.dim[u])*(hssize_t)fm->layout->u.chunk.dim[u];
coords[u]=start_coords[u];
count[u]=fm->layout->dim[u];
end[u]=(coords[u]+count[u])-1;
end[u]=(coords[u]+fm->chunk_dim[u])-1;
} /* end for */
/* Calculate the index of this chunk */
if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0)
if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
/* Iterate through each chunk in the dataset */
while(sel_points) {
/* Check for intersection of temporary chunk and file selection */
if(H5S_hyper_intersect_block(fm->file_space,coords,end)==TRUE) {
/* (Casting away const OK - QAK) */
if(H5S_hyper_intersect_block((H5S_t *)fm->file_space,coords,end)==TRUE) {
H5S_t *tmp_fchunk; /* Temporary file dataspace */
H5D_chunk_info_t *new_chunk_info; /* chunk information to insert into tree */
hssize_t schunk_points; /* Number of elements in chunk selection */
@ -2707,13 +2718,13 @@ H5D_create_chunk_file_map_hyper(fm_map *fm)
HGOTO_ERROR (H5E_DATASET, H5E_BADSELECT, FAIL, "unable to normalize dataspace by offset")
/* "AND" temporary chunk and current chunk */
if(H5S_select_hyperslab(tmp_fchunk,H5S_SELECT_AND,coords,NULL,count,NULL)<0) {
if(H5S_select_hyperslab(tmp_fchunk,H5S_SELECT_AND,coords,NULL,fm->chunk_dim,NULL)<0) {
(void)H5S_close(tmp_fchunk);
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't create chunk selection")
} /* end if */
/* Resize chunk's dataspace dimensions to size of chunk */
if(H5S_set_extent_real(tmp_fchunk,count)<0) {
if(H5S_set_extent_real(tmp_fchunk,fm->chunk_dim)<0) {
(void)H5S_close(tmp_fchunk);
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTSELECT, FAIL, "can't adjust chunk dimensions")
} /* end if */
@ -2775,9 +2786,9 @@ H5D_create_chunk_file_map_hyper(fm_map *fm)
curr_dim=(int)fm->f_ndims-1;
/* Increment chunk location in fastest changing dimension */
H5_CHECK_OVERFLOW(count[curr_dim],hsize_t,hssize_t);
coords[curr_dim]+=(hssize_t)count[curr_dim];
end[curr_dim]+=(hssize_t)count[curr_dim];
H5_CHECK_OVERFLOW(fm->chunk_dim[curr_dim],hsize_t,hssize_t);
coords[curr_dim]+=(hssize_t)fm->chunk_dim[curr_dim];
end[curr_dim]+=(hssize_t)fm->chunk_dim[curr_dim];
/* Bring chunk location back into bounds, if necessary */
if(coords[curr_dim]>sel_end[curr_dim]) {
@ -2789,12 +2800,12 @@ H5D_create_chunk_file_map_hyper(fm_map *fm)
curr_dim--;
/* Increment chunk location in current dimension */
coords[curr_dim]+=(hssize_t)count[curr_dim];
end[curr_dim]=(coords[curr_dim]+(hssize_t)count[curr_dim])-1;
coords[curr_dim]+=(hssize_t)fm->chunk_dim[curr_dim];
end[curr_dim]=(coords[curr_dim]+(hssize_t)fm->chunk_dim[curr_dim])-1;
} while(coords[curr_dim]>sel_end[curr_dim]);
/* Re-Calculate the index of this chunk */
if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0)
if(H5V_chunk_index(fm->f_ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
} /* end if */
} /* end while */
@ -2871,7 +2882,8 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm)
} /* end if */
else {
/* Just point at the memory dataspace & selection */
chunk_info->mspace=fm->mem_space;
/* (Casting away const OK -QAK) */
chunk_info->mspace=(H5S_t *)fm->mem_space;
/* Indicate that the chunk's memory space is shared */
chunk_info->mspace_shared=1;
@ -2931,10 +2943,8 @@ H5D_create_chunk_mem_map_hyper(const fm_map *fm)
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCOPY, FAIL, "unable to copy selection")
/* Compensate for the chunk offset */
for(u=0; u<fm->f_ndims; u++) {
H5_CHECK_OVERFLOW(fm->layout->dim[u],hsize_t,hssize_t);
for(u=0; u<fm->f_ndims; u++)
chunk_adjust[u]=adjust[u]-chunk_info->coords[u]; /*lint !e771 The adjust array will always be initialized */
} /* end for */
#ifdef QAK
{
int mpi_rank;
@ -3016,7 +3026,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_file_cb)
/* Calculate the index of this chunk */
if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0)
if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
/* Find correct chunk in file & memory TBBTs */
@ -3047,7 +3057,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize
new_chunk_info->index=chunk_index;
/* Create a dataspace for the chunk */
if((fspace = H5S_create_simple(fm->f_ndims,fm->layout->dim,NULL))==NULL) {
if((fspace = H5S_create_simple(fm->f_ndims,fm->chunk_dim,NULL))==NULL) {
H5FL_FREE(H5D_chunk_info_t,new_chunk_info);
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create dataspace for chunk")
} /* end if */
@ -3071,8 +3081,8 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize
/* Compute the chunk's coordinates */
for(u=0; u<fm->f_ndims; u++) {
H5_CHECK_OVERFLOW(fm->layout->dim[u],hsize_t,hssize_t);
new_chunk_info->coords[u]=(coords[u]/(hssize_t)fm->layout->dim[u])*(hssize_t)fm->layout->dim[u];
H5_CHECK_OVERFLOW(fm->layout->u.chunk.dim[u],hsize_t,hssize_t);
new_chunk_info->coords[u]=(coords[u]/(hssize_t)fm->layout->u.chunk.dim[u])*(hssize_t)fm->layout->u.chunk.dim[u];
} /* end for */
new_chunk_info->coords[fm->f_ndims]=0;
@ -3096,7 +3106,7 @@ H5D_chunk_file_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize
/* Get the coordinates of the element in the chunk */
for(u=0; u<fm->f_ndims; u++)
coords_in_chunk[u]=coords[u]%fm->layout->dim[u];
coords_in_chunk[u]=coords[u]%fm->layout->u.chunk.dim[u];
/* Add point to file selection for chunk */
if(H5S_select_elements(chunk_info->fspace,H5S_SELECT_APPEND,1,(const hssize_t **)coords_in_chunk)<0)
@ -3140,7 +3150,7 @@ H5D_chunk_mem_cb(void UNUSED *elem, hid_t UNUSED type_id, hsize_t ndims, hssize_
FUNC_ENTER_NOAPI_NOINIT(H5D_chunk_mem_cb)
/* Calculate the index of this chunk */
if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->dim,fm->down_chunks,&chunk_index)<0)
if(H5V_chunk_index((unsigned)ndims,coords,fm->layout->u.chunk.dim,fm->down_chunks,&chunk_index)<0)
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
/* Find correct chunk in file & memory TBBTs */
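
The H5V_chunk_index() calls above map an element's coordinates to the linear
index of the chunk that contains it: each coordinate is divided by the chunk
size in that dimension, and the resulting chunk-grid coordinates are
linearized in row-major order (fm->chunks[] above rounds partial chunks up).
Below is a small stand-alone sketch of that arithmetic; the real
H5V_chunk_index() takes precomputed "down" chunk counts and may differ in
details.

/* Sketch only: linear index of the chunk containing a given element */
#include <stdio.h>

typedef unsigned long long hsize_t;         /* assumed */

static hsize_t chunk_index(unsigned ndims, const hsize_t *coords,
                           const hsize_t *chunk_dim, const hsize_t *nchunks)
{
    hsize_t idx = 0;
    for (unsigned u = 0; u < ndims; u++)
        idx = idx * nchunks[u] + coords[u] / chunk_dim[u];  /* row-major linearization */
    return idx;
}

int main(void)
{
    /* hypothetical 100x100 dataset with 10x20 chunks */
    hsize_t dims[2]      = {100, 100};
    hsize_t chunk_dim[2] = {10, 20};
    hsize_t nchunks[2];

    for (unsigned u = 0; u < 2; u++)        /* round partial chunks up */
        nchunks[u] = (dims[u] + chunk_dim[u] - 1) / chunk_dim[u];

    /* element (25, 43) lies in chunk row 2, column 2 -> index 2*5 + 2 = 12 */
    hsize_t coords[2] = {25, 43};
    printf("chunk index = %llu\n", chunk_index(2, coords, chunk_dim, nchunks));
    return 0;
}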

View File

@ -248,11 +248,11 @@ H5F_istore_sizeof_rkey(H5F_t UNUSED *f, const void *_udata)
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_sizeof_rkey);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
nbytes = 4 + /*storage size */
4 + /*filter mask */
udata->mesg.ndims*8; /*dimension indices */
udata->mesg.u.chunk.ndims*8; /*dimension indices */
FUNC_LEAVE_NOAPI(nbytes);
}
@ -375,7 +375,7 @@ H5F_istore_debug_key (FILE *stream, H5F_t UNUSED *f, hid_t UNUSED dxpl_id, int i
"Filter mask:", key->filter_mask);
HDfprintf(stream, "%*s%-*s {", indent, "", fwidth,
"Logical offset:");
for (u=0; u<udata->mesg.ndims; u++)
for (u=0; u<udata->mesg.u.chunk.ndims; u++)
HDfprintf (stream, "%s%Hd", u?", ":"", key->offset[u]);
HDfputs ("}\n", stream);
@ -419,10 +419,10 @@ H5F_istore_cmp2(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda
assert(lt_key);
assert(rt_key);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
/* Compare the offsets but ignore the other fields */
ret_value = H5V_vector_cmp_s(udata->mesg.ndims, lt_key->offset, rt_key->offset);
ret_value = H5V_vector_cmp_s(udata->mesg.u.chunk.ndims, lt_key->offset, rt_key->offset);
done:
FUNC_LEAVE_NOAPI(ret_value);
@ -472,12 +472,12 @@ H5F_istore_cmp3(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda
assert(lt_key);
assert(rt_key);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
if (H5V_vector_lt_s(udata->mesg.ndims, udata->key.offset,
if (H5V_vector_lt_s(udata->mesg.u.chunk.ndims, udata->key.offset,
lt_key->offset)) {
ret_value = -1;
} else if (H5V_vector_ge_s(udata->mesg.ndims, udata->key.offset,
} else if (H5V_vector_ge_s(udata->mesg.u.chunk.ndims, udata->key.offset,
rt_key->offset)) {
ret_value = 1;
}
@ -525,7 +525,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
assert(lt_key);
assert(rt_key);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims < H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims < H5O_LAYOUT_NDIMS);
assert(addr_p);
/* Allocate new storage */
@ -541,7 +541,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
*/
lt_key->nbytes = udata->key.nbytes;
lt_key->filter_mask = udata->key.filter_mask;
for (u=0; u<udata->mesg.ndims; u++)
for (u=0; u<udata->mesg.u.chunk.ndims; u++)
lt_key->offset[u] = udata->key.offset[u];
/*
@ -551,12 +551,11 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
if (H5B_INS_LEFT != op) {
rt_key->nbytes = 0;
rt_key->filter_mask = 0;
for (u=0; u<udata->mesg.ndims; u++) {
assert (udata->mesg.dim[u] < HSSIZET_MAX);
assert (udata->key.offset[u]+(hssize_t)(udata->mesg.dim[u]) >
for (u=0; u<udata->mesg.u.chunk.ndims; u++) {
assert (udata->key.offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u]) >
udata->key.offset[u]);
rt_key->offset[u] = udata->key.offset[u] +
(hssize_t)(udata->mesg.dim[u]);
(hssize_t)(udata->mesg.u.chunk.dim[u]);
}
}
@ -610,8 +609,8 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void
assert(lt_key);
/* Is this *really* the requested chunk? */
for (u=0; u<udata->mesg.ndims; u++) {
if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.dim[u]))
for (u=0; u<udata->mesg.u.chunk.ndims; u++) {
if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u]))
HGOTO_DONE(FAIL);
}
@ -620,7 +619,7 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void
udata->key.nbytes = lt_key->nbytes;
udata->key.filter_mask = lt_key->filter_mask;
assert (lt_key->nbytes>0);
for (u = 0; u < udata->mesg.ndims; u++)
for (u = 0; u < udata->mesg.u.chunk.ndims; u++)
udata->key.offset[u] = lt_key->offset[u];
done:
@ -696,7 +695,7 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
assert("HDF5 INTERNAL ERROR -- see rpm" && 0);
HGOTO_ERROR(H5E_STORAGE, H5E_UNSUPPORTED, H5B_INS_ERROR, "internal error");
} else if (H5V_vector_eq_s (udata->mesg.ndims,
} else if (H5V_vector_eq_s (udata->mesg.u.chunk.ndims,
udata->key.offset, lt_key->offset) &&
lt_key->nbytes>0) {
/*
@ -735,20 +734,20 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
ret_value = H5B_INS_NOOP;
}
} else if (H5V_hyper_disjointp(udata->mesg.ndims,
lt_key->offset, udata->mesg.dim,
udata->key.offset, udata->mesg.dim)) {
assert(H5V_hyper_disjointp(udata->mesg.ndims,
rt_key->offset, udata->mesg.dim,
udata->key.offset, udata->mesg.dim));
} else if (H5V_hyper_disjointp(udata->mesg.u.chunk.ndims,
lt_key->offset, udata->mesg.u.chunk.dim,
udata->key.offset, udata->mesg.u.chunk.dim)) {
assert(H5V_hyper_disjointp(udata->mesg.u.chunk.ndims,
rt_key->offset, udata->mesg.u.chunk.dim,
udata->key.offset, udata->mesg.u.chunk.dim));
/*
* Split this node, inserting the new node to the right of the
* current node. The MD_KEY is where the split occurs.
*/
md_key->nbytes = udata->key.nbytes;
md_key->filter_mask = udata->key.filter_mask;
for (u=0; u<udata->mesg.ndims; u++) {
assert(0 == udata->key.offset[u] % udata->mesg.dim[u]);
for (u=0; u<udata->mesg.u.chunk.ndims; u++) {
assert(0 == udata->key.offset[u] % udata->mesg.u.chunk.dim[u]);
md_key->offset[u] = udata->key.offset[u];
}
@ -847,7 +846,7 @@ H5F_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd
}
HDfprintf(bt_udata->stream, " 0x%08x %8Zu %10a [",
lt_key->filter_mask, lt_key->nbytes, addr);
for (u=0; u<bt_udata->mesg.ndims; u++)
for (u=0; u<bt_udata->mesg.u.chunk.ndims; u++)
HDfprintf(bt_udata->stream, "%s%Hd", u?", ":"", lt_key->offset[u]);
HDfputs("]\n", bt_udata->stream);
@ -936,7 +935,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
udata.key.filter_mask = 0;
udata.addr = HADDR_UNDEF;
udata.key.nbytes = ent->chunk_size;
for (u=0; u<ent->layout.ndims; u++)
for (u=0; u<ent->layout.u.chunk.ndims; u++)
udata.key.offset[u] = ent->offset[u];
alloc = ent->alloc_size;
@ -974,7 +973,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
* Create the chunk it if it doesn't exist, or reallocate the chunk if
* its size changed. Then write the data into the file.
*/
if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.addr, &udata)<0)
if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
@ -1365,22 +1364,22 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
/* Get the chunk's size */
assert(layout->chunk_size>0);
H5_ASSIGN_OVERFLOW(chunk_size,layout->chunk_size,hsize_t,size_t);
assert(layout->u.chunk.size>0);
H5_ASSIGN_OVERFLOW(chunk_size,layout->u.chunk.size,hsize_t,size_t);
/* Search for the chunk in the cache */
if (rdcc->nslots>0) {
for (u=0, temp_idx=0; u<layout->ndims; u++) {
for (u=0, temp_idx=0; u<layout->u.chunk.ndims; u++) {
temp_idx += offset[u];
temp_idx *= layout->dim[u];
temp_idx *= layout->u.chunk.dim[u];
}
temp_idx += (hsize_t)(layout->addr);
temp_idx += (hsize_t)(layout->u.chunk.addr);
idx=H5F_HASH(f,temp_idx);
ent = rdcc->slot[idx];
if (ent && layout->ndims==ent->layout.ndims &&
H5F_addr_eq(layout->addr, ent->layout.addr)) {
for (u=0, found=TRUE; u<ent->layout.ndims; u++) {
if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
for (u=0, found=TRUE; u<ent->layout.u.chunk.ndims; u++) {
if (offset[u]!=ent->offset[u]) {
found = FALSE;
break;
@ -1508,7 +1507,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
ent->alloc_size = chunk_size;
H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout);
H5O_copy(H5O_PLINE_ID, pline, &ent->pline);
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
ent->offset[u] = offset[u];
ent->rd_count = chunk_size;
ent->wr_count = chunk_size;
@ -1645,10 +1644,10 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
x.dirty = TRUE;
H5O_copy (H5O_LAYOUT_ID, layout, &x.layout);
H5O_copy (H5O_PLINE_ID, pline, &x.pline);
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
x.offset[u] = offset[u];
assert(layout->chunk_size>0);
H5_ASSIGN_OVERFLOW(x.chunk_size,layout->chunk_size,hsize_t,size_t);
assert(layout->u.chunk.size>0);
H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t);
x.alloc_size = x.chunk_size;
x.chunk = chunk;
@ -1709,7 +1708,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
assert(f);
assert(dxpl_cache);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS);
assert(dcpl_cache);
assert(chunk_len_arr);
assert(chunk_offset_arr);
@ -1718,19 +1717,19 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
assert(buf);
#ifndef NDEBUG
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
#ifdef QAK
HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n"));
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size);
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
@ -1743,9 +1742,9 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* for the chunk has been defined, then don't load the chunk into the
* cache, just write the data to it directly.
*/
if (layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 &&
if (layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 &&
chunk_addr!=HADDR_UNDEF) {
if ((ret_value=H5F_contig_readvv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
if ((ret_value=H5F_contig_readvv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file");
} /* end if */
else {
@ -1813,7 +1812,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
assert(f);
assert(dxpl_cache);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS);
assert(dcpl_cache);
assert(chunk_len_arr);
assert(chunk_offset_arr);
@ -1822,19 +1821,19 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
assert(buf);
#ifndef NDEBUG
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
#ifdef QAK
HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n"));
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size);
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
@ -1852,14 +1851,14 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* writing to other elements in the same chunk. Do a direct
* write-through of only the elements requested.
*/
if ((layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 && chunk_addr!=HADDR_UNDEF)
if ((layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 && chunk_addr!=HADDR_UNDEF)
|| (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & f->shared->flags))) {
#ifdef H5_HAVE_PARALLEL
/* Additional sanity check when operating in parallel */
if (chunk_addr==HADDR_UNDEF || dcpl_cache->pline.nused>0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
#endif /* H5_HAVE_PARALLEL */
if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
if ((ret_value=H5F_contig_writevv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
} /* end if */
else {
@ -1872,7 +1871,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->chunk_size)
if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->u.chunk.size)
relax = TRUE;
else
relax = FALSE;
@ -1933,14 +1932,14 @@ H5F_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ )
/* Check args */
assert(f);
assert(layout && H5D_CHUNKED == layout->type);
assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
#ifndef NDEBUG
for (u = 0; u < layout->ndims; u++)
assert(layout->dim[u] > 0);
for (u = 0; u < layout->u.chunk.ndims; u++)
assert(layout->u.chunk.dim[u] > 0);
#endif
udata.mesg.ndims = layout->ndims;
if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->addr)/*out*/) < 0)
udata.mesg.u.chunk.ndims = layout->u.chunk.ndims;
if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->u.chunk.addr)/*out*/) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "can't create B-tree");
done:
@ -1976,7 +1975,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr)
FUNC_ENTER_NOAPI(H5F_istore_allocated, 0);
HDmemset(&udata, 0, sizeof udata);
udata.mesg.ndims = ndims;
udata.mesg.u.chunk.ndims = ndims;
if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree");
@ -2018,20 +2017,20 @@ H5F_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
FUNC_ENTER_NOAPI_NOINIT(H5F_istore_get_addr);
assert(f);
assert(layout && (layout->ndims > 0));
assert(layout && (layout->u.chunk.ndims > 0));
assert(offset);
/* Check for udata struct to return */
udata = (_udata!=NULL ? _udata : &tmp_udata);
/* Initialize the information about the chunk we are looking for */
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
udata->key.offset[u] = offset[u];
udata->mesg = *layout;
udata->addr = HADDR_UNDEF;
/* Go get the chunk information */
if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->addr, udata)<0) {
if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, udata)<0) {
H5E_clear(NULL);
HGOTO_ERROR(H5E_BTREE,H5E_NOTFOUND,HADDR_UNDEF,"Can't locate chunk info");
@ -2189,8 +2188,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
assert(f);
assert(space_dim);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->u.chunk.addr));
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
assert(dc_plist);
@ -2230,9 +2229,9 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
* Set up indices to go through all chunks. (Future improvement:
* should allocate only chunks that have no file space assigned yet.)
*/
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
chunk_offset[u] = 0;
chunk_size = layout->chunk_size;
chunk_size = layout->u.chunk.size;
/* Check the dataset's fill-value status */
if (H5P_is_fill_value_defined(&fill, &fill_status) < 0)
@ -2297,12 +2296,12 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
/* Look for chunk in cache */
for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) {
/* Make certain we are dealing with the correct B-tree, etc */
if (layout->ndims==ent->layout.ndims &&
H5F_addr_eq(layout->addr, ent->layout.addr)) {
if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
/* Assume a match */
chunk_exists = 1;
for(u = 0; u < layout->ndims && chunk_exists; u++) {
for(u = 0; u < layout->u.chunk.ndims && chunk_exists; u++) {
if(ent->offset[u] != chunk_offset[u])
chunk_exists = 0; /* Reset if no match */
} /* end for */
@ -2317,11 +2316,11 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
udata.addr = HADDR_UNDEF;
H5_CHECK_OVERFLOW(chunk_size,hsize_t,size_t);
udata.key.nbytes = (size_t)chunk_size;
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
udata.key.offset[u] = chunk_offset[u];
/* Allocate the chunk with all processes */
if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0)
if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
/* Check if fill values should be written to blocks */
@ -2350,8 +2349,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
} /* end if */
/* Increment indices */
for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) {
chunk_offset[i] += layout->dim[i];
for (i=layout->u.chunk.ndims-1, carry=1; i>=0 && carry; --i) {
chunk_offset[i] += layout->u.chunk.dim[i];
if (chunk_offset[i] >= (hssize_t)(space_dim[i]))
chunk_offset[i] = 0;
else
@ -2498,8 +2497,8 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
/* Check args */
assert(f);
assert(layout && H5D_CHUNKED == layout->type);
assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->u.chunk.addr));
assert(space);
/* Go get the rank & dimensions */
@ -2515,10 +2514,10 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
next = ent->next;
/* Make certain we are dealing with the correct B-tree, etc */
if (layout->ndims==ent->layout.ndims &&
H5F_addr_eq(layout->addr, ent->layout.addr)) {
if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
found = 0;
for(u = 0; u < ent->layout.ndims - 1; u++) {
for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
if((hsize_t)ent->offset[u] > curr_dims[u]) {
found = 1;
break;
@ -2529,7 +2528,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
if(found) {
#if defined (H5F_ISTORE_DEBUG)
HDfputs("cache:remove:[", stdout);
for(u = 0; u < ent->layout.ndims - 1; u++) {
for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]);
}
HDfputs("]\n", stdout);
@ -2548,13 +2547,13 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
HDmemset(&udata, 0, sizeof udata);
udata.stream = stdout;
udata.mesg.addr = layout->addr;
udata.mesg.ndims = layout->ndims;
for(u = 0; u < udata.mesg.ndims; u++)
udata.mesg.dim[u] = layout->dim[u];
udata.mesg.u.chunk.addr = layout->u.chunk.addr;
udata.mesg.u.chunk.ndims = layout->u.chunk.ndims;
for(u = 0; u < udata.mesg.u.chunk.ndims; u++)
udata.mesg.u.chunk.dim[u] = layout->u.chunk.dim[u];
udata.dims = curr_dims;
if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->addr, &udata) < 0)
if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->u.chunk.addr, &udata) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over B-tree");
done:
@ -2596,11 +2595,11 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a
FUNC_ENTER_NOAPI_NOINIT(H5F_istore_prune_extent);
/* Figure out what chunks are no longer in use for the specified extent and release them */
for(u = 0; u < bt_udata->mesg.ndims - 1; u++)
for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++)
if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) {
#if defined (H5F_ISTORE_DEBUG)
HDfputs("b-tree:remove:[", bt_udata->stream);
for(u = 0; u < bt_udata->mesg.ndims - 1; u++) {
for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) {
HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "",
lt_key->offset[u]);
}
@ -2612,7 +2611,7 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a
udata.mesg = bt_udata->mesg;
/* Remove */
if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.addr, &udata) < 0)
if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.u.chunk.addr, &udata) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_ITER_ERROR, "unable to remove entry");
break;
} /* end if */
@ -2709,6 +2708,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
hsize_t count[H5O_LAYOUT_NDIMS]; /*element count of hyperslab */
hsize_t size[H5O_LAYOUT_NDIMS]; /*current size of dimensions */
H5S_t *space_chunk = NULL; /*dataspace for a chunk */
hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */
hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
int srank; /*current # of dimensions (signed) */
unsigned rank; /*current # of dimensions */
@ -2725,8 +2725,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
/* Check args */
assert(f);
assert(layout && H5D_CHUNKED == layout->type);
assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->u.chunk.addr));
assert(space);
/* Get necessary properties from property list */
@ -2749,10 +2749,12 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
/* Copy current dimensions */
for(u = 0; u < rank; u++)
size[u] = curr_dims[u];
size[u] = layout->dim[u];
size[u] = layout->u.chunk.dim[u];
/* Create a data space for a chunk & set the extent */
if(NULL == (space_chunk = H5S_create_simple(rank,layout->dim,NULL)))
for(u = 0; u < rank; u++)
chunk_dims[u] = layout->u.chunk.dim[u];
if(NULL == (space_chunk = H5S_create_simple(rank,chunk_dims,NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace");
/*
@ -2760,18 +2762,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
* loop through the chunks copying each chunk from the application to the
* chunk cache.
*/
for(u = 0; u < layout->ndims; u++) {
idx_max[u] = (size[u] - 1) / layout->dim[u] + 1;
for(u = 0; u < layout->u.chunk.ndims; u++) {
idx_max[u] = (size[u] - 1) / layout->u.chunk.dim[u] + 1;
idx_cur[u] = 0;
} /* end for */
/* Loop over all chunks */
carry=0;
while(carry==0) {
for(u = 0, naccessed = 1; u < layout->ndims; u++) {
for(u = 0, naccessed = 1; u < layout->u.chunk.ndims; u++) {
/* The location and size of the chunk being accessed */
chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->dim[u]);
sub_size[u] = MIN((idx_cur[u] + 1) * layout->dim[u],
chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->u.chunk.dim[u]);
sub_size[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u],
size[u]) - chunk_offset[u];
naccessed *= sub_size[u];
} /* end for */
@ -2780,8 +2782,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
* Figure out what chunks have to be initialized. These are the chunks where the dataspace
* extent boundary is within the chunk
*/
for(u = 0, found = 0; u < layout->ndims - 1; u++) {
end_chunk = chunk_offset[u] + layout->dim[u];
for(u = 0, found = 0; u < layout->u.chunk.ndims - 1; u++) {
end_chunk = chunk_offset[u] + layout->u.chunk.dim[u];
if(end_chunk > size[u]) {
found = 1;
break;
@ -2798,15 +2800,15 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space");
for(u = 0; u < rank; u++)
count[u] = MIN((idx_cur[u] + 1) * layout->dim[u], size[u] - chunk_offset[u]);
count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]);
#if defined (H5F_ISTORE_DEBUG)
HDfputs("cache:initialize:offset:[", stdout);
for(u = 0; u < layout->ndims - 1; u++)
for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]);
HDfputs("]", stdout);
HDfputs(":count:[", stdout);
for(u = 0; u < layout->ndims - 1; u++)
for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", count[u]);
HDfputs("]\n", stdout);
#endif
@ -2829,7 +2831,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
} /*found */
/* Increment indices */
for(i = layout->ndims - 1, carry = 1; i >= 0 && carry; --i) {
for(i = layout->u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) {
if(++idx_cur[i] >= idx_max[i])
idx_cur[i] = 0;
else
@ -2876,14 +2878,14 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
/* Check if the B-tree has been created in the file */
if(H5F_addr_defined(layout->addr)) {
if(H5F_addr_defined(layout->u.chunk.addr)) {
/* Iterate through the entries in the cache, checking for the chunks to be deleted */
for (ent=rdcc->head; ent; ent=next) {
/* Get pointer to next node, in case this one is deleted */
next=ent->next;
/* Is the chunk to be deleted this cache entry? */
if(layout->addr==ent->layout.addr)
if(layout->u.chunk.addr==ent->layout.u.chunk.addr)
/* Remove entry without flushing */
if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0)
HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
@ -2894,7 +2896,7 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
udata.mesg = *layout;
/* Delete entire B-tree */
if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0)
if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTDELETE, 0, "unable to delete chunk B-tree");
} /* end if */
@ -2930,7 +2932,7 @@ H5F_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, had
FUNC_ENTER_NOAPI(H5F_istore_dump_btree, FAIL);
HDmemset(&udata, 0, sizeof udata);
udata.mesg.ndims = ndims;
udata.mesg.u.chunk.ndims = ndims;
udata.stream = stream;
if(stream)
HDfprintf(stream, " Address: %a\n",addr);
@ -3034,7 +3036,7 @@ H5F_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int inden
FUNC_ENTER_NOAPI(H5F_istore_debug, FAIL);
HDmemset (&udata, 0, sizeof udata);
udata.mesg.ndims = ndims;
udata.mesg.u.chunk.ndims = ndims;
H5B_debug (f, dxpl_id, addr, stream, indent, fwidth, H5B_ISTORE, &udata);
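Every hunk in this file (and in the files below) moves a field access from the old flat layout members (layout->ndims, layout->dim[], layout->addr, layout->chunk_size, layout->buf) to a per-class member under a union. The real definition lives in H5Oprivate.h, which is not part of this excerpt; the following is only a sketch reconstructed from the fields referenced throughout this diff, so the exact types, member order, and any fields not referenced here are assumptions.
/* Sketch only -- inferred from the accesses in this commit, not copied
 * from H5Oprivate.h. */
typedef struct H5O_layout_contig_t {
    haddr_t  addr;                      /* File address of the data           */
    hsize_t  size;                      /* Size of the data, in bytes         */
} H5O_layout_contig_t;
typedef struct H5O_layout_chunk_t {
    haddr_t  addr;                      /* File address of the chunk B-tree   */
    unsigned ndims;                     /* Number of chunk dimensions         */
    size_t   dim[H5O_LAYOUT_NDIMS];     /* Chunk dimensions, in elements      */
    size_t   size;                      /* Size of one chunk, in bytes        */
} H5O_layout_chunk_t;
typedef struct H5O_layout_compact_t {
    hbool_t  dirty;                     /* Whether the buffer needs flushing  */
    size_t   size;                      /* Size of the buffer, in bytes       */
    void    *buf;                       /* In-memory copy of the raw data     */
} H5O_layout_compact_t;
typedef struct H5O_layout_t {
    H5D_layout_t type;                  /* Storage class: contiguous, chunked or compact */
    unsigned     version;               /* Version of the layout message      */
    union {
        H5O_layout_contig_t  contig;
        H5O_layout_chunk_t   chunk;
        H5O_layout_compact_t compact;
    } u;
} H5O_layout_t;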

View File

@ -86,4 +86,11 @@ extern H5D_dxpl_cache_t H5D_def_dxpl_cache;
H5_DLL herr_t H5D_alloc_storage (H5F_t *f, hid_t dxpl_id, H5D_t *dset, H5D_time_alloc_t time_alloc,
hbool_t update_time, hbool_t full_overwrite);
/* Testing functions */
#ifdef H5D_TESTING
H5_DLL herr_t H5D_layout_version_test(hid_t did, unsigned *version);
H5_DLL herr_t H5D_layout_contig_size_test(hid_t did, hsize_t *size);
#endif /* H5D_TESTING */
#endif /*_H5Dpkg_H*/

View File

@ -46,7 +46,7 @@
#define H5D_CRT_CHUNK_DIM_DEF 1
/* Definitions for chunk size */
#define H5D_CRT_CHUNK_SIZE_NAME "chunk_size"
#define H5D_CRT_CHUNK_SIZE_SIZE sizeof(hsize_t[H5O_LAYOUT_NDIMS])
#define H5D_CRT_CHUNK_SIZE_SIZE sizeof(size_t[H5O_LAYOUT_NDIMS])
#define H5D_CRT_CHUNK_SIZE_DEF {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,\
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}
/* Definitions for fill value. size=0 means fill value will be 0 as

View File

@ -227,7 +227,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "external data read failed");
} else {
/* Pass along the vector of sequences to read */
if((ret_value=H5F_contig_readvv(f, layout->chunk_size, layout->addr,
if((ret_value=H5F_contig_readvv(f, layout->u.contig.size, layout->u.contig.addr,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
dxpl_id, buf))<0)
@ -336,7 +336,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "external data write failed");
} else {
/* Pass along the vector of sequences to write */
if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, layout->addr,
if ((ret_value=H5F_contig_writevv(f, layout->u.contig.size, layout->u.contig.addr,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
dxpl_id, buf))<0)

src/H5Dtest.c (new file, 116 lines)
View File

@ -0,0 +1,116 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the files COPYING and Copyright.html. COPYING can be found at the root *
* of the source code distribution tree; Copyright.html can be found at the *
* root level of an installed copy of the electronic HDF5 document set and *
* is linked from the top-level documents page. It can also be found at *
* http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
* Thursday, May 27, 2004
*
* Purpose: Dataset testing functions.
*/
#define H5D_PACKAGE /*suppress error about including H5Dpkg */
#define H5D_TESTING /*suppress warning about H5D testing funcs*/
/* Pablo information */
/* (Put before include files to avoid problems with inline functions) */
#define PABLO_MASK H5Dtest_mask
#include "H5private.h" /* Generic Functions */
#include "H5Dpkg.h" /* Datasets */
#include "H5Eprivate.h" /* Error handling */
#include "H5Iprivate.h" /* ID Functions */
/* Interface initialization */
#define INTERFACE_INIT NULL
static int interface_initialize_g = 0;
/*--------------------------------------------------------------------------
NAME
H5D_layout_version_test
PURPOSE
Determine the storage layout version for a dataset's layout information
USAGE
herr_t H5D_layout_version_test(did, version)
hid_t did; IN: Dataset to query
unsigned *version; OUT: Pointer to location to place version info
RETURNS
Non-negative on success, negative on failure
DESCRIPTION
Checks the version of the storage layout information for a dataset.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
DO NOT USE THIS FUNCTION FOR ANYTHING EXCEPT TESTING
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
herr_t
H5D_layout_version_test(hid_t did, unsigned *version)
{
H5D_t *dset; /* Pointer to dataset to query */
herr_t ret_value=SUCCEED; /* return value */
FUNC_ENTER_NOAPI(H5D_layout_version_test, FAIL);
/* Check args */
if (NULL==(dset=H5I_object_verify(did, H5I_DATASET)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
if(version)
*version=dset->layout.version;
done:
FUNC_LEAVE_NOAPI(ret_value);
} /* H5D_layout_version_test() */
/*--------------------------------------------------------------------------
NAME
H5D_layout_contig_size_test
PURPOSE
Determine the size of a contiguous layout for a dataset's layout information
USAGE
herr_t H5D_layout_contig_size_test(did, size)
hid_t did; IN: Dataset to query
hsize_t *size; OUT: Pointer to location to place size info
RETURNS
Non-negative on success, negative on failure
DESCRIPTION
Checks the size of a contiguous dataset's storage.
GLOBAL VARIABLES
COMMENTS, BUGS, ASSUMPTIONS
DO NOT USE THIS FUNCTION FOR ANYTHING EXCEPT TESTING
EXAMPLES
REVISION LOG
--------------------------------------------------------------------------*/
herr_t
H5D_layout_contig_size_test(hid_t did, hsize_t *size)
{
H5D_t *dset; /* Pointer to dataset to query */
herr_t ret_value=SUCCEED; /* return value */
FUNC_ENTER_NOAPI(H5D_layout_contig_size_test, FAIL);
/* Check args */
if (NULL==(dset=H5I_object_verify(did, H5I_DATASET)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
if(size) {
assert(dset->layout.type==H5D_CONTIGUOUS);
*size=dset->layout.u.contig.size;
} /* end if */
done:
FUNC_LEAVE_NOAPI(ret_value);
} /* H5D_layout_contig_size_test() */
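Both helpers are compiled only when H5D_TESTING is defined (see the H5Dpkg.h hunk above), so a test program has to define that macro before pulling in the package header. A minimal driver might look like the sketch below; the file name, dataset name, and the expectation that freshly created datasets carry layout version 3 are assumptions for illustration, not part of this commit.
/* Illustrative test sketch -- names and expected values are assumptions. */
#define H5D_TESTING              /* expose the H5D_layout_*_test() helpers */
#include "hdf5.h"
#include "H5Dpkg.h"
static int
check_layout_version(void)
{
    hsize_t  dims[1] = {10};
    unsigned version = 0;
    hid_t    file, space, dset;
    file  = H5Fcreate("tlayout_check.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    space = H5Screate_simple(1, dims, NULL);
    dset  = H5Dcreate(file, "contig", H5T_NATIVE_INT, space, H5P_DEFAULT);
    /* Ask the library which layout message version the dataset carries */
    if (H5D_layout_version_test(dset, &version) < 0)
        return -1;
    H5Dclose(dset);
    H5Sclose(space);
    H5Fclose(file);
    return (version == 3) ? 0 : -1;     /* version 3 expected for new files (assumed) */
}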

View File

@ -70,7 +70,7 @@ H5F_compact_readvv(H5F_t UNUSED *f, const H5O_layout_t *layout,
FUNC_ENTER_NOAPI(H5F_compact_readvv, FAIL);
/* Use the vectorized memory copy routine to do actual work */
if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
done:
@ -112,10 +112,10 @@ H5F_compact_writevv(H5F_t UNUSED *f, H5O_layout_t *layout,
FUNC_ENTER_NOAPI(H5F_compact_writevv, FAIL);
/* Use the vectorized memory copy routine to do actual work */
if((ret_value=H5V_memcpyvv(layout->buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
if((ret_value=H5V_memcpyvv(layout->u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
layout->dirty = TRUE;
layout->u.compact.dirty = TRUE;
done:
FUNC_LEAVE_NOAPI(ret_value);

View File

@ -75,8 +75,6 @@ H5FL_BLK_DEFINE_STATIC(zero_fill);
herr_t
H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout)
{
hsize_t size; /* Size of contiguous block of data */
unsigned u; /* Local index variable */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_contig_create, FAIL);
@ -85,14 +83,8 @@ H5F_contig_create(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout)
assert(f);
assert(layout);
/* Compute size */
size=layout->dim[0];
for (u = 1; u < layout->ndims; u++)
size *= layout->dim[u];
assert (size>0);
/* Allocate space for the contiguous data */
if (HADDR_UNDEF==(layout->addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, size)))
if (HADDR_UNDEF==(layout->u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.size)))
HGOTO_ERROR (H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space");
done:
@ -145,8 +137,8 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout,
assert(f);
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
assert(layout && H5D_CONTIGUOUS==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(H5F_addr_defined(layout->u.contig.addr));
assert(layout->u.contig.size>0);
assert(space);
assert(elmt_size>0);
@ -167,7 +159,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout,
#endif /* H5_HAVE_PARALLEL */
/* Get the number of elements in the dataset's dataspace */
snpoints = H5S_get_simple_extent_npoints(space);
snpoints = H5S_GET_SIMPLE_EXTENT_NPOINTS(space);
assert(snpoints>=0);
H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t);
@ -214,7 +206,7 @@ H5F_contig_fill(H5F_t *f, hid_t dxpl_id, struct H5O_layout_t *layout,
} /* end else */
/* Start at the beginning of the dataset */
addr = layout->addr;
addr = layout->u.contig.addr;
/* Loop through writing the fill value to the dataset */
while (npoints>0) {
@ -290,8 +282,6 @@ done:
herr_t
H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
{
hsize_t size; /* Size of contiguous block of data */
unsigned u; /* Local index variable */
herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(H5F_contig_delete, FAIL);
@ -300,17 +290,12 @@ H5F_contig_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
assert(f);
assert(layout);
/* Compute size */
size=layout->dim[0];
for (u = 1; u < layout->ndims; u++)
size *= layout->dim[u];
/* Check for overlap with the sieve buffer and reset it */
if (H5F_sieve_overlap_clear(f, dxpl_id, layout->addr, size)<0)
if (H5F_sieve_overlap_clear(f, dxpl_id, layout->u.contig.addr, layout->u.contig.size)<0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to clear sieve buffer");
/* Free the file space for the chunk */
if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->addr, size)<0)
if (H5MF_xfree(f, H5FD_MEM_DRAW, dxpl_id, layout->u.contig.addr, layout->u.contig.size)<0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free object header");
done:
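With the size carried in the layout itself, H5F_contig_create() and H5F_contig_delete() no longer multiply out layout->dim[]; they just consume layout->u.contig.size, which the dataset-creation path is expected to compute once from the dataspace and element size. The arithmetic is unchanged in spirit -- roughly the standalone sketch below, which is illustrative and not code from this commit.
#include <stdio.h>
/* Standalone sketch: the byte count a contiguous layout caches
 * (layout->u.contig.size in this commit) is the number of dataspace
 * elements times the size of one element. */
static unsigned long long
contig_size(int rank, const unsigned long long *dims,
            unsigned long long elmt_size)
{
    unsigned long long nbytes = elmt_size;
    int i;
    for (i = 0; i < rank; i++)
        nbytes *= dims[i];
    return nbytes;
}
int
main(void)
{
    unsigned long long dims[2] = {100, 200};
    /* 100 x 200 doubles -> 160000 bytes */
    printf("%llu\n", contig_size(2, dims, (unsigned long long)sizeof(double)));
    return 0;
}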

View File

@ -248,11 +248,11 @@ H5F_istore_sizeof_rkey(H5F_t UNUSED *f, const void *_udata)
FUNC_ENTER_NOAPI_NOINIT_NOFUNC(H5F_istore_sizeof_rkey);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
nbytes = 4 + /*storage size */
4 + /*filter mask */
udata->mesg.ndims*8; /*dimension indices */
udata->mesg.u.chunk.ndims*8; /*dimension indices */
FUNC_LEAVE_NOAPI(nbytes);
}
@ -375,7 +375,7 @@ H5F_istore_debug_key (FILE *stream, H5F_t UNUSED *f, hid_t UNUSED dxpl_id, int i
"Filter mask:", key->filter_mask);
HDfprintf(stream, "%*s%-*s {", indent, "", fwidth,
"Logical offset:");
for (u=0; u<udata->mesg.ndims; u++)
for (u=0; u<udata->mesg.u.chunk.ndims; u++)
HDfprintf (stream, "%s%Hd", u?", ":"", key->offset[u]);
HDfputs ("}\n", stream);
@ -419,10 +419,10 @@ H5F_istore_cmp2(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda
assert(lt_key);
assert(rt_key);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
/* Compare the offsets but ignore the other fields */
ret_value = H5V_vector_cmp_s(udata->mesg.ndims, lt_key->offset, rt_key->offset);
ret_value = H5V_vector_cmp_s(udata->mesg.u.chunk.ndims, lt_key->offset, rt_key->offset);
done:
FUNC_LEAVE_NOAPI(ret_value);
@ -472,12 +472,12 @@ H5F_istore_cmp3(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, void *_uda
assert(lt_key);
assert(rt_key);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
if (H5V_vector_lt_s(udata->mesg.ndims, udata->key.offset,
if (H5V_vector_lt_s(udata->mesg.u.chunk.ndims, udata->key.offset,
lt_key->offset)) {
ret_value = -1;
} else if (H5V_vector_ge_s(udata->mesg.ndims, udata->key.offset,
} else if (H5V_vector_ge_s(udata->mesg.u.chunk.ndims, udata->key.offset,
rt_key->offset)) {
ret_value = 1;
}
@ -525,7 +525,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
assert(lt_key);
assert(rt_key);
assert(udata);
assert(udata->mesg.ndims > 0 && udata->mesg.ndims < H5O_LAYOUT_NDIMS);
assert(udata->mesg.u.chunk.ndims > 0 && udata->mesg.u.chunk.ndims < H5O_LAYOUT_NDIMS);
assert(addr_p);
/* Allocate new storage */
@ -541,7 +541,7 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
*/
lt_key->nbytes = udata->key.nbytes;
lt_key->filter_mask = udata->key.filter_mask;
for (u=0; u<udata->mesg.ndims; u++)
for (u=0; u<udata->mesg.u.chunk.ndims; u++)
lt_key->offset[u] = udata->key.offset[u];
/*
@ -551,12 +551,11 @@ H5F_istore_new_node(H5F_t *f, hid_t dxpl_id, H5B_ins_t op,
if (H5B_INS_LEFT != op) {
rt_key->nbytes = 0;
rt_key->filter_mask = 0;
for (u=0; u<udata->mesg.ndims; u++) {
assert (udata->mesg.dim[u] < HSSIZET_MAX);
assert (udata->key.offset[u]+(hssize_t)(udata->mesg.dim[u]) >
for (u=0; u<udata->mesg.u.chunk.ndims; u++) {
assert (udata->key.offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u]) >
udata->key.offset[u]);
rt_key->offset[u] = udata->key.offset[u] +
(hssize_t)(udata->mesg.dim[u]);
(hssize_t)(udata->mesg.u.chunk.dim[u]);
}
}
@ -610,8 +609,8 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void
assert(lt_key);
/* Is this *really* the requested chunk? */
for (u=0; u<udata->mesg.ndims; u++) {
if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.dim[u]))
for (u=0; u<udata->mesg.u.chunk.ndims; u++) {
if (udata->key.offset[u] >= lt_key->offset[u]+(hssize_t)(udata->mesg.u.chunk.dim[u]))
HGOTO_DONE(FAIL);
}
@ -620,7 +619,7 @@ H5F_istore_found(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, haddr_t addr, const void
udata->key.nbytes = lt_key->nbytes;
udata->key.filter_mask = lt_key->filter_mask;
assert (lt_key->nbytes>0);
for (u = 0; u < udata->mesg.ndims; u++)
for (u = 0; u < udata->mesg.u.chunk.ndims; u++)
udata->key.offset[u] = lt_key->offset[u];
done:
@ -696,7 +695,7 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
assert("HDF5 INTERNAL ERROR -- see rpm" && 0);
HGOTO_ERROR(H5E_STORAGE, H5E_UNSUPPORTED, H5B_INS_ERROR, "internal error");
} else if (H5V_vector_eq_s (udata->mesg.ndims,
} else if (H5V_vector_eq_s (udata->mesg.u.chunk.ndims,
udata->key.offset, lt_key->offset) &&
lt_key->nbytes>0) {
/*
@ -735,20 +734,20 @@ H5F_istore_insert(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *_lt_key,
ret_value = H5B_INS_NOOP;
}
} else if (H5V_hyper_disjointp(udata->mesg.ndims,
lt_key->offset, udata->mesg.dim,
udata->key.offset, udata->mesg.dim)) {
assert(H5V_hyper_disjointp(udata->mesg.ndims,
rt_key->offset, udata->mesg.dim,
udata->key.offset, udata->mesg.dim));
} else if (H5V_hyper_disjointp(udata->mesg.u.chunk.ndims,
lt_key->offset, udata->mesg.u.chunk.dim,
udata->key.offset, udata->mesg.u.chunk.dim)) {
assert(H5V_hyper_disjointp(udata->mesg.u.chunk.ndims,
rt_key->offset, udata->mesg.u.chunk.dim,
udata->key.offset, udata->mesg.u.chunk.dim));
/*
* Split this node, inserting the new node to the right of the
* current node. The MD_KEY is where the split occurs.
*/
md_key->nbytes = udata->key.nbytes;
md_key->filter_mask = udata->key.filter_mask;
for (u=0; u<udata->mesg.ndims; u++) {
assert(0 == udata->key.offset[u] % udata->mesg.dim[u]);
for (u=0; u<udata->mesg.u.chunk.ndims; u++) {
assert(0 == udata->key.offset[u] % udata->mesg.u.chunk.dim[u]);
md_key->offset[u] = udata->key.offset[u];
}
@ -847,7 +846,7 @@ H5F_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd
}
HDfprintf(bt_udata->stream, " 0x%08x %8Zu %10a [",
lt_key->filter_mask, lt_key->nbytes, addr);
for (u=0; u<bt_udata->mesg.ndims; u++)
for (u=0; u<bt_udata->mesg.u.chunk.ndims; u++)
HDfprintf(bt_udata->stream, "%s%Hd", u?", ":"", lt_key->offset[u]);
HDfputs("]\n", bt_udata->stream);
@ -936,7 +935,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
udata.key.filter_mask = 0;
udata.addr = HADDR_UNDEF;
udata.key.nbytes = ent->chunk_size;
for (u=0; u<ent->layout.ndims; u++)
for (u=0; u<ent->layout.u.chunk.ndims; u++)
udata.key.offset[u] = ent->offset[u];
alloc = ent->alloc_size;
@ -974,7 +973,7 @@ H5F_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
* Create the chunk if it doesn't exist, or reallocate the chunk if
* its size changed. Then write the data into the file.
*/
if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.addr, &udata)<0)
if (H5B_insert(f, dxpl_id, H5B_ISTORE, ent->layout.u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, buf)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
@ -1365,22 +1364,22 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
/* Get the chunk's size */
assert(layout->chunk_size>0);
H5_ASSIGN_OVERFLOW(chunk_size,layout->chunk_size,hsize_t,size_t);
assert(layout->u.chunk.size>0);
H5_ASSIGN_OVERFLOW(chunk_size,layout->u.chunk.size,hsize_t,size_t);
/* Search for the chunk in the cache */
if (rdcc->nslots>0) {
for (u=0, temp_idx=0; u<layout->ndims; u++) {
for (u=0, temp_idx=0; u<layout->u.chunk.ndims; u++) {
temp_idx += offset[u];
temp_idx *= layout->dim[u];
temp_idx *= layout->u.chunk.dim[u];
}
temp_idx += (hsize_t)(layout->addr);
temp_idx += (hsize_t)(layout->u.chunk.addr);
idx=H5F_HASH(f,temp_idx);
ent = rdcc->slot[idx];
if (ent && layout->ndims==ent->layout.ndims &&
H5F_addr_eq(layout->addr, ent->layout.addr)) {
for (u=0, found=TRUE; u<ent->layout.ndims; u++) {
if (ent && layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
for (u=0, found=TRUE; u<ent->layout.u.chunk.ndims; u++) {
if (offset[u]!=ent->offset[u]) {
found = FALSE;
break;
@ -1508,7 +1507,7 @@ H5F_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, con
ent->alloc_size = chunk_size;
H5O_copy(H5O_LAYOUT_ID, layout, &ent->layout);
H5O_copy(H5O_PLINE_ID, pline, &ent->pline);
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
ent->offset[u] = offset[u];
ent->rd_count = chunk_size;
ent->wr_count = chunk_size;
@ -1645,10 +1644,10 @@ H5F_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
x.dirty = TRUE;
H5O_copy (H5O_LAYOUT_ID, layout, &x.layout);
H5O_copy (H5O_PLINE_ID, pline, &x.pline);
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
x.offset[u] = offset[u];
assert(layout->chunk_size>0);
H5_ASSIGN_OVERFLOW(x.chunk_size,layout->chunk_size,hsize_t,size_t);
assert(layout->u.chunk.size>0);
H5_ASSIGN_OVERFLOW(x.chunk_size,layout->u.chunk.size,hsize_t,size_t);
x.alloc_size = x.chunk_size;
x.chunk = chunk;
@ -1709,7 +1708,7 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
assert(f);
assert(dxpl_cache);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS);
assert(dcpl_cache);
assert(chunk_len_arr);
assert(chunk_offset_arr);
@ -1718,19 +1717,19 @@ H5F_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
assert(buf);
#ifndef NDEBUG
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
#ifdef QAK
HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n"));
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size);
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
@ -1743,9 +1742,9 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* for the chunk has been defined, then don't load the chunk into the
* cache, just write the data to it directly.
*/
if (layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 &&
if (layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 &&
chunk_addr!=HADDR_UNDEF) {
if ((ret_value=H5F_contig_readvv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
if ((ret_value=H5F_contig_readvv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file");
} /* end if */
else {
@ -1813,7 +1812,7 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
assert(f);
assert(dxpl_cache);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS);
assert(dcpl_cache);
assert(chunk_len_arr);
assert(chunk_offset_arr);
@ -1822,19 +1821,19 @@ H5F_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
assert(buf);
#ifndef NDEBUG
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
assert(chunk_coords[u]>=0); /*negative coordinates not supported (yet) */
#endif
/* Get the address of this chunk on disk */
#ifdef QAK
HDfprintf(stderr,"%s: chunk_coords={",FUNC);
for(u=0; u<layout->ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->ndims-1) ? ", " : "}\n"));
for(u=0; u<layout->u.chunk.ndims; u++)
HDfprintf(stderr,"%Hd%s",chunk_coords[u],(u<(layout->u.chunk.ndims-1) ? ", " : "}\n"));
#endif /* QAK */
chunk_addr=H5F_istore_get_addr(f, dxpl_id, layout, chunk_coords, &udata);
#ifdef QAK
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->chunk_size);
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Hu\n",FUNC,chunk_addr,layout->u.chunk.size);
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
@ -1852,14 +1851,14 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* writing to other elements in the same chunk. Do a direct
* write-through of only the elements requested.
*/
if ((layout->chunk_size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 && chunk_addr!=HADDR_UNDEF)
if ((layout->u.chunk.size>f->shared->rdcc_nbytes && dcpl_cache->pline.nused==0 && chunk_addr!=HADDR_UNDEF)
|| (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & f->shared->flags))) {
#ifdef H5_HAVE_PARALLEL
/* Additional sanity check when operating in parallel */
if (chunk_addr==HADDR_UNDEF || dcpl_cache->pline.nused>0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
#endif /* H5_HAVE_PARALLEL */
if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
if ((ret_value=H5F_contig_writevv(f, (hsize_t)layout->u.chunk.size, chunk_addr, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, dxpl_id, buf))<0)
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
} /* end if */
else {
@ -1872,7 +1871,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->chunk_size)
if(chunk_max_nseq==1 && chunk_len_arr[0] == layout->u.chunk.size)
relax = TRUE;
else
relax = FALSE;
@ -1933,14 +1932,14 @@ H5F_istore_create(H5F_t *f, hid_t dxpl_id, H5O_layout_t *layout /*out */ )
/* Check args */
assert(f);
assert(layout && H5D_CHUNKED == layout->type);
assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
#ifndef NDEBUG
for (u = 0; u < layout->ndims; u++)
assert(layout->dim[u] > 0);
for (u = 0; u < layout->u.chunk.ndims; u++)
assert(layout->u.chunk.dim[u] > 0);
#endif
udata.mesg.ndims = layout->ndims;
if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->addr)/*out*/) < 0)
udata.mesg.u.chunk.ndims = layout->u.chunk.ndims;
if (H5B_create(f, dxpl_id, H5B_ISTORE, &udata, &(layout->u.chunk.addr)/*out*/) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "can't create B-tree");
done:
@ -1976,7 +1975,7 @@ H5F_istore_allocated(H5F_t *f, hid_t dxpl_id, unsigned ndims, haddr_t addr)
FUNC_ENTER_NOAPI(H5F_istore_allocated, 0);
HDmemset(&udata, 0, sizeof udata);
udata.mesg.ndims = ndims;
udata.mesg.u.chunk.ndims = ndims;
if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_iter_allocated, addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree");
@ -2018,20 +2017,20 @@ H5F_istore_get_addr(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
FUNC_ENTER_NOAPI_NOINIT(H5F_istore_get_addr);
assert(f);
assert(layout && (layout->ndims > 0));
assert(layout && (layout->u.chunk.ndims > 0));
assert(offset);
/* Check for udata struct to return */
udata = (_udata!=NULL ? _udata : &tmp_udata);
/* Initialize the information about the chunk we are looking for */
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
udata->key.offset[u] = offset[u];
udata->mesg = *layout;
udata->addr = HADDR_UNDEF;
/* Go get the chunk information */
if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->addr, udata)<0) {
if (H5B_find (f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, udata)<0) {
H5E_clear(NULL);
HGOTO_ERROR(H5E_BTREE,H5E_NOTFOUND,HADDR_UNDEF,"Can't locate chunk info");
@ -2189,8 +2188,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
assert(f);
assert(space_dim);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(layout->u.chunk.ndims>0 && layout->u.chunk.ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->u.chunk.addr));
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
assert(dc_plist);
@ -2230,9 +2229,9 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
* Set up indices to go through all chunks. (Future improvement:
* should allocate only chunks that have no file space assigned yet.)
*/
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
chunk_offset[u] = 0;
chunk_size = layout->chunk_size;
chunk_size = layout->u.chunk.size;
/* Check the dataset's fill-value status */
if (H5P_is_fill_value_defined(&fill, &fill_status) < 0)
@ -2297,12 +2296,12 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
/* Look for chunk in cache */
for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) {
/* Make certain we are dealing with the correct B-tree, etc */
if (layout->ndims==ent->layout.ndims &&
H5F_addr_eq(layout->addr, ent->layout.addr)) {
if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
/* Assume a match */
chunk_exists = 1;
for(u = 0; u < layout->ndims && chunk_exists; u++) {
for(u = 0; u < layout->u.chunk.ndims && chunk_exists; u++) {
if(ent->offset[u] != chunk_offset[u])
chunk_exists = 0; /* Reset if no match */
} /* end for */
@ -2317,11 +2316,11 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
udata.addr = HADDR_UNDEF;
H5_CHECK_OVERFLOW(chunk_size,hsize_t,size_t);
udata.key.nbytes = (size_t)chunk_size;
for (u=0; u<layout->ndims; u++)
for (u=0; u<layout->u.chunk.ndims; u++)
udata.key.offset[u] = chunk_offset[u];
/* Allocate the chunk with all processes */
if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0)
if (H5B_insert(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
/* Check if fill values should be written to blocks */
@ -2350,8 +2349,8 @@ H5F_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5O_layout_t *layout,
} /* end if */
/* Increment indices */
for (i=layout->ndims-1, carry=1; i>=0 && carry; --i) {
chunk_offset[i] += layout->dim[i];
for (i=layout->u.chunk.ndims-1, carry=1; i>=0 && carry; --i) {
chunk_offset[i] += layout->u.chunk.dim[i];
if (chunk_offset[i] >= (hssize_t)(space_dim[i]))
chunk_offset[i] = 0;
else
@ -2498,8 +2497,8 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
/* Check args */
assert(f);
assert(layout && H5D_CHUNKED == layout->type);
assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->u.chunk.addr));
assert(space);
/* Go get the rank & dimensions */
@ -2515,10 +2514,10 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
next = ent->next;
/* Make certain we are dealing with the correct B-tree, etc */
if (layout->ndims==ent->layout.ndims &&
H5F_addr_eq(layout->addr, ent->layout.addr)) {
if (layout->u.chunk.ndims==ent->layout.u.chunk.ndims &&
H5F_addr_eq(layout->u.chunk.addr, ent->layout.u.chunk.addr)) {
found = 0;
for(u = 0; u < ent->layout.ndims - 1; u++) {
for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
if((hsize_t)ent->offset[u] > curr_dims[u]) {
found = 1;
break;
@ -2529,7 +2528,7 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
if(found) {
#if defined (H5F_ISTORE_DEBUG)
HDfputs("cache:remove:[", stdout);
for(u = 0; u < ent->layout.ndims - 1; u++) {
for(u = 0; u < ent->layout.u.chunk.ndims - 1; u++) {
HDfprintf(stdout, "%s%Hd", u ? ", " : "", ent->offset[u]);
}
HDfputs("]\n", stdout);
@ -2548,13 +2547,13 @@ H5F_istore_prune_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
HDmemset(&udata, 0, sizeof udata);
udata.stream = stdout;
udata.mesg.addr = layout->addr;
udata.mesg.ndims = layout->ndims;
for(u = 0; u < udata.mesg.ndims; u++)
udata.mesg.dim[u] = layout->dim[u];
udata.mesg.u.chunk.addr = layout->u.chunk.addr;
udata.mesg.u.chunk.ndims = layout->u.chunk.ndims;
for(u = 0; u < udata.mesg.u.chunk.ndims; u++)
udata.mesg.u.chunk.dim[u] = layout->u.chunk.dim[u];
udata.dims = curr_dims;
if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->addr, &udata) < 0)
if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5F_istore_prune_extent, layout->u.chunk.addr, &udata) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over B-tree");
done:
@ -2596,11 +2595,11 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a
FUNC_ENTER_NOAPI_NOINIT(H5F_istore_prune_extent);
/* Figure out what chunks are no longer in use for the specified extent and release them */
for(u = 0; u < bt_udata->mesg.ndims - 1; u++)
for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++)
if((hsize_t)lt_key->offset[u] > bt_udata->dims[u]) {
#if defined (H5F_ISTORE_DEBUG)
HDfputs("b-tree:remove:[", bt_udata->stream);
for(u = 0; u < bt_udata->mesg.ndims - 1; u++) {
for(u = 0; u < bt_udata->mesg.u.chunk.ndims - 1; u++) {
HDfprintf(bt_udata->stream, "%s%Hd", u ? ", " : "",
lt_key->offset[u]);
}
@ -2612,7 +2611,7 @@ H5F_istore_prune_extent(H5F_t *f, hid_t dxpl_id, void *_lt_key, haddr_t UNUSED a
udata.mesg = bt_udata->mesg;
/* Remove */
if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.addr, &udata) < 0)
if(H5B_remove(f, dxpl_id, H5B_ISTORE, bt_udata->mesg.u.chunk.addr, &udata) < 0)
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, H5B_ITER_ERROR, "unable to remove entry");
break;
} /* end if */
@ -2709,6 +2708,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
hsize_t count[H5O_LAYOUT_NDIMS]; /*element count of hyperslab */
hsize_t size[H5O_LAYOUT_NDIMS]; /*current size of dimensions */
H5S_t *space_chunk = NULL; /*dataspace for a chunk */
hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /*current chunk dimensions */
hsize_t curr_dims[H5O_LAYOUT_NDIMS]; /*current dataspace dimensions */
int srank; /*current # of dimensions (signed) */
unsigned rank; /*current # of dimensions */
@ -2725,8 +2725,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
/* Check args */
assert(f);
assert(layout && H5D_CHUNKED == layout->type);
assert(layout->ndims > 0 && layout->ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->addr));
assert(layout->u.chunk.ndims > 0 && layout->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(layout->u.chunk.addr));
assert(space);
/* Get necessary properties from property list */
@ -2749,10 +2749,12 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
/* Copy current dimensions */
for(u = 0; u < rank; u++)
size[u] = curr_dims[u];
size[u] = layout->dim[u];
size[u] = layout->u.chunk.dim[u];
/* Create a data space for a chunk & set the extent */
if(NULL == (space_chunk = H5S_create_simple(rank,layout->dim,NULL)))
for(u = 0; u < rank; u++)
chunk_dims[u] = layout->u.chunk.dim[u];
if(NULL == (space_chunk = H5S_create_simple(rank,chunk_dims,NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace");
/*
@ -2760,18 +2762,18 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
* loop through the chunks copying each chunk from the application to the
* chunk cache.
*/
for(u = 0; u < layout->ndims; u++) {
idx_max[u] = (size[u] - 1) / layout->dim[u] + 1;
for(u = 0; u < layout->u.chunk.ndims; u++) {
idx_max[u] = (size[u] - 1) / layout->u.chunk.dim[u] + 1;
idx_cur[u] = 0;
} /* end for */
/* Loop over all chunks */
carry=0;
while(carry==0) {
for(u = 0, naccessed = 1; u < layout->ndims; u++) {
for(u = 0, naccessed = 1; u < layout->u.chunk.ndims; u++) {
/* The location and size of the chunk being accessed */
chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->dim[u]);
sub_size[u] = MIN((idx_cur[u] + 1) * layout->dim[u],
chunk_offset[u] = idx_cur[u] * (hssize_t)(layout->u.chunk.dim[u]);
sub_size[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u],
size[u]) - chunk_offset[u];
naccessed *= sub_size[u];
} /* end for */
@ -2780,8 +2782,8 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
* Figure out what chunks have to be initialized. These are the chunks where the dataspace
* extent boundary is within the chunk
*/
for(u = 0, found = 0; u < layout->ndims - 1; u++) {
end_chunk = chunk_offset[u] + layout->dim[u];
for(u = 0, found = 0; u < layout->u.chunk.ndims - 1; u++) {
end_chunk = chunk_offset[u] + layout->u.chunk.dim[u];
if(end_chunk > size[u]) {
found = 1;
break;
@ -2798,15 +2800,15 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space");
for(u = 0; u < rank; u++)
count[u] = MIN((idx_cur[u] + 1) * layout->dim[u], size[u] - chunk_offset[u]);
count[u] = MIN((idx_cur[u] + 1) * layout->u.chunk.dim[u], size[u] - chunk_offset[u]);
#if defined (H5F_ISTORE_DEBUG)
HDfputs("cache:initialize:offset:[", stdout);
for(u = 0; u < layout->ndims - 1; u++)
for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]);
HDfputs("]", stdout);
HDfputs(":count:[", stdout);
for(u = 0; u < layout->ndims - 1; u++)
for(u = 0; u < layout->u.chunk.ndims - 1; u++)
HDfprintf(stdout, "%s%Hd", u ? ", " : "", count[u]);
HDfputs("]\n", stdout);
#endif
@ -2829,7 +2831,7 @@ H5F_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
} /*found */
/* Increment indices */
for(i = layout->ndims - 1, carry = 1; i >= 0 && carry; --i) {
for(i = layout->u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) {
if(++idx_cur[i] >= idx_max[i])
idx_cur[i] = 0;
else
@ -2876,14 +2878,14 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
/* Check if the B-tree has been created in the file */
if(H5F_addr_defined(layout->addr)) {
if(H5F_addr_defined(layout->u.chunk.addr)) {
/* Iterate through the entries in the cache, checking for the chunks to be deleted */
for (ent=rdcc->head; ent; ent=next) {
/* Get pointer to next node, in case this one is deleted */
next=ent->next;
/* Is the chunk to be deleted this cache entry? */
if(layout->addr==ent->layout.addr)
if(layout->u.chunk.addr==ent->layout.u.chunk.addr)
/* Remove entry without flushing */
if (H5F_istore_preempt(f, &dxpl_cache, dxpl_id, ent, FALSE )<0)
HGOTO_ERROR (H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks");
@ -2894,7 +2896,7 @@ H5F_istore_delete(H5F_t *f, hid_t dxpl_id, const struct H5O_layout_t *layout)
udata.mesg = *layout;
/* Delete entire B-tree */
if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->addr, &udata)<0)
if(H5B_delete(f, dxpl_id, H5B_ISTORE, layout->u.chunk.addr, &udata)<0)
HGOTO_ERROR(H5E_IO, H5E_CANTDELETE, 0, "unable to delete chunk B-tree");
} /* end if */
@ -2930,7 +2932,7 @@ H5F_istore_dump_btree(H5F_t *f, hid_t dxpl_id, FILE *stream, unsigned ndims, had
FUNC_ENTER_NOAPI(H5F_istore_dump_btree, FAIL);
HDmemset(&udata, 0, sizeof udata);
udata.mesg.ndims = ndims;
udata.mesg.u.chunk.ndims = ndims;
udata.stream = stream;
if(stream)
HDfprintf(stream, " Address: %a\n",addr);
@ -3034,7 +3036,7 @@ H5F_istore_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr, FILE * stream, int inden
FUNC_ENTER_NOAPI(H5F_istore_debug, FAIL);
HDmemset (&udata, 0, sizeof udata);
udata.mesg.ndims = ndims;
udata.mesg.u.chunk.ndims = ndims;
H5B_debug (f, dxpl_id, addr, stream, indent, fwidth, H5B_ISTORE, &udata);


@ -227,7 +227,7 @@ H5F_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "external data read failed");
} else {
/* Pass along the vector of sequences to read */
if((ret_value=H5F_contig_readvv(f, layout->chunk_size, layout->addr,
if((ret_value=H5F_contig_readvv(f, layout->u.contig.size, layout->u.contig.addr,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
dxpl_id, buf))<0)
@ -336,7 +336,7 @@ H5F_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "external data write failed");
} else {
/* Pass along the vector of sequences to write */
if ((ret_value=H5F_contig_writevv(f, layout->chunk_size, layout->addr,
if ((ret_value=H5F_contig_writevv(f, layout->u.contig.size, layout->u.contig.addr,
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
dxpl_id, buf))<0)


@ -200,7 +200,7 @@ H5O_attr_decode(H5F_t *f, hid_t dxpl_id, const uint8_t *p, H5O_shared_t UNUSED *
p += attr->ds_size;
/* Compute the size of the data */
H5_ASSIGN_OVERFLOW(attr->data_size,H5S_get_simple_extent_npoints(attr->ds)*H5T_get_size(attr->dt),hsize_t,size_t);
H5_ASSIGN_OVERFLOW(attr->data_size,H5S_GET_SIMPLE_EXTENT_NPOINTS(attr->ds)*H5T_get_size(attr->dt),hsize_t,size_t);
/* Go get the data */
if(attr->data_size) {


@ -58,9 +58,11 @@ const H5O_class_t H5O_LAYOUT[1] = {{
}};
/* For forward and backward compatibility. Version is 1 when space is
* allocated; 2 when space is delayed for allocation. */
* allocated; 2 when space is delayed for allocation; 3 is default now and
* is revised to just store information needed for each storage type. */
#define H5O_LAYOUT_VERSION_1 1
#define H5O_LAYOUT_VERSION_2 2
#define H5O_LAYOUT_VERSION_3 3
/* Interface initialization */
#define PABLO_MASK H5O_layout_mask
@ -92,13 +94,18 @@ H5FL_DEFINE(H5O_layout_t);
* Added version number 2 case depends on if space has been allocated
* at the moment when layout header message is updated.
*
* Quincey Koziol, 2004-5-21
* Added version number 3 case to straighten out problems with contiguous
* layout's sizes (was encoding them as 4-byte values when they were
* really n-byte values (where n usually is 8)) and additionally clean up
* the information written out.
*
*-------------------------------------------------------------------------
*/
static void *
H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, const uint8_t *p, H5O_shared_t UNUSED *sh)
{
H5O_layout_t *mesg = NULL;
int version;
unsigned u;
void *ret_value; /* Return value */
@ -114,44 +121,108 @@ H5O_layout_decode(H5F_t *f, hid_t UNUSED dxpl_id, const uint8_t *p, H5O_shared_t
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
/* Version. 1 when space allocated; 2 when space allocation is delayed */
version = *p++;
if (version!=H5O_LAYOUT_VERSION_1 && version!=H5O_LAYOUT_VERSION_2)
mesg->version = *p++;
if (mesg->version<H5O_LAYOUT_VERSION_1 || mesg->version>H5O_LAYOUT_VERSION_3)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for layout message");
/* Dimensionality */
mesg->ndims = *p++;
if (mesg->ndims>H5O_LAYOUT_NDIMS)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large");
if(mesg->version < H5O_LAYOUT_VERSION_3) {
unsigned ndims; /* Num dimensions in chunk */
/* Layout class */
mesg->type = *p++;
assert(H5D_CONTIGUOUS == mesg->type || H5D_CHUNKED == mesg->type || H5D_COMPACT == mesg->type);
/* Dimensionality */
ndims = *p++;
if (ndims>H5O_LAYOUT_NDIMS)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large");
/* Reserved bytes */
p += 5;
/* Layout class */
mesg->type = *p++;
assert(H5D_CONTIGUOUS == mesg->type || H5D_CHUNKED == mesg->type || H5D_COMPACT == mesg->type);
/* Address */
if(mesg->type!=H5D_COMPACT)
H5F_addr_decode(f, &p, &(mesg->addr));
/* Reserved bytes */
p += 5;
/* Read the size */
for (u = 0; u < mesg->ndims; u++)
UINT32DECODE(p, mesg->dim[u]);
/* Address */
if(mesg->type==H5D_CONTIGUOUS)
H5F_addr_decode(f, &p, &(mesg->u.contig.addr));
else if(mesg->type==H5D_CHUNKED)
H5F_addr_decode(f, &p, &(mesg->u.chunk.addr));
if(mesg->type == H5D_COMPACT) {
UINT32DECODE(p, mesg->size);
if(mesg->size > 0) {
if(NULL==(mesg->buf=H5MM_malloc(mesg->size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for fill value");
HDmemcpy(mesg->buf, p, mesg->size);
p += mesg->size;
/* Read the size */
if(mesg->type!=H5D_CHUNKED) {
size_t temp_dim[H5O_LAYOUT_NDIMS];
for (u = 0; u < ndims; u++)
UINT32DECODE(p, temp_dim[u]);
/* Don't compute size of contiguous storage here, due to possible
* truncation of the dimension sizes when they were stored in this
* version of the layout message. Compute the contiguous storage
* size in the dataset code, where we've got the dataspace
* information available also. - QAK 5/26/04
*/
} /* end if */
else {
mesg->u.chunk.ndims=ndims;
for (u = 0; u < ndims; u++)
UINT32DECODE(p, mesg->u.chunk.dim[u]);
/* Compute chunk size */
for (u=1, mesg->u.chunk.size=mesg->u.chunk.dim[0]; u<ndims; u++)
mesg->u.chunk.size *= mesg->u.chunk.dim[u];
} /* end if */
if(mesg->type == H5D_COMPACT) {
UINT32DECODE(p, mesg->u.compact.size);
if(mesg->u.compact.size > 0) {
if(NULL==(mesg->u.compact.buf=H5MM_malloc(mesg->u.compact.size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for compact data buffer");
HDmemcpy(mesg->u.compact.buf, p, mesg->u.compact.size);
p += mesg->u.compact.size;
}
}
}
else if(mesg->type == H5D_CHUNKED || mesg->type == H5D_CONTIGUOUS) {
/* Compute chunk size */
for (u=1, mesg->chunk_size=mesg->dim[0]; u<mesg->ndims; u++)
mesg->chunk_size *= mesg->dim[u];
} /* end if */
else {
/* Layout class */
mesg->type = *p++;
/* Interpret the rest of the message according to the layout class */
switch(mesg->type) {
case H5D_CONTIGUOUS:
H5F_addr_decode(f, &p, &(mesg->u.contig.addr));
H5F_DECODE_LENGTH(f, p, mesg->u.contig.size);
break;
case H5D_CHUNKED:
/* Dimensionality */
mesg->u.chunk.ndims = *p++;
if (mesg->u.chunk.ndims>H5O_LAYOUT_NDIMS)
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "dimensionality is too large");
/* B-tree address */
H5F_addr_decode(f, &p, &(mesg->u.chunk.addr));
/* Chunk dimensions */
for (u = 0; u < mesg->u.chunk.ndims; u++)
UINT32DECODE(p, mesg->u.chunk.dim[u]);
/* Compute chunk size */
for (u=1, mesg->u.chunk.size=mesg->u.chunk.dim[0]; u<mesg->u.chunk.ndims; u++)
mesg->u.chunk.size *= mesg->u.chunk.dim[u];
break;
case H5D_COMPACT:
UINT16DECODE(p, mesg->u.compact.size);
if(mesg->u.compact.size > 0) {
if(NULL==(mesg->u.compact.buf=H5MM_malloc(mesg->u.compact.size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for compact data buffer");
HDmemcpy(mesg->u.compact.buf, p, mesg->u.compact.size);
p += mesg->u.compact.size;
} /* end if */
break;
default:
HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "Invalid layout class");
} /* end switch */
} /* end else */
/* Set return value */
ret_value=mesg;
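Because a version 1 or 2 message may carry truncated 32-bit dimension sizes, the contiguous storage size is now rebuilt from the dataspace rather than trusted from the decoded message. A minimal sketch of that fix-up, assuming a dataspace `space` and datatype `type` are in scope (not the literal dataset-open code):

    /* Recompute contiguous storage size from the dataspace and datatype,
     * ignoring any possibly-truncated size decoded from an old message. */
    hssize_t snelmts;                   /* Number of elements (signed) */
    hsize_t nelmts;                     /* Number of elements */

    if((snelmts = H5S_GET_SIMPLE_EXTENT_NPOINTS(space)) < 0)
        HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get number of elements");
    H5_ASSIGN_OVERFLOW(nelmts, snelmts, hssize_t, hsize_t);
    layout->u.contig.size = nelmts * H5T_get_size(type);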
@ -175,6 +246,10 @@ done:
* Programmer: Robb Matzke
* Wednesday, October 8, 1997
*
* Note:
* Quincey Koziol, 2004-5-21
* We write out version 3 messages by default now.
*
* Modifications:
* Robb Matzke, 1998-07-20
* Rearranged the message to add a version number at the beginning.
@ -183,6 +258,12 @@ done:
* Added version number 2 case depends on if space has been allocated
* at the moment when layout header message is updated.
*
* Quincey Koziol, 2004-5-21
* Added version number 3 case to straighten out problems with contiguous
* layout's sizes (was encoding them as 4-byte values when they were
* really n-byte values (where n usually is 8)) and additionally clean up
* the information written out.
*
*-------------------------------------------------------------------------
*/
static herr_t
@ -197,46 +278,50 @@ H5O_layout_encode(H5F_t *f, uint8_t *p, const void *_mesg)
/* check args */
assert(f);
assert(mesg);
assert(mesg->ndims > 0 && mesg->ndims <= H5O_LAYOUT_NDIMS);
assert(p);
/* Version: 1 when space allocated; 2 when space allocation is delayed */
if(mesg->type==H5D_CONTIGUOUS) {
if(mesg->addr==HADDR_UNDEF)
*p++ = H5O_LAYOUT_VERSION_2;
else
*p++ = H5O_LAYOUT_VERSION_1;
} else if(mesg->type==H5D_COMPACT) {
*p++ = H5O_LAYOUT_VERSION_2;
} else
*p++ = H5O_LAYOUT_VERSION_1;
/* Version 3 by default now. */
*p++ = H5O_LAYOUT_VERSION_3;
/* number of dimensions */
*p++ = mesg->ndims;
/* layout class */
/* Layout class */
*p++ = mesg->type;
/* reserved bytes should be zero */
for (u=0; u<5; u++)
*p++ = 0;
/* Write out layout class specific information */
switch(mesg->type) {
case H5D_CONTIGUOUS:
H5F_addr_encode(f, &p, mesg->u.contig.addr);
H5F_ENCODE_LENGTH(f, p, mesg->u.contig.size);
break;
/* data or B-tree address */
if(mesg->type!=H5D_COMPACT)
H5F_addr_encode(f, &p, mesg->addr);
case H5D_CHUNKED:
/* Number of dimensions */
assert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
*p++ = mesg->u.chunk.ndims;
/* dimension size */
for (u = 0; u < mesg->ndims; u++)
UINT32ENCODE(p, mesg->dim[u]);
/* B-tree address */
H5F_addr_encode(f, &p, mesg->u.chunk.addr);
/* Dimension sizes */
for (u = 0; u < mesg->u.chunk.ndims; u++)
UINT32ENCODE(p, mesg->u.chunk.dim[u]);
break;
case H5D_COMPACT:
/* Size of raw data */
UINT16ENCODE(p, mesg->u.compact.size);
/* Raw data */
if(mesg->u.compact.size>0 && mesg->u.compact.buf) {
HDmemcpy(p, mesg->u.compact.buf, mesg->u.compact.size);
p += mesg->u.compact.size;
} /* end if */
break;
default:
HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, FAIL, "Invalid layout class");
break;
} /* end switch */
if(mesg->type==H5D_COMPACT) {
UINT32ENCODE(p, mesg->size);
if(mesg->size>0 && mesg->buf) {
HDmemcpy(p, mesg->buf, mesg->size);
p += mesg->size;
}
}
done:
FUNC_LEAVE_NOAPI(ret_value);
}
@ -279,11 +364,11 @@ H5O_layout_copy(const void *_mesg, void *_dest)
/* Deep copy the buffer for compact datasets also */
if(mesg->type==H5D_COMPACT) {
/* Allocate memory for the raw data */
if (NULL==(dest->buf=H5MM_malloc(dest->size)))
if (NULL==(dest->u.compact.buf=H5MM_malloc(dest->u.compact.size)))
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "unable to allocate memory for compact dataset");
/* Copy over the raw data */
HDmemcpy(dest->buf,mesg->buf,dest->size);
HDmemcpy(dest->u.compact.buf,mesg->u.compact.buf,dest->u.compact.size);
} /* end if */
/* Set return value */
@ -323,18 +408,37 @@ H5O_layout_meta_size(H5F_t *f, const void *_mesg)
/* check args */
assert(f);
assert(mesg);
assert(mesg->ndims > 0 && mesg->ndims <= H5O_LAYOUT_NDIMS);
ret_value = 1 + /* Version number */
1 + /* layout class type */
1 + /* dimensionality */
5 + /* reserved bytes */
mesg->ndims * 4; /* size of each dimension */
1; /* layout class type */
if(mesg->type==H5D_COMPACT)
ret_value += 4; /* size field for compact dataset */
else
ret_value += H5F_SIZEOF_ADDR(f); /* file address of data or B-tree for chunked dataset */
switch(mesg->type) {
case H5D_CONTIGUOUS:
ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
ret_value += H5F_SIZEOF_SIZE(f); /* Length of data */
break;
case H5D_CHUNKED:
/* Number of dimensions (1 byte) */
assert(mesg->u.chunk.ndims > 0 && mesg->u.chunk.ndims <= H5O_LAYOUT_NDIMS);
ret_value++;
/* B-tree address */
ret_value += H5F_SIZEOF_ADDR(f); /* Address of data */
/* Dimension sizes */
ret_value += mesg->u.chunk.ndims*4;
break;
case H5D_COMPACT:
/* Size of raw data */
ret_value+=2;
break;
default:
HGOTO_ERROR(H5E_OHDR, H5E_CANTENCODE, 0, "Invalid layout class");
break;
} /* end switch */
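/* With 8-byte file addresses and lengths, for example, a version 3
 * contiguous layout message now encodes in 1 + 1 + 8 + 8 = 18 bytes
 * (version, layout class, data address, data length). */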
done:
FUNC_LEAVE_NOAPI(ret_value);
@ -370,11 +474,10 @@ H5O_layout_size(H5F_t *f, const void *_mesg)
/* check args */
assert(f);
assert(mesg);
assert(mesg->ndims > 0 && mesg->ndims <= H5O_LAYOUT_NDIMS);
ret_value = H5O_layout_meta_size(f, mesg);
if(mesg->type==H5D_COMPACT)
ret_value += mesg->size;/* data for compact dataset */
ret_value += mesg->u.compact.size;/* data for compact dataset */
done:
FUNC_LEAVE_NOAPI(ret_value);
}
@ -406,7 +509,7 @@ H5O_layout_reset (void *_mesg)
if(mesg) {
/* Free the compact storage buffer */
if(mesg->type==H5D_COMPACT)
mesg->buf=H5MM_xfree(mesg->buf);
mesg->u.compact.buf=H5MM_xfree(mesg->u.compact.buf);
/* Reset the message */
mesg->type=H5D_CONTIGUOUS;
@ -443,7 +546,7 @@ H5O_layout_free (void *_mesg)
/* Free the compact storage buffer */
if(mesg->type==H5D_COMPACT)
mesg->buf=H5MM_xfree(mesg->buf);
mesg->u.compact.buf=H5MM_xfree(mesg->u.compact.buf);
H5FL_FREE(H5O_layout_t,mesg);
@ -536,21 +639,30 @@ H5O_layout_debug(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const void *_mesg, FILE
assert(indent >= 0);
assert(fwidth >= 0);
HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
H5D_CHUNKED == mesg->type ? "B-tree address:" : "Data address:",
mesg->addr);
HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
"Number of dimensions:",
(unsigned long) (mesg->ndims));
/* Size */
HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Size:");
for (u = 0; u < mesg->ndims; u++) {
HDfprintf(stream, "%s%lu", u ? ", " : "",
(unsigned long) (mesg->dim[u]));
}
HDfprintf(stream, "}\n");
if(mesg->type==H5D_CHUNKED) {
HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
"B-tree address:", mesg->u.chunk.addr);
HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
"Number of dimensions:",
(unsigned long) (mesg->u.chunk.ndims));
/* Size */
HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Size:");
for (u = 0; u < mesg->u.chunk.ndims; u++) {
HDfprintf(stream, "%s%lu", u ? ", " : "",
(unsigned long) (mesg->u.chunk.dim[u]));
}
HDfprintf(stream, "}\n");
} /* end if */
else if(mesg->type==H5D_CONTIGUOUS) {
HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
"Data address:", mesg->u.contig.addr);
HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth,
"Data Size:", mesg->u.contig.size);
} /* end if */
else {
HDfprintf(stream, "%*s%-*s %Zu\n", indent, "", fwidth,
"Data Size:", mesg->u.compact.size);
} /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value);


@ -123,15 +123,32 @@ typedef struct H5O_efl_t {
*/
#define H5O_LAYOUT_NDIMS (H5S_MAX_RANK+1)
typedef struct H5O_layout_contig_t {
haddr_t addr; /* File address of data */
hsize_t size; /* Size of data in bytes */
} H5O_layout_contig_t;
typedef struct H5O_layout_chunk_t {
haddr_t addr; /* File address of B-tree */
unsigned ndims; /* Num dimensions in chunk */
size_t dim[H5O_LAYOUT_NDIMS]; /* Size of chunk in elements */
size_t size; /* Size of chunk in bytes */
} H5O_layout_chunk_t;
typedef struct H5O_layout_compact_t {
hbool_t dirty; /* Dirty flag for compact dataset */
size_t size; /* Size of buffer in bytes */
void *buf; /* Buffer for compact dataset */
} H5O_layout_compact_t;
typedef struct H5O_layout_t {
int type; /*type of layout, H5D_layout_t */
haddr_t addr; /*file address of data or B-tree */
unsigned ndims; /*num dimensions in stored data */
hsize_t dim[H5O_LAYOUT_NDIMS]; /*size of data or chunk in bytes */
hsize_t chunk_size; /*size of chunk in bytes */
hbool_t dirty; /*dirty flag for compact dataset */
size_t size; /*size of compact dataset in bytes */
void *buf; /*buffer for compact dataset */
H5D_layout_t type; /* Type of layout */
unsigned version; /* Version of message */
union {
H5O_layout_contig_t contig; /* Information for contiguous layout */
H5O_layout_chunk_t chunk; /* Information for chunked layout */
H5O_layout_compact_t compact; /* Information for compact layout */
} u;
} H5O_layout_t;
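A minimal sketch of how callers are expected to pick the per-class fields out of the new union; the helper name is hypothetical and only the struct members come from the patch:

    static haddr_t
    layout_data_addr(const H5O_layout_t *layout)
    {
        switch(layout->type) {
            case H5D_CONTIGUOUS:
                return layout->u.contig.addr;   /* address of the raw data */
            case H5D_CHUNKED:
                return layout->u.chunk.addr;    /* address of the chunk B-tree */
            default:
                return HADDR_UNDEF;             /* compact data lives in the object header */
        }
    }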
/* Enable reading/writing "bogus" messages */


@ -151,7 +151,7 @@ herr_t
H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
{
int i;
hsize_t real_dims[H5O_LAYOUT_NDIMS]; /* Full-sized array to hold chunk dims */
size_t real_dims[H5O_LAYOUT_NDIMS]; /* Full-sized array to hold chunk dims */
H5D_layout_t layout;
H5P_genplist_t *plist; /* Property list pointer */
herr_t ret_value=SUCCEED; /* return value */
@ -172,12 +172,14 @@ H5Pset_chunk(hid_t plist_id, int ndims, const hsize_t dim[/*ndims*/])
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID");
/* Initialize chunk dims to 0s */
HDmemset(real_dims,0,H5O_LAYOUT_NDIMS*sizeof(hsize_t));
HDmemset(real_dims,0,sizeof(real_dims));
for (i=0; i<ndims; i++) {
if (dim[i] <= 0)
if (dim[i] == 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all chunk dimensions must be positive");
if (dim[i] != (dim[i]&0xffffffff))
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "all chunk dimensions must be less than 2^32");
real_dims[i]=dim[i]; /* Store user's chunk dimensions */
}
} /* end for */
layout = H5D_CHUNKED;
if(H5P_set(plist, H5D_CRT_LAYOUT_NAME, &layout) < 0)
@ -219,10 +221,8 @@ done:
int
H5Pget_chunk(hid_t plist_id, int max_ndims, hsize_t dim[]/*out*/)
{
int i;
int ndims;
H5D_layout_t layout;
hsize_t chunk_size[H5O_LAYOUT_NDIMS];
H5P_genplist_t *plist; /* Property list pointer */
int ret_value;
@ -238,14 +238,20 @@ H5Pget_chunk(hid_t plist_id, int max_ndims, hsize_t dim[]/*out*/)
if(H5D_CHUNKED != layout)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a chunked storage layout");
if(H5P_get(plist, H5D_CRT_CHUNK_SIZE_NAME, chunk_size) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get chunk size");
if(H5P_get(plist, H5D_CRT_CHUNK_DIM_NAME, &ndims) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get chunk dimensionality");
/* Get the dimension sizes */
for (i=0; i<ndims && i<max_ndims && dim; i++)
dim[i] = chunk_size[i];
if(dim) {
int i;
size_t chunk_size[H5O_LAYOUT_NDIMS];
if(H5P_get(plist, H5D_CRT_CHUNK_SIZE_NAME, chunk_size) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get chunk size");
/* Get the dimension sizes */
for (i=0; i<ndims && i<max_ndims; i++)
dim[i] = chunk_size[i];
} /* end if */
/* Set the return value */
ret_value=ndims;
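For reference, a short sketch of the public calls these changes affect, assuming hdf5.h and stdio.h are included; the dimension values are arbitrary:

    hsize_t chunk_dims[2] = {64, 128};  /* each dimension must be less than 2^32 */
    hsize_t dims_out[2];
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    int ndims;

    if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
        fprintf(stderr, "H5Pset_chunk failed\n");
    ndims = H5Pget_chunk(dcpl, 2, dims_out);    /* returns 2 and fills dims_out */
    H5Pclose(dcpl);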


@ -752,6 +752,10 @@ done:
* Programmer: Robb Matzke
* Tuesday, December 9, 1997
*
* Note: This routine participates in the "Inlining C function pointers"
* pattern, don't call it directly, use the appropriate macro
* defined in H5Sprivate.h.
*
* Modifications:
* Changed Name - QAK 7/7/98
*
@ -805,7 +809,7 @@ H5Sget_simple_extent_npoints(hid_t space_id)
if (NULL == (ds = H5I_object_verify(space_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space");
ret_value = H5S_get_simple_extent_npoints(ds);
ret_value = H5S_GET_SIMPLE_EXTENT_NPOINTS(ds);
done:
FUNC_LEAVE_API(ret_value);


@ -380,7 +380,7 @@ H5S_all_copy (H5S_t *dst, const H5S_t *src)
assert(dst);
/* Set number of elements in selection */
dst->select.num_elem=(hsize_t)H5S_get_simple_extent_npoints(dst);
dst->select.num_elem=(hsize_t)H5S_GET_SIMPLE_EXTENT_NPOINTS(dst);
done:
FUNC_LEAVE_NOAPI(ret_value);
@ -723,7 +723,7 @@ H5S_select_all (H5S_t *space, unsigned rel_prev)
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTDELETE, FAIL, "can't release selection");
/* Set number of elements in selection */
space->select.num_elem=(hsize_t)H5S_get_simple_extent_npoints(space);
space->select.num_elem=(hsize_t)H5S_GET_SIMPLE_EXTENT_NPOINTS(space);
/* Set selection type */
space->select.type=H5S_SEL_ALL;


@ -106,7 +106,6 @@ H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
hsize_t total_bytes;
hssize_t snelmts; /*total number of elmts (signed) */
hsize_t nelmts; /*total number of elmts */
unsigned u;
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI_NOINIT(H5S_mpio_all_type);
@ -115,7 +114,7 @@ H5S_mpio_all_type( const H5S_t *space, size_t elmt_size,
assert (space);
/* Just treat the entire extent as a block of bytes */
if((snelmts = H5S_get_simple_extent_npoints(space))<0)
if((snelmts = H5S_GET_SIMPLE_EXTENT_NPOINTS(space))<0)
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "src dataspace has invalid selection")
H5_ASSIGN_OVERFLOW(nelmts,snelmts,hssize_t,hsize_t);
@ -678,7 +677,8 @@ H5S_mpio_spaces_xfer(H5F_t *f, const H5O_layout_t *layout, size_t elmt_size,
* the address to read from. This should be used as the displacement for
* a call to MPI_File_set_view() in the read or write call.
*/
addr = f->shared->base_addr + layout->addr + mpi_file_offset;
assert(layout->type==H5D_CONTIGUOUS);
addr = f->shared->base_addr + layout->u.contig.addr + mpi_file_offset;
#ifdef H5Smpi_DEBUG
HDfprintf(stderr, "spaces_xfer: addr=%a\n", addr );
#endif


@ -176,6 +176,7 @@ typedef struct H5S_conv_t {
#ifdef H5S_PACKAGE
#define H5S_GET_SIMPLE_EXTENT_TYPE(S) ((S)->extent.type)
#define H5S_GET_SIMPLE_EXTENT_NDIMS(S) ((S)->extent.u.simple.rank)
#define H5S_GET_SIMPLE_EXTENT_NPOINTS(S) ((S)->extent.nelem)
#define H5S_GET_SELECT_NPOINTS(S) ((S)->select.num_elem)
#define H5S_GET_SELECT_TYPE(S) ((S)->select.type)
#define H5S_SELECT_GET_SEQ_LIST(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN) ((*(S)->select.get_seq_list)(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN))
@ -197,6 +198,7 @@ typedef struct H5S_conv_t {
#else /* H5S_PACKAGE */
#define H5S_GET_SIMPLE_EXTENT_TYPE(S) (H5S_get_simple_extent_type(S))
#define H5S_GET_SIMPLE_EXTENT_NDIMS(S) (H5S_get_simple_extent_ndims(S))
#define H5S_GET_SIMPLE_EXTENT_NPOINTS(S) (H5S_get_simple_extent_npoints(S))
#define H5S_GET_SELECT_NPOINTS(S) (H5S_get_select_npoints(S))
#define H5S_GET_SELECT_TYPE(S) (H5S_get_select_type(S))
#define H5S_SELECT_GET_SEQ_LIST(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN) (H5S_select_get_seq_list(S,FLAGS,ITER,MAXSEQ,MAXBYTES,NSEQ,NBYTES,OFF,LEN))


@ -433,8 +433,8 @@ done:
*/
htri_t
H5V_hyper_disjointp(unsigned n,
const hssize_t *offset1, const hsize_t *size1,
const hssize_t *offset2, const hsize_t *size2)
const hssize_t *offset1, const size_t *size1,
const hssize_t *offset2, const size_t *size2)
{
unsigned u;
htri_t ret_value=FALSE; /* Return value */
@ -1229,7 +1229,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
H5V_chunk_index(unsigned ndims, const hssize_t *coord, const hsize_t *chunk,
H5V_chunk_index(unsigned ndims, const hssize_t *coord, const size_t *chunk,
const hsize_t *down_nchunks, hsize_t *chunk_idx)
{
hssize_t scaled_coord[H5V_HYPER_NDIMS]; /* Scaled, coordinates, in terms of chunks */
@ -1246,7 +1246,7 @@ H5V_chunk_index(unsigned ndims, const hssize_t *coord, const hsize_t *chunk,
/* Compute the scaled coordinates for actual coordinates */
for(u=0; u<ndims; u++) {
H5_CHECK_OVERFLOW(chunk[u],hsize_t,hssize_t);
H5_CHECK_OVERFLOW(chunk[u],size_t,hssize_t);
scaled_coord[u]=coord[u]/(hssize_t)chunk[u];
} /* end for */
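/* For example, an element at coordinate 27 in a dimension with chunk size 10
 * maps to scaled coordinate 27 / 10 = 2, i.e. the third chunk along that
 * dimension (illustrative numbers). */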


@ -50,9 +50,9 @@ H5_DLL hsize_t H5V_hyper_stride(unsigned n, const hsize_t *size,
const hssize_t *offset,
hssize_t *stride);
H5_DLL htri_t H5V_hyper_disjointp(unsigned n, const hssize_t *offset1,
const hsize_t *size1,
const size_t *size1,
const hssize_t *offset2,
const hsize_t *size2);
const size_t *size2);
H5_DLL htri_t H5V_hyper_eq(int n, const hssize_t *offset1,
const hsize_t *size1, const hssize_t *offset2,
const hsize_t *size2);
@ -82,7 +82,7 @@ H5_DLL hsize_t H5V_array_offset(unsigned n, const hsize_t *total_size,
H5_DLL herr_t H5V_array_calc(hsize_t offset, unsigned n,
const hsize_t *total_size, hssize_t *coords);
H5_DLL herr_t H5V_chunk_index(unsigned ndims, const hssize_t *coord,
const hsize_t *chunk, const hsize_t *down_nchunks, hsize_t *chunk_idx);
const size_t *chunk, const hsize_t *down_nchunks, hsize_t *chunk_idx);
H5_DLL ssize_t H5V_memcpyvv(void *_dst,
size_t dst_max_nseq, size_t *dst_curr_seq, size_t dst_len_arr[], hsize_t dst_off_arr[],
const void *_src,


@ -481,7 +481,8 @@ H5Z_prelude_callback(hid_t dcpl_id, hid_t type_id, H5Z_prelude_type_t prelude_ty
/* Check if the chunks have filters */
if(dcpl_pline.nused > 0) {
unsigned chunk_ndims; /* # of chunk dimensions */
hsize_t chunk_size[H5O_LAYOUT_NDIMS]; /* Size of chunk dimensions */
size_t chunk_size[H5O_LAYOUT_NDIMS]; /* Size of chunk dimensions */
hsize_t chunk_dims[H5O_LAYOUT_NDIMS]; /* Size of chunk dimensions */
H5S_t *space; /* Dataspace describing chunk */
hid_t space_id; /* ID for dataspace describing chunk */
size_t u; /* Local index variable */
@ -493,7 +494,9 @@ H5Z_prelude_callback(hid_t dcpl_id, hid_t type_id, H5Z_prelude_type_t prelude_ty
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't retrieve chunk size")
/* Create a data space for a chunk & set the extent */
if(NULL == (space = H5S_create_simple(chunk_ndims,chunk_size,NULL)))
for(u=0; u<chunk_ndims; u++)
chunk_dims[u]=chunk_size[u];
if(NULL == (space = H5S_create_simple(chunk_ndims,chunk_dims,NULL)))
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace")
/* Get ID for dataspace to pass to filter routines */


@ -30,22 +30,23 @@ LIB=libhdf5.la
DISTCLEAN=libhdf5.settings
## Source and object files for the library (lexicographically)...
LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5D.c H5Dio.c H5E.c H5F.c H5Fcontig.c \
H5Fcompact.c H5Fdbg.c H5Fistore.c H5Fseq.c H5FD.c H5FDcore.c \
H5FDfamily.c H5FDfphdf5.c H5FDgass.c H5FDlog.c H5FDmpi.c H5FDmpio.c \
H5FDmpiposix.c H5FDmulti.c H5FDsec2.c H5FDsrb.c H5FDstdio.c \
H5FDstream.c H5FL.c H5FO.c H5FP.c H5FPclient.c H5FPserver.c H5FS.c \
H5G.c H5Gent.c H5Gnode.c H5Gstab.c H5HG.c H5HGdbg.c H5HL.c H5HLdbg.c \
H5HP.c H5I.c H5MF.c H5MM.c H5O.c H5Oattr.c H5Obogus.c H5Ocont.c \
H5Odtype.c H5Oefl.c H5Ofill.c H5Olayout.c H5Omtime.c H5Oname.c \
H5Onull.c H5Opline.c H5Osdspace.c H5Oshared.c H5Ostab.c H5P.c \
H5Pdcpl.c H5Pdxpl.c H5Pfapl.c H5Pfcpl.c H5Ptest.c H5R.c H5RS.c H5S.c \
H5Sall.c H5Shyper.c H5Smpio.c H5Snone.c H5Spoint.c H5Sselect.c \
H5Stest.c H5ST.c H5T.c H5Tarray.c H5Tbit.c H5Tcommit.c H5Tcompound.c \
H5Tconv.c H5Tcset.c H5Tenum.c H5Tfields.c H5Tfixed.c H5Tfloat.c \
H5Tinit.c H5Tnative.c H5Toffset.c H5Topaque.c H5Torder.c H5Tpad.c \
H5Tprecis.c H5Tstrpad.c H5Tvlen.c H5TB.c H5TS.c H5V.c H5Z.c \
H5Zdeflate.c H5Zfletcher32.c H5Zshuffle.c H5Zszip.c H5Ztrans.c
LIB_SRC=H5.c H5A.c H5AC.c H5B.c H5D.c H5Dio.c H5Dtest.c H5E.c H5F.c \
H5Fcontig.c H5Fcompact.c H5Fdbg.c H5Fistore.c H5Fseq.c H5FD.c \
H5FDcore.c H5FDfamily.c H5FDfphdf5.c H5FDgass.c H5FDlog.c H5FDmpi.c \
H5FDmpio.c H5FDmpiposix.c H5FDmulti.c H5FDsec2.c H5FDsrb.c \
H5FDstdio.c H5FDstream.c H5FL.c H5FO.c H5FP.c H5FPclient.c \
H5FPserver.c H5FS.c H5G.c H5Gent.c H5Gnode.c H5Gstab.c H5HG.c \
H5HGdbg.c H5HL.c H5HLdbg.c H5HP.c H5I.c H5MF.c H5MM.c H5O.c H5Oattr.c \
H5Obogus.c H5Ocont.c H5Odtype.c H5Oefl.c H5Ofill.c H5Olayout.c \
H5Omtime.c H5Oname.c H5Onull.c H5Opline.c H5Osdspace.c H5Oshared.c \
H5Ostab.c H5P.c H5Pdcpl.c H5Pdxpl.c H5Pfapl.c H5Pfcpl.c H5Ptest.c \
H5R.c H5RS.c H5S.c H5Sall.c H5Shyper.c H5Smpio.c H5Snone.c \
H5Spoint.c H5Sselect.c H5Stest.c H5ST.c H5T.c H5Tarray.c H5Tbit.c \
H5Tcommit.c H5Tcompound.c H5Tconv.c H5Tcset.c H5Tenum.c H5Tfields.c \
H5Tfixed.c H5Tfloat.c H5Tinit.c H5Tnative.c H5Toffset.c H5Topaque.c \
H5Torder.c H5Tpad.c H5Tprecis.c H5Tstrpad.c H5Tvlen.c H5TB.c H5TS.c \
H5V.c H5Z.c H5Zdeflate.c H5Zfletcher32.c H5Zshuffle.c H5Zszip.c \
H5Ztrans.c
LIB_OBJ=$(LIB_SRC:.c=.lo)


@ -578,6 +578,8 @@ test_compact_io(hid_t fapl)
H5_FAILED();
printf(" Read different values than written.\n");
printf(" At index %d,%d\n", i, j);
printf(" wbuf[%d][%d]=%d\n", i, j, wbuf[i][j]);
printf(" rbuf[%d][%d]=%d\n", i, j, rbuf[i][j]);
goto error;
}
}

test/gen_old_layout.c Normal file

@ -0,0 +1,95 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Copyright by the Board of Trustees of the University of Illinois. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
* the files COPYING and Copyright.html. COPYING can be found at the root *
* of the source code distribution tree; Copyright.html can be found at the *
* root level of an installed copy of the electronic HDF5 document set and *
* is linked from the top-level documents page. It can also be found at *
* http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
* access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
* Thursday, May 27, 2004
*
* Purpose: Create two datasets (one for version 1 and one for version 2 of
* the layout message), which should have dimensions too large to
* represent in version 1 & 2 of the storage layout message.
* This program is used to create the test file `tlayouto.h5' which
* has truncated dimension information and can be used to verify that the
* library has fixed up the storage size correctly.
* To build the test file, this program MUST be compiled and linked
* with version hdf5-1.6.2 or _earlier_ libraries and the generated test
* file must be put into the 'test' directory in the 1.7+ (or 1.6+) branch
* of the library.
*/
#include "hdf5.h"
#define TESTFILE "tlayouto.h5"
#define SPACE_RANK 2
#define SPACE_DIM0 (8*1024*1024*1024ULL)
#define SPACE_DIM1 ((4*1024*1024*1024ULL)+1ULL)
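/* Note: SPACE_DIM0 = 8*2^30 = 2^33 and SPACE_DIM1 = 2^32 + 1; kept to their
 * low 32 bits (as the old layout message encoding did) they wrap to 0 and 1,
 * which is exactly the truncation this test file demonstrates. */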
/*-------------------------------------------------------------------------
* Function: main
*
* Purpose:
*
* Return: Success:
*
* Failure:
*
* Programmer: Quincey Koziol
* Friday, January 3, 2003
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
int
main(void)
{
hid_t file, space, dset, dcpl;
herr_t ret;
unsigned rank=SPACE_RANK; /* Rank of dataspace */
hsize_t big_dims[SPACE_RANK]={SPACE_DIM0,SPACE_DIM1}; /* Large dimensions */
/* Create the file */
file = H5Fcreate(TESTFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
if(file<0)
printf("file<0!\n");
/* Create the dataspace (for dataset) */
space = H5Screate_simple(rank,big_dims,NULL);
if(space<0)
printf("space<0!\n");
/* Create a dataset creation property list */
dcpl = H5Pcreate(H5P_DATASET_CREATE);
if(dcpl<0)
printf("dcpl<0!\n");
/* Make certain that the dataset's storage doesn't get allocated :-) */
ret = H5Pset_alloc_time(dcpl,H5D_ALLOC_TIME_LATE);
if(ret<0)
printf("H5Pset_alloc_time() failed!\n");
/* Create the dataset with deferred storage allocation */
dset = H5Dcreate(file, "Dataset", H5T_NATIVE_INT, space, dcpl);
if(dset<0)
printf("dset<0!\n");
H5Dclose(dset);
H5Sclose(space);
H5Pclose(dcpl);
H5Fclose(file);
return 0;
}

test/tlayouto.h5 Normal file

Binary file not shown.


@ -22,8 +22,14 @@
*
*************************************************************/
#define H5D_PACKAGE /*suppress error about including H5Dpkg */
/* Define this macro to indicate that the testing APIs should be available */
#define H5D_TESTING
#include "hdf5.h"
#include "testhdf5.h"
#include "H5Dpkg.h" /* Datasets */
/* Definitions for misc. test #1 */
#define MISC1_FILE "tmisc1.h5"
@ -217,6 +223,17 @@ unsigned m13_rdata[MISC13_DIM1][MISC13_DIM2]; /* Data read from dataset
#define MISC19_ATTR_NAME "Attribute"
#define MISC19_GROUP_NAME "Group"
/* Definitions for misc. test #20 */
#define MISC20_FILE "tmisc20.h5"
#define MISC20_FILE_OLD "tlayouto.h5"
#define MISC20_DSET_NAME "Dataset"
#define MISC20_DSET2_NAME "Dataset2"
#define MISC20_SPACE_RANK 2
#define MISC20_SPACE_DIM0 (8*1024*1024*1024ULL)
#define MISC20_SPACE_DIM1 ((4*1024*1024*1024ULL)+1ULL)
#define MISC20_SPACE2_DIM0 8
#define MISC20_SPACE2_DIM1 4
/****************************************************************
**
** test_misc1(): test unlinking a dataset from a group and immediately
@ -3286,6 +3303,176 @@ test_misc19(void)
} /* end test_misc19() */
/****************************************************************
**
** test_misc20(): Test problems with version 2 of storage layout
** message truncating dimensions
**
****************************************************************/
static void
test_misc20(void)
{
hid_t fid; /* File ID */
hid_t sid; /* 'Space ID */
hid_t did; /* Dataset ID */
hid_t dcpl; /* Dataset creation property list ID */
unsigned rank=MISC20_SPACE_RANK; /* Rank of dataspace */
hsize_t big_dims[MISC20_SPACE_RANK]={MISC20_SPACE_DIM0,MISC20_SPACE_DIM1}; /* Large dimensions */
hsize_t small_dims[MISC20_SPACE_RANK]={MISC20_SPACE2_DIM0,MISC20_SPACE2_DIM1}; /* Small dimensions */
unsigned version; /* Version of storage layout info */
hsize_t contig_size; /* Size of contiguous storage from layout info */
char testfile[512]=""; /* Character buffer for corrected test file name */
char *srcdir = HDgetenv("srcdir"); /* Pointer to the directory the source code is located within */
herr_t ret; /* Generic return value */
/* Output message about test being performed */
MESSAGE(5, ("Testing large dimension truncation fix\n"));
/* Verify that chunks with dimensions that are too large get rejected */
/* Create a dataset creation property list */
dcpl = H5Pcreate(H5P_DATASET_CREATE);
CHECK(dcpl, FAIL, "H5Pcreate");
/* Use chunked storage for this dataset */
ret = H5Pset_chunk(dcpl,rank,big_dims);
VERIFY(ret, FAIL, "H5Pset_chunk");
/* Verify that the storage for the dataset is the correct size and hasn't
* been truncated.
*/
/* Create the file */
fid = H5Fcreate(MISC20_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fcreate");
/* Create dataspace with _really_ big dimensions */
sid = H5Screate_simple(rank,big_dims,NULL);
CHECK(sid, FAIL, "H5Screate_simple");
/* Make certain that the dataset's storage doesn't get allocated :-) */
ret = H5Pset_alloc_time(dcpl,H5D_ALLOC_TIME_LATE);
CHECK(ret, FAIL, "H5Pset_alloc_time");
/* Create dataset with big dataspace */
did = H5Dcreate(fid, MISC20_DSET_NAME, H5T_NATIVE_INT, sid, dcpl);
CHECK(did, FAIL, "H5Dcreate");
/* Close dataset */
ret=H5Dclose(did);
CHECK(ret, FAIL, "H5Dclose");
/* Close dataspace */
ret=H5Sclose(sid);
CHECK(ret, FAIL, "H5Sclose");
/* Create dataspace with small dimensions */
sid = H5Screate_simple(rank,small_dims,NULL);
CHECK(sid, FAIL, "H5Screate_simple");
/* Create dataset with small dataspace */
did = H5Dcreate(fid, MISC20_DSET2_NAME, H5T_NATIVE_INT, sid, dcpl);
CHECK(did, FAIL, "H5Dcreate");
/* Close dataset */
ret=H5Dclose(did);
CHECK(ret, FAIL, "H5Dclose");
/* Close dataspace */
ret=H5Sclose(sid);
CHECK(ret, FAIL, "H5Sclose");
/* Close dataset creation property list */
ret=H5Pclose(dcpl);
CHECK(ret, FAIL, "H5Pclose");
/* Close file */
ret = H5Fclose(fid);
CHECK(ret, FAIL, "H5Fclose");
/* Re-open the file */
fid = H5Fopen(MISC20_FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fopen");
/* Open dataset with big dimensions */
did = H5Dopen(fid, MISC20_DSET_NAME);
CHECK(did, FAIL, "H5Dopen");
/* Get the layout version */
ret = H5D_layout_version_test(did,&version);
CHECK(ret, FAIL, "H5D_layout_version_test");
VERIFY(version,3,"H5D_layout_version_test");
/* Get the layout contiguous storage size */
ret = H5D_layout_contig_size_test(did,&contig_size);
CHECK(ret, FAIL, "H5D_layout_contig_size_test");
VERIFY(contig_size, MISC20_SPACE_DIM0*MISC20_SPACE_DIM1*H5Tget_size(H5T_NATIVE_INT), "H5D_layout_contig_size_test");
/* Close dataset */
ret=H5Dclose(did);
CHECK(ret, FAIL, "H5Dclose");
/* Open dataset with small dimensions */
did = H5Dopen(fid, MISC20_DSET2_NAME);
CHECK(did, FAIL, "H5Dopen");
/* Get the layout version */
ret = H5D_layout_version_test(did,&version);
CHECK(ret, FAIL, "H5D_layout_version_test");
VERIFY(version,3,"H5D_layout_version_test");
/* Get the layout contiguous storage size */
ret = H5D_layout_contig_size_test(did,&contig_size);
CHECK(ret, FAIL, "H5D_layout_contig_size_test");
VERIFY(contig_size, MISC20_SPACE2_DIM0*MISC20_SPACE2_DIM1*H5Tget_size(H5T_NATIVE_INT), "H5D_layout_contig_size_test");
/* Close dataset */
ret=H5Dclose(did);
CHECK(ret, FAIL, "H5Dclose");
/* Close file */
ret = H5Fclose(fid);
CHECK(ret, FAIL, "H5Fclose");
/* Verify that the storage size is computed correctly for older versions of layout info */
/* Generate the correct name for the test file, by prepending the source path */
if (srcdir && ((HDstrlen(srcdir) + HDstrlen(MISC20_FILE_OLD) + 1) < sizeof(testfile))) {
HDstrcpy(testfile, srcdir);
HDstrcat(testfile, "/");
}
HDstrcat(testfile, MISC20_FILE_OLD);
/*
* Open the old file and the dataset and get old settings.
*/
fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fopen");
/* Open dataset with small dimensions */
did = H5Dopen(fid, MISC20_DSET_NAME);
CHECK(did, FAIL, "H5Dopen");
/* Get the layout version */
ret = H5D_layout_version_test(did,&version);
CHECK(ret, FAIL, "H5D_layout_version_test");
VERIFY(version,2,"H5D_layout_version_test");
/* Get the layout contiguous storage size */
ret = H5D_layout_contig_size_test(did,&contig_size);
CHECK(ret, FAIL, "H5D_layout_contig_size_test");
VERIFY(contig_size, MISC20_SPACE_DIM0*MISC20_SPACE_DIM1*H5Tget_size(H5T_NATIVE_INT), "H5D_layout_contig_size_test");
/* Close dataset */
ret=H5Dclose(did);
CHECK(ret, FAIL, "H5Dclose");
/* Close file */
ret = H5Fclose(fid);
CHECK(ret, FAIL, "H5Fclose");
} /* end test_misc20() */
/****************************************************************
**
** test_misc(): Main misc. test routine.
@ -3316,6 +3503,7 @@ test_misc(void)
test_misc17(); /* Test array of ASCII character */
test_misc18(); /* Test new object header information in H5G_stat_t struct */
test_misc19(); /* Test incrementing & decrementing ref count on IDs */
test_misc20(); /* Test problems with truncated dimensions in version 2 of storage layout message */
} /* test_misc() */
@ -3359,4 +3547,5 @@ cleanup_misc(void)
HDremove(MISC17_FILE);
HDremove(MISC18_FILE);
HDremove(MISC19_FILE);
HDremove(MISC20_FILE);
}