[svn-r569] Changes since 19980731

----------------------

./bin/release
	Added ./Makefile to the distribution again -- it was lost in
	last week's changes, although it isn't an especially important
	file since it gets clobbered by configure anyway.

./bin/trace
./doc/html/Filters.html
./doc/html/H5.format.html
./doc/html/H5.user.html
./src/H5.c
./src/H5D.c
./src/H5Dprivate.h
./src/H5E.c
./src/H5Epublic.h
./src/H5Farray.c
./src/H5Fistore.c
./src/H5Fprivate.h
./src/H5O.c
./src/H5Ocomp.c
./src/H5Oprivate.h
./src/H5P.c
./src/H5Ppublic.h
./src/H5Sall.c
./src/H5Shyper.c
./src/H5Spoint.c
./src/H5Sprivate.h
./src/H5Ssimp.c
./src/H5Z.c
./src/H5Zprivate.h
./src/H5Zpublic.h
./src/hdf5.h
./test/dsets.c
./tools/h5ls.c
	Added the data filter pipeline, a generalization of the
	compression support that allows things like checksums,
	encryption, compression, and performance monitoring.  See
	./doc/html/Filters.html for details -- it replaces the
	Compression.html doc.
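	For illustration only (not part of this change log): a pipeline
	filter is a callback that transforms a buffer in place.
	Assuming a callback shape along the lines of the new
	H5Z_func_t -- flags, client-data values, and a resizable
	buffer -- a do-nothing filter might look roughly like this:

	    #include <stddef.h>     /* size_t */

	    /* Illustrative sketch; the exact H5Z_func_t signature is
	     * assumed here, not quoted from this commit.  A real
	     * filter would rewrite *buf (growing it via *buf_size if
	     * needed) and return the new data size, or zero to signal
	     * failure. */
	    static size_t
	    null_filter(unsigned int flags, size_t cd_nelmts,
	                const unsigned int cd_values[],
	                size_t nbytes, size_t *buf_size, void **buf)
	    {
	        (void)flags; (void)cd_nelmts; (void)cd_values;
	        (void)buf_size; (void)buf;
	        return nbytes;      /* pass the data through unchanged */
	    }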

./src/H5T.c
	Cleaned up debugging output.

./config/linux
	Added checks for egcs and pgcc and adjusted the optimization
	flags for each of those compilers.

./src/H5G.c
./tools/h5dump.c
	Fixed compiler warnings in these files and others.

./configure.in
./src/H5private.h
./test/mtime.c
	Added a check for difftime() and defined HDdifftime() to fall
	back to an alternative implementation on systems that don't
	have difftime().
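	A minimal sketch of the sort of fallback this implies (the
	HAVE_DIFFTIME macro name and the subtraction fallback are
	assumptions here; the subtraction only works where time_t is
	an arithmetic type):

	    #include <time.h>

	    #ifdef HAVE_DIFFTIME
	    #   define HDdifftime(X,Y)  difftime(X,Y)
	    #else
	    #   define HDdifftime(X,Y)  ((double)(X)-(double)(Y))
	    #endif
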
Robb Matzke 1998-08-05 17:22:59 -05:00
parent 99506091b3
commit 002b1494b7
40 changed files with 1822 additions and 1230 deletions

View File

@ -91,7 +91,11 @@ echo "This is hdf5-$VERS released on `date`" >README.x
tail -n +2 <README >>README.x
mv README.x README
test "$verbose" && echo " Running tar..." 1>&2
(cd ..; tar cf x.tar `sed s+^.+hdf5-$VERS+ hdf5-$VERS/MANIFEST` || exit 1)
( \
cd ..; \
tar cf x.tar hdf5-$VERS/Makefile \
`sed s+^.+hdf5-$VERS+ hdf5-$VERS/MANIFEST` || exit 1 \
)
# Compress
for comp in $methods; do

View File

@ -1,5 +1,6 @@
#!/bin/sh
set -x
date
#
# This script should be run nightly from cron. It checks out hdf5
# from the CVS source tree and compares it against the previous
@ -33,8 +34,8 @@ cvs -Q co -d ${COMPARE}/current hdf5 || exit 1
# Compare it with the previous version.
if [ -d ${COMPARE}/previous ]; then
if (diff -r -I H5_VERS_RELEASE -I " released on " \
${COMPARE}/previous ${COMPARE}/current >/dev/null); then
if (diff -r -I H5_VERS_RELEASE -I " released on " --exclude CVS \
${COMPARE}/previous ${COMPARE}/current); then
update=no
else
update=yes

View File

@ -55,7 +55,7 @@ $Source = "";
"H5T_overflow_t" => "x",
"H5Z_func_t" => "x",
"size_t" => "z",
"H5Z_method_t" => "Zm",
"H5Z_filter_t" => "Zf",
"ssize_t" => "Zs",
);

View File

@ -18,15 +18,24 @@ case `$CC -v 2>&1 |tail -1 |sed 's/gcc version //'` in
echo " | code generation may be wrong on some platforms.|"
echo " +------------------------------------------------+"
sleep 5
CCVENDOR=gcc
;;
2.8.*)
CFLAGS="$CFLAGS -Wundef -Wsign-compare"
CFLAGS="$CFLAGS -Wsign-compare"
CCVENDOR=gcc
;;
egcs-*)
CFLAGS="$CFLAGS -Wsign-compare"
CCVENDOR=egcs
;;
pgcc-*)
CFLAGS="$CFLAGS -Wsign-compare"
CCVENDOR=pgcc
;;
esac
# What must *always* be present for things to compile correctly?
CFLAGS="$CFLAGS -ansi"
#CPPFLAGS="$CPPFLAGS -I."
# What compiler flags should be used for code development?
DEBUG_CFLAGS="-g -fverbose-asm"
@ -34,8 +43,19 @@ DEBUG_CPPFLAGS="-DH5F_OPT_SEEK=0 -DH5F_LOW_DFLT=H5F_LOW_SEC2"
# What compiler flags should be used for building a production
# library?
PROD_CFLAGS="-mcpu=pentiumpro -march=pentiumpro -O3 -finline-functions -malign-double -fomit-frame-pointer -fschedule-insns2"
PROD_CPPFLAGS=
if [ "gcc" = "$CCVENDOR" ]; then
PROD_CFLAGS="-mcpu=pentiumpro -march=pentiumpro -O3 -finline-functions -malign-double -fomit-frame-pointer -fschedule-insns2"
PROD_CPPFLAGS=
elif [ "egcs" = "$CCVENDOR" ]; then
PROD_CFLAGS="-mcpu=pentiumpro -march=pentiumpro -O6 -malign-double"
PROD_CPPFLAGS=
elif [ "pgcc" = "$CCVENDOR" ]; then
PROD_CFLAGS="-mcpu=pentiumpro -march=pentiumpro -O6 -malign-double"
PROD_CPPFLAGS=
else
PROD_CFLAGS=
PROD_CPPFLAGS=
fi
# What compiler flags enable code profiling?
PROFILE_CFLAGS=-pg

configure vendored
View File

@ -2115,7 +2115,7 @@ else
fi
done
for ac_func in gettimeofday BSDgettimeofday
for ac_func in gettimeofday BSDgettimeofday difftime
do
echo $ac_n "checking for $ac_func""... $ac_c" 1>&6
echo "configure:2122: checking for $ac_func" >&5

View File

@ -213,7 +213,7 @@ dnl ----------------------------------------------------------------------
dnl Check for functions.
dnl
AC_CHECK_FUNCS(getpwuid gethostname system getrusage fork waitpid)
AC_CHECK_FUNCS(gettimeofday BSDgettimeofday)
AC_CHECK_FUNCS(gettimeofday BSDgettimeofday difftime)
AC_TRY_COMPILE([#include<sys/types.h>],
[off64_t n = 0;],

View File

@ -54,8 +54,8 @@
Name: Reserved - not assigned yet</a>
<li><a href="#ReservedMessage_000A"> <!-- 0x000a -->
Name: Reserved - not assigned yet</a>
<li><a href="#CompressionMessage"> <!-- 0x000b -->
Name: Data Storage - Compressed</a>
<li><a href="#FilterMessage"> <!-- 0x000b -->
Name: Data Storage - Filter Pipeline</a>
<li><a href="#AttributeMessage"> <!-- 0x000c -->
Name: Attribute</a>
<li><a href="#NameMessage"> <!-- 0x000d -->
@ -2433,18 +2433,20 @@
<ol>
<li>The array can be stored in one contiguous area of the file.
The layout requires that the size of the array be constant and
does not permit chunking or compression. The message stores
the total size of the array and the offset of an element from
the beginning of the storage area is computed as in C.
does not permit chunking, compression, checksums, encryption,
etc. The message stores the total size of the array and the
offset of an element from the beginning of the storage area is
computed as in C.
<li>The array domain can be regularly decomposed into chunks and
each chunk is allocated separately. This layout supports
arbitrary element traversals and compression and the chunks
can be distributed across external raw data files (these
features are described in other messages). The message stores
the size of a chunk instead of the size of the entire array;
the size of the entire array can be calculated by traversing
the B-tree that stores the chunk addresses.
arbitrary element traversals, compression, encryption, and
checksums, and the chunks can be distributed across external
raw data files (these features are described in other
messages). The message stores the size of a chunk instead of
the size of the entire array; the size of the entire array can
be calculated by traversing the B-tree that stores the chunk
addresses.
</ol>
<p>
@ -2555,22 +2557,21 @@
<b>Format of Data:</b> N/A
<hr>
<h3><a name="CompressionMessage">Name: Data Storage - Compressed</a></h3>
<h3><a name="FilterMessage">Name: Data Storage - Filter Pipeline</a></h3>
<b>Type:</b> 0x000B<BR>
<b>Length:</b> varies<BR>
<b>Status:</b> Optional, may not be repeated.
<p><b>Purpose and Description:</b> Compressed objects are
datasets which are stored in an HDF file after they have been
compressed. The encoding algorithm and its parameters are
stored in a Compression Message in the object header of the
dataset.
<p><b>Purpose and Description:</b> This message describes the
filter pipeline which should be applied to the data stream by
      providing filter identification numbers, flags, a name, and
      client data.
<p>
<center>
<table border align=center cellpadding=4 width="80%">
<caption align=top>
<b>Compression Message</b>
<b>Filter Pipeline Message</b>
</caption>
<tr align=center>
@ -2581,13 +2582,17 @@
</tr>
<tr align=center>
<td>Method</td>
<td>Flags</td>
<td colspan=2>Client Data Size</td>
<td>Version</td>
<td>Number of Filters</td>
<td colspan=2>Reserved</td>
</tr>
<tr align=center>
<td colspan=4><br>Client Data<br><br></td>
<td colspan=4>Reserved</td>
</tr>
<tr align=center>
<td colspan=4><br>Filter List<br><br></td>
</tr>
</table>
</center>
@ -2601,106 +2606,138 @@
</tr>
<tr valign=top>
<td>Method</td>
<td>The compression method is a value between zero and 255,
inclusive, that is used as a index into a compression
method lookup table. The value zero indicates no
compression. The values one through 15, inclusive, are
reserved for methods defined by NCSA. All other values
are user-defined compression methods.</td>
<td>Version</td>
<td>The version number for this message. This document
describes version one.</td>
</tr>
<tr valign=top>
<td>Number of Filters</td>
<td>The total number of filters described by this
message. The maximum possible number of filters in a
message is 32.</td>
</tr>
<tr valign=top>
<td>Filter List</td>
<td>A description of each filter. A filter description
appears in the next table.</td>
</tr>
</table>
</center>
<p>
<center>
<table border align=center cellpadding=4 width="80%">
<caption align=top>
<b>Filter Description</b>
</caption>
<tr align=center>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
<th width="25%">byte</th>
</tr>
<tr align=center>
<td colspan=2>Filter Identification</td>
<td colspan=2>Name Length</td>
</tr>
<tr align=center>
<td colspan=2>Flags</td>
<td colspan=2>Client Data Number of Values</td>
</tr>
<tr align=center>
<td colspan=4><br>Name<br><br></td>
</tr>
<tr align=center>
<td colspan=4><br>Client Data<br><br></td>
</tr>
<tr align=center>
<td colspan=4>Padding</td>
</tr>
</table>
</center>
<p>
<center>
<table align=center width="80%">
<tr>
<th width="30%">Field Name</th>
<th width="70%">Description</th>
</tr>
<tr valign=top>
<td>Filter Identification</td>
<td>This is a unique (except in the case of testing)
identifier for the filter. Values from zero through 255
are reserved for filters defined by the NCSA HDF5
library. Values 256 through 511 have been set aside for
use when developing/testing new filters. The remaining
values are allocated to specific filters by contacting the
<a href="mailto:hdf5dev@ncsa.uiuc.edu">HDF5 Development
Team</a>.</td>
</tr>
<tr valign=top>
<td>Name Length</td>
<td>Each filter has an optional null-terminated ASCII name,
and this field holds the length of the name, including the
null terminator and any null padding needed to make the
length a multiple of eight.  If the filter has no name then
a value of zero is stored in this field.</td>
</tr>
<tr valign=top>
<td>Flags</td>
<td>Eight bits of flags which are passed to the compression
algorithm. There meaning depends on the compression
method.</td>
<td>The flags indicate certain properties for a filter. The
bit values defined so far are:
<dl>
<dt><code>bit 1</code>
<dd>If set then the filter is an optional filter.
During output, if an optional filter fails it will be
silently removed from the pipeline.
</dl>
</tr>
<tr valign=top>
<td>Client Data Size</td>
<td>The size in bytes of the optional Client Data
field.</td>
<td>Client Data Number of Values</td>
<td>Each filter can store a few integer values to control
how the filter operates. The number of entries in the
Client Data array is stored in this field.</td>
</tr>
<tr valign=top>
<td>Name</td>
<td>If the Name Length field is non-zero then it will
contain the size of this field, a multiple of eight. This
field contains a null-terminated, ASCII character
string to serve as a comment/name for the filter.</td>
</tr>
<tr valign=top>
<td>Client Data</td>
<td>Additional information needed by the compression method
can be stored in this field. The data will be passed to
the compression algorithm as a void pointer.</td>
<td>This is an array of four-byte integers which will be
passed to the filter function. The Client Data Number of
Values determines the number of elements in the
array.</td>
</tr>
<tr valign=top>
<td>Padding</td>
<td>Four bytes of zeros are added to the message at this
point if the Client Data Number of Values field contains
an odd number.</td>
</tr>
</table>
</center>
<p>Sometimes additional redundancy can be added to the data before
it's compressed to result in a better compression ratio. The
library doesn't specifically support modeling methods to add
redundancy, but the effect can be achieved through the use of
user-defined data types.
<p>The library uses the following compression methods.
<center>
<table align=center width="80%">
<tr valign=top>
<td><code>0</code></td>
<td>No compression: The blocks of data are stored in
their raw format.</td>
</tr>
<tr valign=top>
<td><code>1</code></td>
<td>Deflation: This is the same algorithm used by
GNU gzip which is a combination Huffman and LZ77
dictionary encoder. The <code>libz</code> library version
1.1.2 or later must be available.</td>
</tr>
<tr valign=top>
<td><code>2</code></td>
<td>Run length encoding: Not implemented yet.</td>
</tr>
<tr valign=top>
<td><code>3</code></td>
<td>Adaptive Huffman: Not implemented yet.</td>
</tr>
<tr valign=top>
<td><code>4</code></td>
<td>Adaptive Arithmetic: Not implemented yet.</td>
</tr>
<tr valign=top>
<td><code>5</code></td>
<td>LZ78 Dictionary Encoding: Not implemented yet.</td>
</tr>
<tr valign=top>
<td><code>6</code></td>
<td>Adaptive Lempel-Ziv: Similar to Unix
<code>compress</code>. Not implemented yet.</td>
</tr>
<tr valign=top>
<td><code>7-15</code></td>
<td>Reserved for future use.</td>
</tr>
<tr valign=top>
<td><code>16-255</code></td>
<td>User-defined.</td>
</tr>
</table>
</center>
<p>The compression is applied independently to each chunk of
storage (after data space and data type conversions). If the
compression is unable to make the chunk smaller than it would
normally be, the chunk is stored without compression. At the
library's discretion, chunks which fail the compression can also
be stored in their raw format.
<hr>
<h3><a name="AttributeMessage">Name: Attribute</a></h3>
<b>Type:</b> 0x000C<BR>
@ -3255,7 +3292,7 @@ data-type.
<address><a href="mailto:koziol@ncsa.uiuc.edu">Quincey Koziol</a></address>
<address><a href="mailto:matzke@llnl.gov">Robb Matzke</a></address>
<!-- hhmts start -->
Last modified: Fri Jul 24 15:10:57 EDT 1998
Last modified: Tue Aug 4 10:04:40 EDT 1998
<!-- hhmts end -->
</body>
</html>
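
For orientation, the per-filter entry laid out in the H5.format.html tables above could be modeled in memory roughly like this (an illustrative sketch, not a structure taken from the library source):

    /* One entry of the filter pipeline message (illustrative only). */
    typedef struct {
        unsigned short filter_id;   /* filter identification number        */
        unsigned short name_length; /* bytes in `name' incl. the NUL and   */
                                    /* null padding to a multiple of 8;    */
                                    /* zero if the filter has no name      */
        unsigned short flags;       /* bit 1: filter is optional           */
        unsigned short cd_nvalues;  /* number of client-data integers      */
        char          *name;        /* optional NUL-terminated comment     */
        unsigned int  *cd_values;   /* cd_nvalues four-byte integers; the  */
                                    /* encoded form appends four zero      */
                                    /* bytes when cd_nvalues is odd        */
    } filter_entry_sketch;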

View File

@ -28,6 +28,8 @@
A guide to the H5P interface.
<li><a href="Errors.html">Error handling</a> -
A guide to the H5E interface.
<li><a href="Filters.html">Filters</a> -
A guide to the H5Z interface.
<li><a href="Caching.html">Caching</a> -
A guide for meta and raw data caching.
<li><a href="tracing.html">API Tracing</a> -
@ -44,8 +46,6 @@
A description of HDF5 version numbers.
<li><a href="IOPipe.html">I/O Pipeline</a> -
A description of the raw data I/O pipeline.
<li><a href="Compression.html">Compression</a> -
A guide to the use of compression for datasets.
<li><a href="ExternalFiles.html">Working with external files</a> -
A guide to the use of multiple files with HDF5.
<li><a href="Big.html">Large Datasets</a> -
@ -66,7 +66,7 @@
<address><a href="mailto:koziol@ncsa.uiuc.edu">Quincey Koziol</a></address>
<address><a href="mailto:matzke@llnl.gov">Robb Matzke</a></address>
<!-- hhmts start -->
Last modified: Wed Jul 8 14:10:50 EDT 1998
Last modified: Wed Aug 5 17:02:57 EDT 1998
<!-- hhmts end -->
</body>

View File

@ -819,6 +819,29 @@ H5P.o: \
H5Eprivate.h \
H5Epublic.h \
H5MMprivate.h
H5S.o: \
H5S.c \
H5private.h \
H5public.h \
H5config.h \
H5Iprivate.h \
H5Ipublic.h \
H5Eprivate.h \
H5Epublic.h \
H5MMprivate.h \
H5MMpublic.h \
H5Oprivate.h \
H5Opublic.h \
H5Fprivate.h \
H5Fpublic.h \
H5Dpublic.h \
H5Gprivate.h \
H5Gpublic.h \
H5Bprivate.h \
H5Bpublic.h \
H5HGprivate.h \
H5HGpublic.h \
H5Tprivate.h
H5Sall.o: \
H5Sall.c \
H5private.h \
@ -1084,26 +1107,3 @@ H5Z.o: \
H5Spublic.h \
H5Zprivate.h \
H5Zpublic.h
H5S.o: \
H5S.c \
H5private.h \
H5public.h \
H5config.h \
H5Iprivate.h \
H5Ipublic.h \
H5Eprivate.h \
H5Epublic.h \
H5MMprivate.h \
H5MMpublic.h \
H5Oprivate.h \
H5Opublic.h \
H5Fprivate.h \
H5Fpublic.h \
H5Dpublic.h \
H5Gprivate.h \
H5Gpublic.h \
H5Bprivate.h \
H5Bpublic.h \
H5HGprivate.h \
H5HGpublic.h \
H5Tprivate.h

View File

@ -55,7 +55,7 @@ FILE *fdopen(int fd, const char *mode);
#include <H5Pprivate.h> /*property lists */
#include <H5Sprivate.h> /*data spaces */
#include <H5Tprivate.h> /*data types */
#include <H5Zprivate.h> /*compression */
#include <H5Zprivate.h> /*filters */
#define PABLO_MASK H5_mask
@ -113,7 +113,7 @@ H5_init_library(void)
/* Turn on tracing? */
const char *s = getenv ("HDF5_TRACE");
if (s && isdigit(*s)) {
int fd = HDstrtol (s, NULL, 0);
int fd = (int)HDstrtol (s, NULL, 0);
H5_trace_g = HDfdopen (fd, "w");
}
}
@ -795,7 +795,7 @@ HDstrtoll (const char *s, const char **rest, int base)
(*s>='a' && *s<'a'+base-10) ||
(*s>='A' && *s<'A'+base-10)))) {
if (!overflow) {
int64 digit;
int64 digit = 0;
if (*s>='0' && *s<='9') digit = *s - '0';
else if (*s>='a' && *s<='z') digit = *s-'a'+10;
else digit = *s-'A'+10;
@ -925,7 +925,74 @@ H5_timer_end (H5_timer_t *sum/*in,out*/, H5_timer_t *timer/*in,out*/)
sum->etime += timer->etime;
}
}
/*-------------------------------------------------------------------------
* Function: H5_bandwidth
*
* Purpose: Prints the bandwidth (bytes per second) in a field 10
 * characters wide with four digits of precision like this:
*
* NaN If <=0 seconds
* 1234. TB/s
* 123.4 TB/s
* 12.34 GB/s
* 1.234 MB/s
* 4.000 kB/s
* 1.000 B/s
* 0.000 B/s If NBYTES==0
* 1.2345e-10 For bandwidth less than 1
* 6.7893e+94 For exceptionally large values
* 6.678e+106 For really big values
*
* Return: void
*
* Programmer: Robb Matzke
* Wednesday, August 5, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
void
H5_bandwidth(char *buf/*out*/, double nbytes, double nseconds)
{
double bw;
if (nseconds<=0.0) {
strcpy(buf, " NaN");
} else {
bw = nbytes/nseconds;
if (bw==0.0) {
strcpy(buf, "0.000 B/s");
} else if (bw<1.0) {
sprintf(buf, "%10.4e", bw);
} else if (bw<1024.0) {
sprintf(buf, "%05.4f", bw);
strcpy(buf+5, " B/s");
} else if (bw<1024.0*1024.0) {
sprintf(buf, "%05.4f", bw/1024.0);
strcpy(buf+5, " kB/s");
} else if (bw<1024.0*1024.0*1024.0) {
sprintf(buf, "%05.4f", bw/(1024.0*1024.0));
strcpy(buf+5, " MB/s");
} else if (bw<1024.0*1024.0*1024.0*1024.0) {
sprintf(buf, "%05.4f",
bw/(1024.0*1024.0*1024.0));
strcpy(buf+5, " GB/s");
} else if (bw<1024.0*1024.0*1024.0*1024.0*1024.0) {
sprintf(buf, "%05.4f",
bw/(1024.0*1024.0*1024.0*1024.0));
strcpy(buf+5, " TB/s");
} else {
sprintf(buf, "%10.4e", bw);
if (strlen(buf)>10) {
sprintf(buf, "%10.3e", bw);
}
}
}
}
/*-------------------------------------------------------------------------
* Function: H5_trace
@ -989,7 +1056,7 @@ H5_trace (hbool_t returning, const char *func, const char *type, ...)
for (ptr=0; '*'==*type; type++) ptr++;
if ('['==*type) {
if ('a'==type[1]) {
asize_idx = strtol(type+2, &rest, 10);
asize_idx = (int)strtol(type+2, &rest, 10);
assert(']'==*rest);
type = rest+1;
} else {
@ -1959,7 +2026,7 @@ H5_trace (hbool_t returning, const char *func, const char *type, ...)
case 'Z':
switch (type[1]) {
case 'm':
case 'f':
if (ptr) {
if (vp) {
fprintf (out, "0x%lx", (unsigned long)vp);
@ -1967,19 +2034,11 @@ H5_trace (hbool_t returning, const char *func, const char *type, ...)
fprintf(out, "NULL");
}
} else {
H5Z_method_t zmeth = va_arg (ap, H5Z_method_t);
if (zmeth<0) {
fprintf (out, "%d (range)", (int)zmeth);
} else if (H5Z_NONE==zmeth) {
fprintf (out, "H5Z_NONE");
} else if (H5Z_DEFLATE==zmeth) {
fprintf (out, "H5Z_DEFLATE");
} else if (zmeth<H5Z_USERDEF_MIN) {
fprintf (out, "H5Z_RES_%d", (int)zmeth);
} else if (zmeth<=H5Z_USERDEF_MAX) {
fprintf (out, "%d", (int)zmeth);
H5Z_filter_t id = va_arg (ap, H5Z_filter_t);
if (H5Z_FILTER_DEFLATE==id) {
fprintf (out, "H5Z_FILTER_DEFLATE");
} else {
fprintf (out, "%d (range)", (int)zmeth);
fprintf (out, "%ld", (long)id);
}
}
break;
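
As an aside, the H5_bandwidth() helper added above would presumably be called along these lines (an illustrative sketch, not code from this commit; the report_bw() wrapper is an assumption, while the prototype mirrors the definition shown above):

    #include <stdio.h>

    void H5_bandwidth(char *buf/*out*/, double nbytes, double nseconds);

    /* Hypothetical caller; in real use NBYTES and SECONDS would come
     * from an H5_timer_t measurement. */
    static void
    report_bw(double nbytes, double seconds)
    {
        char bw[32];                    /* room for the ~10-char field */
        H5_bandwidth(bw, nbytes, seconds);
        printf("%s\n", bw);             /* e.g. "4.000 MB/s" */
    }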

View File

@ -27,7 +27,7 @@ static char RcsId[] = "@(#)$Revision$";
#include <H5MMprivate.h> /* Memory management */
#include <H5Oprivate.h> /* Object headers */
#include <H5Pprivate.h> /* Property lists */
#include <H5Zprivate.h> /* Data compression */
#include <H5Zprivate.h> /* Data filters */
#ifdef QAK
int qak_debug=0;
@ -66,10 +66,8 @@ const H5D_create_t H5D_create_dflt = {
0, /*...slots used */
NULL}, /*...slot array */
/* Compression */
{H5Z_NONE, /* No compression */
0, /*...flags */
0, NULL} /*...client data */
/* Filters */
{0, 0, NULL} /* No filters in pipeline */
};
/* Default dataset transfer property list */
@ -805,17 +803,17 @@ H5D_create(H5G_t *loc, const char *name, const H5T_t *type, const H5S_t *space,
assert (space);
assert (create_parms);
#ifdef HAVE_PARALLEL
/* If MPIO is used, no compression support yet. */
/* If MPIO is used, no filter support yet. */
if (f->shared->access_parms->driver == H5F_LOW_MPIO &&
H5Z_NONE!=create_parms->compress.method){
create_parms->pline.nfilters>0) {
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL,
"Parallel IO does not support compression yet");
"Parallel IO does not support filters yet");
}
#endif
if (H5Z_NONE!=create_parms->compress.method &&
if (create_parms->pline.nfilters>0 &&
H5D_CHUNKED!=create_parms->layout) {
HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, NULL,
"compression can only be used with chunked layout");
"filters can only be used with chunked layout");
}
/* Initialize the dataset object */
@ -909,12 +907,12 @@ H5D_create(H5G_t *loc, const char *name, const H5T_t *type, const H5S_t *space,
"unable to update type or space header messages");
}
/* Update the compression message */
if (H5Z_NONE!=new_dset->create_parms->compress.method &&
H5O_modify (&(new_dset->ent), H5O_COMPRESS, 0, H5O_FLAG_CONSTANT,
&(new_dset->create_parms->compress))<0) {
/* Update the filters message */
if (new_dset->create_parms->pline.nfilters>0 &&
H5O_modify (&(new_dset->ent), H5O_PLINE, 0, H5O_FLAG_CONSTANT,
&(new_dset->create_parms->pline))<0) {
HGOTO_ERROR (H5E_DATASET, H5E_CANTINIT, NULL,
"unable to update compression header message");
"unable to update filter header message");
}
/*
@ -1065,21 +1063,21 @@ H5D_open(H5G_t *loc, const char *name)
"unable to read data space info from dataset header");
}
/* Get the optional compression message */
if (NULL==H5O_read (&(dataset->ent), H5O_COMPRESS, 0,
&(dataset->create_parms->compress))) {
/* Get the optional filters message */
if (NULL==H5O_read (&(dataset->ent), H5O_PLINE, 0,
&(dataset->create_parms->pline))) {
H5E_clear ();
HDmemset (&(dataset->create_parms->compress), 0,
sizeof(dataset->create_parms->compress));
HDmemset (&(dataset->create_parms->pline), 0,
sizeof(dataset->create_parms->pline));
}
#ifdef HAVE_PARALLEL
f = H5G_fileof (loc);
/* If MPIO is used, no compression support yet. */
/* If MPIO is used, no filter support yet. */
if (f->shared->access_parms->driver == H5F_LOW_MPIO &&
H5Z_NONE!=dataset->create_parms->compress.method){
dataset->create_parms->pline.nfilters>0){
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, NULL,
"Parallel IO does not support compression yet");
"Parallel IO does not support filters yet");
}
#endif
@ -1356,7 +1354,7 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
if (H5T_conv_noop==tconv_func &&
NULL!=sconv_func.read) {
status = (sconv_func.read)(dataset->ent.file, &(dataset->layout),
&(dataset->create_parms->compress),
&(dataset->create_parms->pline),
&(dataset->create_parms->efl),
H5T_get_size (dataset->type),
file_space, mem_space,
@ -1376,7 +1374,7 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
if (H5D_OPTIMIZE_PIPE && H5T_conv_noop==tconv_func &&
NULL!=sconv_func.read) {
status = (sconv_func.read)(dataset->ent.file, &(dataset->layout),
&(dataset->create_parms->compress),
&(dataset->create_parms->pline),
&(dataset->create_parms->efl),
H5T_get_size (dataset->type), file_space,
mem_space, xfer_parms->xfer_mode,
@ -1474,7 +1472,7 @@ H5D_read(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
* if necessary.
*/
if ((sconv_func.fgath)(dataset->ent.file, &(dataset->layout),
&(dataset->create_parms->compress),
&(dataset->create_parms->pline),
&(dataset->create_parms->efl),
H5T_get_size (dataset->type), file_space, &file_iter,
smine_nelmts, xfer_parms->xfer_mode,
@ -1703,7 +1701,7 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
NULL!=sconv_func.write) {
status = (sconv_func.write)(dataset->ent.file,
&(dataset->layout),
&(dataset->create_parms->compress),
&(dataset->create_parms->pline),
&(dataset->create_parms->efl),
H5T_get_size (dataset->type),
file_space, mem_space,
@ -1724,7 +1722,7 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
H5T_conv_noop==tconv_func &&
NULL!=sconv_func.write) {
status = (sconv_func.write)(dataset->ent.file, &(dataset->layout),
&(dataset->create_parms->compress),
&(dataset->create_parms->pline),
&(dataset->create_parms->efl),
H5T_get_size (dataset->type), file_space,
mem_space, xfer_parms->xfer_mode, buf);
@ -1842,7 +1840,7 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
if ((H5D_OPTIMIZE_PIPE && H5T_BKG_YES==need_bkg) ||
(!H5D_OPTIMIZE_PIPE && need_bkg)) {
if ((sconv_func.fgath)(dataset->ent.file, &(dataset->layout),
&(dataset->create_parms->compress),
&(dataset->create_parms->pline),
&(dataset->create_parms->efl),
H5T_get_size (dataset->type), file_space,
&bkg_iter, smine_nelmts, xfer_parms->xfer_mode,
@ -1873,7 +1871,7 @@ H5D_write(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *mem_space,
* Scatter the data out to the file.
*/
if ((sconv_func.fscat)(dataset->ent.file, &(dataset->layout),
&(dataset->create_parms->compress),
&(dataset->create_parms->pline),
&(dataset->create_parms->efl),
H5T_get_size (dataset->type), file_space,
&file_iter, smine_nelmts,
@ -2101,7 +2099,7 @@ H5D_allocate (H5D_t *dataset)
if (H5F_istore_allocate(dataset->ent.file,
(layout), space_dim,
&(dataset->create_parms->compress))==FAIL){
&(dataset->create_parms->pline))==FAIL){
HRETURN(FAIL);
}
break;

View File

@ -33,12 +33,13 @@ typedef struct H5F_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
H5O_layout_t *layout; /*the layout message */
H5O_compress_t *comp; /*compression message */
H5O_pline_t *pline; /*filter pipeline message */
hssize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
size_t chunk_size; /*size of a chunk */
size_t rd_count; /*bytes remaining to be read */
size_t wr_count; /*bytes remaining to be written */
uint8 *chunk; /*the uncompressed chunk data */
size_t chunk_size; /*size of a chunk */
size_t alloc_size; /*amount allocated for the chunk */
uint8 *chunk; /*the unfiltered chunk data */
} H5F_rdcc_ent_t;
/* Private prototypes */
@ -83,8 +84,9 @@ static herr_t H5F_istore_get_addr (H5F_t *f, const H5O_layout_t *layout,
* The chunk's file address is part of the B-tree and not part of the key.
*/
typedef struct H5F_istore_key_t {
hsize_t nbytes; /*size of stored data */
size_t nbytes; /*size of stored data */
hssize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
uintn filter_mask; /*excluded filters */
} H5F_istore_key_t;
typedef struct H5F_istore_ud1_t {
@ -141,6 +143,7 @@ H5F_istore_sizeof_rkey(H5F_t __unused__ *f, const void *_udata)
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
nbytes = 4 + /*storage size */
4 + /*filter mask */
udata->mesg.ndims * 4; /*dimension indices */
return nbytes;
@ -180,7 +183,8 @@ H5F_istore_decode_key(H5F_t __unused__ *f, H5B_t *bt, uint8 *raw, void *_key)
assert(ndims > 0 && ndims <= H5O_LAYOUT_NDIMS);
/* decode */
UINT32DECODE (raw, key->nbytes);
UINT32DECODE(raw, key->nbytes);
UINT32DECODE(raw, key->filter_mask);
for (i = 0; i < ndims; i++) {
UINT32DECODE(raw, key->offset[i]);
}
@ -222,7 +226,8 @@ H5F_istore_encode_key(H5F_t __unused__ *f, H5B_t *bt, uint8 *raw, void *_key)
assert(ndims > 0 && ndims <= H5O_LAYOUT_NDIMS);
/* encode */
UINT32ENCODE (raw, key->nbytes);
UINT32ENCODE(raw, key->nbytes);
UINT32ENCODE(raw, key->filter_mask);
for (i = 0; i < ndims; i++) {
UINT32ENCODE(raw, key->offset[i]);
}
@ -258,10 +263,12 @@ H5F_istore_debug_key (FILE *stream, intn indent, intn fwidth,
FUNC_ENTER (H5F_istore_debug_key, FAIL);
assert (key);
HDfprintf (stream, "%*s%-*s %Hd bytes\n", indent, "", fwidth,
"Chunk size:", key->nbytes);
HDfprintf (stream, "%*s%-*s {", indent, "", fwidth,
"Logical offset:");
HDfprintf(stream, "%*s%-*s %Zd bytes\n", indent, "", fwidth,
"Chunk size:", key->nbytes);
HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth,
"Filter mask:", key->filter_mask);
HDfprintf(stream, "%*s%-*s {", indent, "", fwidth,
"Logical offset:");
for (i=0; i<udata->mesg.ndims; i++) {
HDfprintf (stream, "%s%Hd", i?", ":"", key->offset[i]);
}
@ -429,6 +436,7 @@ H5F_istore_new_node(H5F_t *f, H5B_ins_t op,
* inserted into the tree.
*/
lt_key->nbytes = udata->key.nbytes;
lt_key->filter_mask = udata->key.filter_mask;
for (i=0; i<udata->mesg.ndims; i++) {
lt_key->offset[i] = udata->key.offset[i];
}
@ -439,6 +447,7 @@ H5F_istore_new_node(H5F_t *f, H5B_ins_t op,
*/
if (H5B_INS_LEFT != op) {
rt_key->nbytes = 0;
rt_key->filter_mask = 0;
for (i=0; i<udata->mesg.ndims; i++) {
assert (udata->mesg.dim[i] < MAX_HSSIZET);
assert (udata->key.offset[i]+(hssize_t)(udata->mesg.dim[i]) >
@ -494,6 +503,7 @@ H5F_istore_found(H5F_t __unused__ *f, const haddr_t *addr,
/* Initialize return values */
udata->addr = *addr;
udata->key.nbytes = lt_key->nbytes;
udata->key.filter_mask = lt_key->filter_mask;
assert (lt_key->nbytes>0);
for (i = 0; i < udata->mesg.ndims; i++) {
udata->key.offset[i] = lt_key->offset[i];
@ -589,6 +599,7 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
"unable to reallocate chunk storage");
}
lt_key->nbytes = udata->key.nbytes;
lt_key->filter_mask = udata->key.filter_mask;
*lt_key_changed = TRUE;
udata->addr = *new_node;
ret_value = H5B_INS_CHANGE;
@ -608,6 +619,7 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
* current node. The MD_KEY is where the split occurs.
*/
md_key->nbytes = udata->key.nbytes;
md_key->filter_mask = udata->key.filter_mask;
for (i=0; i<udata->mesg.ndims; i++) {
assert(0 == udata->key.offset[i] % udata->mesg.dim[i]);
md_key->offset[i] = udata->key.offset[i];
@ -677,7 +689,10 @@ H5F_istore_init (H5F_t *f)
/*-------------------------------------------------------------------------
* Function: H5F_istore_flush_entry
*
* Purpose: Writes a chunk to disk.
* Purpose: Writes a chunk to disk. If RESET is non-zero then the
* entry is cleared -- it's slightly faster to flush a chunk if
* the RESET flag is turned on because it results in one fewer
* memory copy.
*
* Return: Success: SUCCEED
*
@ -691,65 +706,106 @@ H5F_istore_init (H5F_t *f)
*-------------------------------------------------------------------------
*/
static herr_t
H5F_istore_flush_entry (H5F_t *f, H5F_rdcc_ent_t *ent)
H5F_istore_flush_entry (H5F_t *f, H5F_rdcc_ent_t *ent, hbool_t reset)
{
void *c_buf = NULL; /*temp compression buffer */
void *out_ptr = NULL; /*ptr to output buffer */
size_t nbytes; /*size of output buffer */
herr_t ret_value = FAIL; /*return value */
H5F_istore_ud1_t udata; /*pass through B-tree */
intn i;
herr_t ret_value=FAIL; /*return value */
H5F_istore_ud1_t udata; /*pass through B-tree */
intn i; /*counters */
void *buf=NULL; /*temporary buffer */
size_t alloc; /*bytes allocated for BUF */
hbool_t point_of_no_return = FALSE;
FUNC_ENTER (H5F_istore_flush_entry, FAIL);
assert (ent);
assert(f);
assert(ent);
assert (!ent->locked);
if (!ent->dirty) HRETURN (SUCCEED);
/* Should the chunk be compressed before writing it to disk? */
if (ent->comp && H5Z_NONE!=ent->comp->method) {
if (NULL==(c_buf = H5MM_malloc (ent->chunk_size))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for data compression");
buf = ent->chunk;
if (ent->dirty) {
udata.mesg = *(ent->layout);
udata.key.filter_mask = 0;
H5F_addr_undef(&(udata.addr));
udata.key.nbytes = ent->chunk_size;
for (i=0; i<ent->layout->ndims; i++) {
udata.key.offset[i] = ent->offset[i];
}
nbytes = H5Z_compress (ent->comp, ent->chunk_size, ent->chunk, c_buf);
if (nbytes && nbytes<ent->chunk_size) {
out_ptr = c_buf;
} else {
out_ptr = ent->chunk;
nbytes = ent->chunk_size;
alloc = ent->alloc_size;
/* Should the chunk be filtered before writing it to disk? */
if (ent->pline && ent->pline->nfilters) {
if (!reset) {
/*
* Copy the chunk to a new buffer before running it through
* the pipeline because we'll want to save the original buffer
* for later.
*/
alloc = ent->chunk_size;
if (NULL==(buf = H5MM_malloc(alloc))) {
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for pipeline");
}
HDmemcpy(buf, ent->chunk, ent->chunk_size);
} else {
/*
 * If we are resetting and something goes wrong after this
* point then it's too late to recover because we may have
* destroyed the original data by calling H5Z_pipeline().
* The only safe option is to continue with the reset
* even if we can't write the data to disk.
*/
point_of_no_return = TRUE;
}
if (H5Z_pipeline(f, ent->pline, 0, &(udata.key.filter_mask),
&(udata.key.nbytes), &alloc, &buf)<0) {
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL,
"output pipeline failed");
}
}
} else {
out_ptr = ent->chunk;
nbytes = ent->chunk_size;
/*
 * Create the chunk if it doesn't exist, or reallocate the chunk if
* its size changed. Then write the data into the file.
*/
if (H5B_insert(f, H5B_ISTORE, &(ent->layout->addr), &udata)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to allocate chunk");
}
if (H5F_block_write (f, &(udata.addr), udata.key.nbytes, H5D_XFER_DFLT,
buf)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to write raw data to file");
}
/* Mark cache entry as clean */
ent->dirty = FALSE;
f->shared->rdcc.nflushes++;
}
/*
* Create the chunk it if it doesn't exist, or reallocate the chunk if its
* size changed. Then write the data into the file.
*/
udata.mesg = *(ent->layout);
H5F_addr_undef(&(udata.addr));
udata.key.nbytes = nbytes;
for (i=0; i<ent->layout->ndims; i++) {
udata.key.offset[i] = ent->offset[i];
/* Reset */
if (reset) {
point_of_no_return = FALSE;
ent->layout = H5O_free(H5O_LAYOUT, ent->layout);
ent->pline = H5O_free(H5O_PLINE, ent->pline);
if (buf==ent->chunk) buf = NULL;
ent->chunk = H5MM_xfree(ent->chunk);
}
if (H5B_insert(f, H5B_ISTORE, &(ent->layout->addr), &udata)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to allocate chunk");
}
if (H5F_block_write (f, &(udata.addr), nbytes, H5D_XFER_DFLT, out_ptr)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to write raw data to file");
}
/* Mark cache entry as clean */
ent->dirty = FALSE;
f->shared->rdcc.nflushes++;
ret_value = SUCCEED;
done:
H5MM_xfree (c_buf);
/* Free the temp buffer only if it's different than the entry chunk */
if (buf!=ent->chunk) H5MM_xfree(buf);
/*
* If we reached the point of no return then we have no choice but to
* reset the entry. This can only happen if RESET is true but the
* output pipeline failed.
*/
if (ret_value<0 && point_of_no_return) {
ent->layout = H5O_free(H5O_LAYOUT, ent->layout);
ent->pline = H5O_free(H5O_PLINE, ent->pline);
ent->chunk = H5MM_xfree(ent->chunk);
}
FUNC_LEAVE (ret_value);
}
@ -780,7 +836,7 @@ H5F_istore_flush (H5F_t *f)
FUNC_ENTER (H5F_istore_flush, FAIL);
for (i=0; i<rdcc->nused; i++) {
if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
if (H5F_istore_flush_entry (f, rdcc->slot+i, FALSE)<0) {
nerrors++;
}
}
@ -791,18 +847,17 @@ H5F_istore_flush (H5F_t *f)
FUNC_LEAVE (SUCCEED);
}
/*-------------------------------------------------------------------------
* Function: H5F_istore_preempt
* Function: H5F_istore_preempt
*
* Purpose: Preempts the specified entry from the cache, flushing it to
* disk if necessary.
* Purpose: Preempts the specified entry from the cache, flushing it to
* disk if necessary.
*
* Return: Success: SUCCEED
* Return: Success: SUCCEED
*
* Failure: FAIL
* Failure: FAIL
*
* Programmer: Robb Matzke
* Programmer: Robb Matzke
* Thursday, May 21, 1998
*
* Modifications:
@ -812,21 +867,18 @@ H5F_istore_flush (H5F_t *f)
static herr_t
H5F_istore_preempt (H5F_t *f, intn idx)
{
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
H5F_rdcc_ent_t *ent = rdcc->slot + idx;
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
H5F_rdcc_ent_t *ent = rdcc->slot + idx;
FUNC_ENTER (H5F_istore_preempt, FAIL);
assert (idx>=0 && idx<rdcc->nused);
assert (!ent->locked);
if (ent->dirty) H5F_istore_flush_entry (f, ent);
H5O_free (H5O_LAYOUT, ent->layout);
H5O_free (H5O_COMPRESS, ent->comp);
H5MM_xfree (ent->chunk);
H5F_istore_flush_entry (f, ent, TRUE);
HDmemmove (rdcc->slot+idx, rdcc->slot+idx+1,
(rdcc->nused-idx) * sizeof(H5F_rdcc_ent_t));
rdcc->nused -= 1;
rdcc->nbytes -= ent->chunk_size;
HDmemmove (rdcc->slot+idx, rdcc->slot+idx+1,
(rdcc->nused-idx) * sizeof(H5F_rdcc_ent_t));
FUNC_LEAVE (SUCCEED);
}
@ -858,10 +910,10 @@ H5F_istore_dest (H5F_t *f)
FUNC_ENTER (H5F_istore_dest, FAIL);
for (i=rdcc->nused-1; i>=0; --i) {
if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
if (H5F_istore_flush_entry (f, rdcc->slot+i, TRUE)<0) {
nerrors++;
}
if (H5F_istore_preempt (f, i)<0) {
if (H5F_istore_preempt(f, i)<0) {
nerrors++;
}
}
@ -951,7 +1003,7 @@ H5F_istore_prune (H5F_t *f, size_t size)
/*-------------------------------------------------------------------------
* Function: H5F_istore_lock
*
* Purpose: Return a pointer to an uncompressed chunk. The pointer
 * Purpose:	Return a pointer to a file chunk.  The pointer
* points directly into the chunk cache and should not be freed
* by the caller but will be valid until it is unlocked. The
* input value IDX_HINT is used to speed up cache lookups and
@ -963,7 +1015,7 @@ H5F_istore_prune (H5F_t *f, size_t size)
* for output functions that are about to overwrite the entire
* chunk.
*
* Return: Success: Ptr to an uncompressed chunk.
* Return: Success: Ptr to a file chunk.
*
* Failure: NULL
*
@ -976,7 +1028,7 @@ H5F_istore_prune (H5F_t *f, size_t size)
*/
static void *
H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, const hssize_t offset[],
const H5O_pline_t *pline, const hssize_t offset[],
hbool_t relax, intn *idx_hint/*in,out*/)
{
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
@ -984,8 +1036,9 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
intn i, j, found = -1;
H5F_istore_ud1_t udata; /*B-tree pass-through */
size_t chunk_size=0; /*size of a chunk */
size_t chunk_alloc=0; /*allocated chunk size */
herr_t status; /*func return status */
void *chunk=NULL; /*the uncompressed chunk*/
void *chunk=NULL; /*the file chunk */
void *temp=NULL; /*temporary chunk buffer*/
void *ret_value=NULL; /*return value */
@ -1031,7 +1084,8 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
for (i=0, chunk_size=1; i<layout->ndims; i++) {
chunk_size *= layout->dim[i];
}
if (NULL==(chunk=H5MM_malloc (chunk_size))) {
chunk_alloc = chunk_size;
if (NULL==(chunk=H5MM_malloc (chunk_alloc))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for raw data chunk");
}
@ -1045,39 +1099,30 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
udata.key.offset[i] = offset[i];
chunk_size *= layout->dim[i];
}
chunk_alloc = chunk_size;
udata.mesg = *layout;
H5F_addr_undef (&(udata.addr));
status = H5B_find (f, H5B_ISTORE, &(layout->addr), &udata);
H5E_clear ();
if (NULL==(chunk = H5MM_malloc (chunk_size))) {
if (NULL==(chunk = H5MM_malloc (chunk_alloc))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for raw data chunk");
}
if (status>=0 && H5F_addr_defined (&(udata.addr))) {
/*
* The chunk exists on disk but might be compressed. Instead of
* allocating the exact size for the compressed chunk we allocate
* the entire chunk size -- it reduces strain on the malloc()
* subsystem.
* The chunk exists on disk.
*/
if (H5F_block_read (f, &(udata.addr), udata.key.nbytes,
H5D_XFER_DFLT, chunk)<0) {
HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
"unable to read raw data chunk");
}
if (udata.key.nbytes<chunk_size) {
if (NULL==(temp = H5MM_malloc (chunk_size))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for uncompress");
}
if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
chunk, chunk_size, temp)) {
HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
"unable to uncompress raw data chunk");
}
H5MM_xfree (chunk);
chunk = temp;
temp = NULL;
if (H5Z_pipeline(f, pline, H5Z_FLAG_REVERSE,
&(udata.key.filter_mask), &(udata.key.nbytes),
&chunk_alloc, &chunk)<0 ||
udata.key.nbytes!=chunk_size) {
HGOTO_ERROR(H5E_PLINE, H5E_READERROR, NULL,
"data pipeline read failed");
}
rdcc->nmisses++;
} else {
@ -1117,8 +1162,9 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
ent->locked = 0;
ent->dirty = FALSE;
ent->chunk_size = chunk_size;
ent->alloc_size = chunk_alloc;
ent->layout = H5O_copy (H5O_LAYOUT, layout);
ent->comp = H5O_copy (H5O_COMPRESS, comp);
ent->pline = H5O_copy (H5O_PLINE, pline);
for (i=0; i<layout->ndims; i++) {
ent->offset[i] = offset[i];
}
@ -1191,7 +1237,7 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
*/
static herr_t
H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, hbool_t dirty,
const H5O_pline_t *pline, hbool_t dirty,
const hssize_t offset[], intn *idx_hint,
uint8 *chunk, size_t naccessed)
{
@ -1215,7 +1261,7 @@ H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
/*
* It's not in the cache, probably because it's too big. If it's
* dirty then flush it to disk. In any case, free the chunk.
* Note: we have to copy the layout and compression messages so we
* Note: we have to copy the layout and filter messages so we
* don't discard the `const' qualifier.
*/
if (dirty) {
@ -1223,17 +1269,16 @@ H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
HDmemset (&x, 0, sizeof x);
x.dirty = TRUE;
x.layout = H5O_copy (H5O_LAYOUT, layout);
x.comp = H5O_copy (H5O_COMPRESS, comp);
x.pline = H5O_copy (H5O_PLINE, pline);
for (i=0, x.chunk_size=1; i<layout->ndims; i++) {
x.offset[i] = offset[i];
x.chunk_size *= layout->dim[i];
}
x.chunk = chunk;
H5F_istore_flush_entry (f, &x);
H5O_free (H5O_LAYOUT, x.layout);
H5O_free (H5O_COMPRESS, x.comp);
H5F_istore_flush_entry (f, &x, TRUE);
} else {
H5MM_xfree (chunk);
}
H5MM_xfree (chunk);
} else {
/*
* It's in the cache so unlock it.
@ -1272,7 +1317,7 @@ H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
*/
herr_t
H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, const hssize_t offset_f[],
const H5O_pline_t *pline, const hssize_t offset_f[],
const hsize_t size[], void *buf)
{
hssize_t offset_m[H5O_LAYOUT_NDIMS];
@ -1375,7 +1420,7 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
l.dim[i] = layout->dim[i];
l.addr = udata.addr;
if (H5F_arr_read(f, &l,
comp, NULL /* no efl */,
pline, NULL /* no efl */,
sub_size, size_m,
sub_offset_m, offset_wrt_chunk,
H5D_XFER_DFLT, buf)==FAIL){
@ -1397,15 +1442,15 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
* Lock the chunk, transfer data to the application, then unlock the
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
if (NULL==(chunk=H5F_istore_lock (f, layout, pline, chunk_offset,
FALSE, &idx_hint))) {
HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
"unable to read raw data chunk");
}
H5V_hyper_copy(layout->ndims, sub_size, size_m, sub_offset_m,
(void*)buf, layout->dim, offset_wrt_chunk, chunk);
if (H5F_istore_unlock (f, layout, comp, FALSE, chunk_offset, &idx_hint,
chunk, naccessed)<0) {
if (H5F_istore_unlock (f, layout, pline, FALSE, chunk_offset,
&idx_hint, chunk, naccessed)<0) {
HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
"unable to unlock raw data chunk");
}
@ -1443,7 +1488,7 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
*/
herr_t
H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, const hssize_t offset_f[],
const H5O_pline_t *pline, const hssize_t offset_f[],
const hsize_t size[], const void *buf)
{
hssize_t offset_m[H5O_LAYOUT_NDIMS];
@ -1550,7 +1595,7 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
l.dim[i] = layout->dim[i];
l.addr = udata.addr;
if (H5F_arr_write(f, &l,
comp, NULL /* no efl */,
pline, NULL /* no efl */,
sub_size, size_m,
sub_offset_m, offset_wrt_chunk,
H5D_XFER_DFLT, buf)==FAIL){
@ -1571,7 +1616,7 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
if (NULL==(chunk=H5F_istore_lock (f, layout, pline, chunk_offset,
naccessed==chunk_size,
&idx_hint))) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
@ -1580,7 +1625,7 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
H5V_hyper_copy(layout->ndims, sub_size,
layout->dim, offset_wrt_chunk, chunk,
size_m, sub_offset_m, buf);
if (H5F_istore_unlock (f, layout, comp, TRUE, chunk_offset,
if (H5F_istore_unlock (f, layout, pline, TRUE, chunk_offset,
&idx_hint, chunk, naccessed)<0) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"uanble to unlock raw data chunk");
@ -1826,7 +1871,7 @@ H5F_istore_get_addr (H5F_t *f, const H5O_layout_t *layout,
*/
herr_t
H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
const hsize_t *space_dim, const H5O_compress_t *comp)
const hsize_t *space_dim, const H5O_pline_t *pline)
{
intn i, carry;
@ -1846,7 +1891,7 @@ H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
/* Check args */
assert(f);
assert(space_dim);
assert(comp);
assert(pline);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(&(layout->addr)));
@ -1885,13 +1930,13 @@ H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
if (NULL==(chunk=H5F_istore_lock (f, layout, pline, chunk_offset,
FALSE, &idx_hint))) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to read raw data chunk");
}
if (H5F_istore_unlock (f, layout, comp, TRUE, chunk_offset, &idx_hint,
chunk, chunk_size)<0) {
if (H5F_istore_unlock (f, layout, pline, TRUE, chunk_offset,
&idx_hint, chunk, chunk_size)<0) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"uanble to unlock raw data chunk");
}

View File

@ -46,7 +46,7 @@ typedef struct H5D_create_t {
intn chunk_ndims; /*chunk dimensionality */
hsize_t chunk_size[32]; /*chunk size if chunked storage */
H5O_efl_t efl; /*external file list */
H5O_compress_t compress; /*data compression parameters */
H5O_pline_t pline; /*data filter pipeline */
} H5D_create_t;
/* Dataset transfer property list */

View File

@ -55,7 +55,7 @@ static const H5E_major_mesg_t H5E_major_mesg_g[] = {
{H5E_STORAGE, "Data storage layer"},
{H5E_TEMPLATE, "Property list interface"},
{H5E_ATTR, "Attribute layer"},
{H5E_COMP, "Data compression layer"},
{H5E_PLINE, "Data filters layer"},
{H5E_EFL, "External file list"},
};
@ -85,7 +85,7 @@ static const H5E_minor_mesg_t H5E_minor_mesg_g[] = {
{H5E_ALREADYINIT, "Object already initialized"},
{H5E_BADATOM, "Unable to find atom information (already closed?)"},
{H5E_CANTREGISTER, "Unable to register new atom"},
{H5E_CANTFLUSH, "Unable to flush meta data from cache"},
{H5E_CANTFLUSH, "Unable to flush data from cache"},
{H5E_CANTLOAD, "Unable to load meta data into cache"},
{H5E_PROTECT, "Protected meta data error"},
{H5E_NOTCACHED, "Meta data not currently cached"},

View File

@ -72,7 +72,7 @@ typedef enum H5E_major_t {
H5E_STORAGE, /*data storage */
H5E_TEMPLATE, /*Property lists */
H5E_ATTR, /*Attribute */
H5E_COMP, /*Data compression */
H5E_PLINE, /*Data filters */
H5E_EFL /*External file list */
} H5E_major_t;

View File

@ -114,7 +114,7 @@ H5F_arr_create (H5F_t *f, struct H5O_layout_t *layout/*in,out*/)
*/
herr_t
H5F_arr_read (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const struct H5O_pline_t *pline, const struct H5O_efl_t *efl,
const hsize_t _hslab_size[], const hsize_t mem_size[],
const hssize_t mem_offset[], const hssize_t file_offset[],
const H5D_transfer_t xfer_mode, void *_buf/*out*/)
@ -182,11 +182,11 @@ H5F_arr_read (H5F_t *f, const struct H5O_layout_t *layout,
}
/*
* Compression cannot be used for contiguous data.
* Filters cannot be used for contiguous data.
*/
if (comp && H5Z_NONE!=comp->method) {
if (pline && pline->nfilters>0) {
HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
"compression is not allowed for contiguous data");
"filters are not allowed for contiguous data");
}
/*
@ -286,7 +286,7 @@ H5F_arr_read (H5F_t *f, const struct H5O_layout_t *layout,
"unable to copy into a proper hyperslab");
}
}
if (H5F_istore_read (f, layout, comp, file_offset, hslab_size,
if (H5F_istore_read (f, layout, pline, file_offset, hslab_size,
buf)<0) {
HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL, "chunked read failed");
}
@ -332,10 +332,11 @@ H5F_arr_read (H5F_t *f, const struct H5O_layout_t *layout,
*/
herr_t
H5F_arr_write (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const hsize_t _hslab_size[], const hsize_t mem_size[],
const hssize_t mem_offset[], const hssize_t file_offset[],
const H5D_transfer_t xfer_mode, const void *_buf)
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, const hsize_t _hslab_size[],
const hsize_t mem_size[], const hssize_t mem_offset[],
const hssize_t file_offset[], const H5D_transfer_t xfer_mode,
const void *_buf)
{
const uint8 *buf = (const uint8 *)_buf; /*cast for arithmetic */
hssize_t file_stride[H5O_LAYOUT_NDIMS]; /*strides through file */
@ -399,11 +400,11 @@ H5F_arr_write (H5F_t *f, const struct H5O_layout_t *layout,
}
/*
* Compression cannot be used for contiguous data
* Filters cannot be used for contiguous data
*/
if (comp && H5Z_NONE!=comp->method) {
if (pline && pline->nfilters>0) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"compression is not allowed for contiguous data");
"filters are not allowed for contiguous data");
}
/*
@ -503,7 +504,7 @@ printf("nelmts=%lu, min=%lu, max=%lu\n", temp, min, max);
"unable to copy from a proper hyperslab");
}
}
if (H5F_istore_write (f, layout, comp, file_offset, hslab_size,
if (H5F_istore_write (f, layout, pline, file_offset, hslab_size,
buf)<0) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"chunked write failed");

View File

@ -33,12 +33,13 @@ typedef struct H5F_rdcc_ent_t {
hbool_t locked; /*entry is locked in cache */
hbool_t dirty; /*needs to be written to disk? */
H5O_layout_t *layout; /*the layout message */
H5O_compress_t *comp; /*compression message */
H5O_pline_t *pline; /*filter pipeline message */
hssize_t offset[H5O_LAYOUT_NDIMS]; /*chunk name */
size_t chunk_size; /*size of a chunk */
size_t rd_count; /*bytes remaining to be read */
size_t wr_count; /*bytes remaining to be written */
uint8 *chunk; /*the uncompressed chunk data */
size_t chunk_size; /*size of a chunk */
size_t alloc_size; /*amount allocated for the chunk */
uint8 *chunk; /*the unfiltered chunk data */
} H5F_rdcc_ent_t;
/* Private prototypes */
@ -83,8 +84,9 @@ static herr_t H5F_istore_get_addr (H5F_t *f, const H5O_layout_t *layout,
* The chunk's file address is part of the B-tree and not part of the key.
*/
typedef struct H5F_istore_key_t {
hsize_t nbytes; /*size of stored data */
size_t nbytes; /*size of stored data */
hssize_t offset[H5O_LAYOUT_NDIMS]; /*logical offset to start*/
uintn filter_mask; /*excluded filters */
} H5F_istore_key_t;
typedef struct H5F_istore_ud1_t {
@ -141,6 +143,7 @@ H5F_istore_sizeof_rkey(H5F_t __unused__ *f, const void *_udata)
assert(udata->mesg.ndims > 0 && udata->mesg.ndims <= H5O_LAYOUT_NDIMS);
nbytes = 4 + /*storage size */
4 + /*filter mask */
udata->mesg.ndims * 4; /*dimension indices */
return nbytes;
@ -180,7 +183,8 @@ H5F_istore_decode_key(H5F_t __unused__ *f, H5B_t *bt, uint8 *raw, void *_key)
assert(ndims > 0 && ndims <= H5O_LAYOUT_NDIMS);
/* decode */
UINT32DECODE (raw, key->nbytes);
UINT32DECODE(raw, key->nbytes);
UINT32DECODE(raw, key->filter_mask);
for (i = 0; i < ndims; i++) {
UINT32DECODE(raw, key->offset[i]);
}
@ -222,7 +226,8 @@ H5F_istore_encode_key(H5F_t __unused__ *f, H5B_t *bt, uint8 *raw, void *_key)
assert(ndims > 0 && ndims <= H5O_LAYOUT_NDIMS);
/* encode */
UINT32ENCODE (raw, key->nbytes);
UINT32ENCODE(raw, key->nbytes);
UINT32ENCODE(raw, key->filter_mask);
for (i = 0; i < ndims; i++) {
UINT32ENCODE(raw, key->offset[i]);
}
@ -258,10 +263,12 @@ H5F_istore_debug_key (FILE *stream, intn indent, intn fwidth,
FUNC_ENTER (H5F_istore_debug_key, FAIL);
assert (key);
HDfprintf (stream, "%*s%-*s %Hd bytes\n", indent, "", fwidth,
"Chunk size:", key->nbytes);
HDfprintf (stream, "%*s%-*s {", indent, "", fwidth,
"Logical offset:");
HDfprintf(stream, "%*s%-*s %Zd bytes\n", indent, "", fwidth,
"Chunk size:", key->nbytes);
HDfprintf(stream, "%*s%-*s 0x%08x\n", indent, "", fwidth,
"Filter mask:", key->filter_mask);
HDfprintf(stream, "%*s%-*s {", indent, "", fwidth,
"Logical offset:");
for (i=0; i<udata->mesg.ndims; i++) {
HDfprintf (stream, "%s%Hd", i?", ":"", key->offset[i]);
}
@ -429,6 +436,7 @@ H5F_istore_new_node(H5F_t *f, H5B_ins_t op,
* inserted into the tree.
*/
lt_key->nbytes = udata->key.nbytes;
lt_key->filter_mask = udata->key.filter_mask;
for (i=0; i<udata->mesg.ndims; i++) {
lt_key->offset[i] = udata->key.offset[i];
}
@ -439,6 +447,7 @@ H5F_istore_new_node(H5F_t *f, H5B_ins_t op,
*/
if (H5B_INS_LEFT != op) {
rt_key->nbytes = 0;
rt_key->filter_mask = 0;
for (i=0; i<udata->mesg.ndims; i++) {
assert (udata->mesg.dim[i] < MAX_HSSIZET);
assert (udata->key.offset[i]+(hssize_t)(udata->mesg.dim[i]) >
@ -494,6 +503,7 @@ H5F_istore_found(H5F_t __unused__ *f, const haddr_t *addr,
/* Initialize return values */
udata->addr = *addr;
udata->key.nbytes = lt_key->nbytes;
udata->key.filter_mask = lt_key->filter_mask;
assert (lt_key->nbytes>0);
for (i = 0; i < udata->mesg.ndims; i++) {
udata->key.offset[i] = lt_key->offset[i];
@ -589,6 +599,7 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
"unable to reallocate chunk storage");
}
lt_key->nbytes = udata->key.nbytes;
lt_key->filter_mask = udata->key.filter_mask;
*lt_key_changed = TRUE;
udata->addr = *new_node;
ret_value = H5B_INS_CHANGE;
@ -608,6 +619,7 @@ H5F_istore_insert(H5F_t *f, const haddr_t *addr, void *_lt_key,
* current node. The MD_KEY is where the split occurs.
*/
md_key->nbytes = udata->key.nbytes;
md_key->filter_mask = udata->key.filter_mask;
for (i=0; i<udata->mesg.ndims; i++) {
assert(0 == udata->key.offset[i] % udata->mesg.dim[i]);
md_key->offset[i] = udata->key.offset[i];
@ -677,7 +689,10 @@ H5F_istore_init (H5F_t *f)
/*-------------------------------------------------------------------------
* Function: H5F_istore_flush_entry
*
* Purpose: Writes a chunk to disk.
* Purpose: Writes a chunk to disk. If RESET is non-zero then the
* entry is cleared -- it's slightly faster to flush a chunk if
* the RESET flag is turned on because it results in one fewer
* memory copy.
*
* Return: Success: SUCCEED
*
@ -691,65 +706,106 @@ H5F_istore_init (H5F_t *f)
*-------------------------------------------------------------------------
*/
static herr_t
H5F_istore_flush_entry (H5F_t *f, H5F_rdcc_ent_t *ent)
H5F_istore_flush_entry (H5F_t *f, H5F_rdcc_ent_t *ent, hbool_t reset)
{
void *c_buf = NULL; /*temp compression buffer */
void *out_ptr = NULL; /*ptr to output buffer */
size_t nbytes; /*size of output buffer */
herr_t ret_value = FAIL; /*return value */
H5F_istore_ud1_t udata; /*pass through B-tree */
intn i;
herr_t ret_value=FAIL; /*return value */
H5F_istore_ud1_t udata; /*pass through B-tree */
intn i; /*counters */
void *buf=NULL; /*temporary buffer */
size_t alloc; /*bytes allocated for BUF */
hbool_t point_of_no_return = FALSE;
FUNC_ENTER (H5F_istore_flush_entry, FAIL);
assert (ent);
assert(f);
assert(ent);
assert (!ent->locked);
if (!ent->dirty) HRETURN (SUCCEED);
/* Should the chunk be compressed before writing it to disk? */
if (ent->comp && H5Z_NONE!=ent->comp->method) {
if (NULL==(c_buf = H5MM_malloc (ent->chunk_size))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for data compression");
buf = ent->chunk;
if (ent->dirty) {
udata.mesg = *(ent->layout);
udata.key.filter_mask = 0;
H5F_addr_undef(&(udata.addr));
udata.key.nbytes = ent->chunk_size;
for (i=0; i<ent->layout->ndims; i++) {
udata.key.offset[i] = ent->offset[i];
}
nbytes = H5Z_compress (ent->comp, ent->chunk_size, ent->chunk, c_buf);
if (nbytes && nbytes<ent->chunk_size) {
out_ptr = c_buf;
} else {
out_ptr = ent->chunk;
nbytes = ent->chunk_size;
alloc = ent->alloc_size;
/* Should the chunk be filtered before writing it to disk? */
if (ent->pline && ent->pline->nfilters) {
if (!reset) {
/*
* Copy the chunk to a new buffer before running it through
* the pipeline because we'll want to save the original buffer
* for later.
*/
alloc = ent->chunk_size;
if (NULL==(buf = H5MM_malloc(alloc))) {
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for pipeline");
}
HDmemcpy(buf, ent->chunk, ent->chunk_size);
} else {
/*
* If we are reseting and something goes wrong after this
* point then it's too late to recover because we may have
* destroyed the original data by calling H5Z_pipeline().
* The only safe option is to continue with the reset
* even if we can't write the data to disk.
*/
point_of_no_return = TRUE;
}
if (H5Z_pipeline(f, ent->pline, 0, &(udata.key.filter_mask),
&(udata.key.nbytes), &alloc, &buf)<0) {
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL,
"output pipeline failed");
}
}
} else {
out_ptr = ent->chunk;
nbytes = ent->chunk_size;
/*
* Create the chunk if it doesn't exist, or reallocate the chunk if
* its size changed. Then write the data into the file.
*/
if (H5B_insert(f, H5B_ISTORE, &(ent->layout->addr), &udata)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to allocate chunk");
}
if (H5F_block_write (f, &(udata.addr), udata.key.nbytes, H5D_XFER_DFLT,
buf)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to write raw data to file");
}
/* Mark cache entry as clean */
ent->dirty = FALSE;
f->shared->rdcc.nflushes++;
}
/*
* Create the chunk if it doesn't exist, or reallocate the chunk if its
* size changed. Then write the data into the file.
*/
udata.mesg = *(ent->layout);
H5F_addr_undef(&(udata.addr));
udata.key.nbytes = nbytes;
for (i=0; i<ent->layout->ndims; i++) {
udata.key.offset[i] = ent->offset[i];
/* Reset */
if (reset) {
point_of_no_return = FALSE;
ent->layout = H5O_free(H5O_LAYOUT, ent->layout);
ent->pline = H5O_free(H5O_PLINE, ent->pline);
if (buf==ent->chunk) buf = NULL;
ent->chunk = H5MM_xfree(ent->chunk);
}
if (H5B_insert(f, H5B_ISTORE, &(ent->layout->addr), &udata)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to allocate chunk");
}
if (H5F_block_write (f, &(udata.addr), nbytes, H5D_XFER_DFLT, out_ptr)<0) {
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to write raw data to file");
}
/* Mark cache entry as clean */
ent->dirty = FALSE;
f->shared->rdcc.nflushes++;
ret_value = SUCCEED;
done:
H5MM_xfree (c_buf);
/* Free the temp buffer only if it's different than the entry chunk */
if (buf!=ent->chunk) H5MM_xfree(buf);
/*
* If we reached the point of no return then we have no choice but to
* reset the entry. This can only happen if RESET is true but the
* output pipeline failed.
*/
if (ret_value<0 && point_of_no_return) {
ent->layout = H5O_free(H5O_LAYOUT, ent->layout);
ent->pline = H5O_free(H5O_PLINE, ent->pline);
ent->chunk = H5MM_xfree(ent->chunk);
}
FUNC_LEAVE (ret_value);
}
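In this change the RESET=TRUE path is used only when the entry is being discarded anyway -- H5F_istore_preempt(), H5F_istore_dest(), and the not-in-cache branch of H5F_istore_unlock() in the hunks that follow -- so the pipeline may clobber the cached buffer in place; H5F_istore_flush() passes FALSE and pays for the extra copy so the cached chunk survives the flush.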
@ -780,7 +836,7 @@ H5F_istore_flush (H5F_t *f)
FUNC_ENTER (H5F_istore_flush, FAIL);
for (i=0; i<rdcc->nused; i++) {
if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
if (H5F_istore_flush_entry (f, rdcc->slot+i, FALSE)<0) {
nerrors++;
}
}
@ -791,18 +847,17 @@ H5F_istore_flush (H5F_t *f)
FUNC_LEAVE (SUCCEED);
}
/*-------------------------------------------------------------------------
* Function: H5F_istore_preempt
* Function: H5F_istore_preempt
*
* Purpose: Preempts the specified entry from the cache, flushing it to
* disk if necessary.
* Purpose: Preempts the specified entry from the cache, flushing it to
* disk if necessary.
*
* Return: Success: SUCCEED
* Return: Success: SUCCEED
*
* Failure: FAIL
* Failure: FAIL
*
* Programmer: Robb Matzke
* Programmer: Robb Matzke
* Thursday, May 21, 1998
*
* Modifications:
@ -812,21 +867,18 @@ H5F_istore_flush (H5F_t *f)
static herr_t
H5F_istore_preempt (H5F_t *f, intn idx)
{
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
H5F_rdcc_ent_t *ent = rdcc->slot + idx;
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
H5F_rdcc_ent_t *ent = rdcc->slot + idx;
FUNC_ENTER (H5F_istore_preempt, FAIL);
assert (idx>=0 && idx<rdcc->nused);
assert (!ent->locked);
if (ent->dirty) H5F_istore_flush_entry (f, ent);
H5O_free (H5O_LAYOUT, ent->layout);
H5O_free (H5O_COMPRESS, ent->comp);
H5MM_xfree (ent->chunk);
H5F_istore_flush_entry (f, ent, TRUE);
HDmemmove (rdcc->slot+idx, rdcc->slot+idx+1,
(rdcc->nused-idx) * sizeof(H5F_rdcc_ent_t));
rdcc->nused -= 1;
rdcc->nbytes -= ent->chunk_size;
HDmemmove (rdcc->slot+idx, rdcc->slot+idx+1,
(rdcc->nused-idx) * sizeof(H5F_rdcc_ent_t));
FUNC_LEAVE (SUCCEED);
}
@ -858,10 +910,10 @@ H5F_istore_dest (H5F_t *f)
FUNC_ENTER (H5F_istore_dest, FAIL);
for (i=rdcc->nused-1; i>=0; --i) {
if (H5F_istore_flush_entry (f, rdcc->slot+i)<0) {
if (H5F_istore_flush_entry (f, rdcc->slot+i, TRUE)<0) {
nerrors++;
}
if (H5F_istore_preempt (f, i)<0) {
if (H5F_istore_preempt(f, i)<0) {
nerrors++;
}
}
@ -951,7 +1003,7 @@ H5F_istore_prune (H5F_t *f, size_t size)
/*-------------------------------------------------------------------------
* Function: H5F_istore_lock
*
* Purpose: Return a pointer to an uncompressed chunk. The pointer
* Purpose: Return a pointer to a file chunk. The pointer
* points directly into the chunk cache and should not be freed
* by the caller but will be valid until it is unlocked. The
* input value IDX_HINT is used to speed up cache lookups and
@ -963,7 +1015,7 @@ H5F_istore_prune (H5F_t *f, size_t size)
* for output functions that are about to overwrite the entire
* chunk.
*
* Return: Success: Ptr to an uncompressed chunk.
* Return: Success: Ptr to a file chunk.
*
* Failure: NULL
*
@ -976,7 +1028,7 @@ H5F_istore_prune (H5F_t *f, size_t size)
*/
static void *
H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, const hssize_t offset[],
const H5O_pline_t *pline, const hssize_t offset[],
hbool_t relax, intn *idx_hint/*in,out*/)
{
H5F_rdcc_t *rdcc = &(f->shared->rdcc);
@ -984,8 +1036,9 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
intn i, j, found = -1;
H5F_istore_ud1_t udata; /*B-tree pass-through */
size_t chunk_size=0; /*size of a chunk */
size_t chunk_alloc=0; /*allocated chunk size */
herr_t status; /*func return status */
void *chunk=NULL; /*the uncompressed chunk*/
void *chunk=NULL; /*the file chunk */
void *temp=NULL; /*temporary chunk buffer*/
void *ret_value=NULL; /*return value */
@ -1031,7 +1084,8 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
for (i=0, chunk_size=1; i<layout->ndims; i++) {
chunk_size *= layout->dim[i];
}
if (NULL==(chunk=H5MM_malloc (chunk_size))) {
chunk_alloc = chunk_size;
if (NULL==(chunk=H5MM_malloc (chunk_alloc))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for raw data chunk");
}
@ -1045,39 +1099,30 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
udata.key.offset[i] = offset[i];
chunk_size *= layout->dim[i];
}
chunk_alloc = chunk_size;
udata.mesg = *layout;
H5F_addr_undef (&(udata.addr));
status = H5B_find (f, H5B_ISTORE, &(layout->addr), &udata);
H5E_clear ();
if (NULL==(chunk = H5MM_malloc (chunk_size))) {
if (NULL==(chunk = H5MM_malloc (chunk_alloc))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for raw data chunk");
}
if (status>=0 && H5F_addr_defined (&(udata.addr))) {
/*
* The chunk exists on disk but might be compressed. Instead of
* allocating the exact size for the compressed chunk we allocate
* the entire chunk size -- it reduces strain on the malloc()
* subsystem.
* The chunk exists on disk.
*/
if (H5F_block_read (f, &(udata.addr), udata.key.nbytes,
H5D_XFER_DFLT, chunk)<0) {
HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
"unable to read raw data chunk");
}
if (udata.key.nbytes<chunk_size) {
if (NULL==(temp = H5MM_malloc (chunk_size))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for uncompress");
}
if (chunk_size!=H5Z_uncompress (comp, udata.key.nbytes,
chunk, chunk_size, temp)) {
HGOTO_ERROR (H5E_IO, H5E_READERROR, NULL,
"unable to uncompress raw data chunk");
}
H5MM_xfree (chunk);
chunk = temp;
temp = NULL;
if (H5Z_pipeline(f, pline, H5Z_FLAG_REVERSE,
&(udata.key.filter_mask), &(udata.key.nbytes),
&chunk_alloc, &chunk)<0 ||
udata.key.nbytes!=chunk_size) {
HGOTO_ERROR(H5E_PLINE, H5E_READERROR, NULL,
"data pipeline read failed");
}
rdcc->nmisses++;
} else {
@ -1117,8 +1162,9 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
ent->locked = 0;
ent->dirty = FALSE;
ent->chunk_size = chunk_size;
ent->alloc_size = chunk_alloc;
ent->layout = H5O_copy (H5O_LAYOUT, layout);
ent->comp = H5O_copy (H5O_COMPRESS, comp);
ent->pline = H5O_copy (H5O_PLINE, pline);
for (i=0; i<layout->ndims; i++) {
ent->offset[i] = offset[i];
}
@ -1191,7 +1237,7 @@ H5F_istore_lock (H5F_t *f, const H5O_layout_t *layout,
*/
static herr_t
H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, hbool_t dirty,
const H5O_pline_t *pline, hbool_t dirty,
const hssize_t offset[], intn *idx_hint,
uint8 *chunk, size_t naccessed)
{
@ -1215,7 +1261,7 @@ H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
/*
* It's not in the cache, probably because it's too big. If it's
* dirty then flush it to disk. In any case, free the chunk.
* Note: we have to copy the layout and compression messages so we
* Note: we have to copy the layout and filter messages so we
* don't discard the `const' qualifier.
*/
if (dirty) {
@ -1223,17 +1269,16 @@ H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
HDmemset (&x, 0, sizeof x);
x.dirty = TRUE;
x.layout = H5O_copy (H5O_LAYOUT, layout);
x.comp = H5O_copy (H5O_COMPRESS, comp);
x.pline = H5O_copy (H5O_PLINE, pline);
for (i=0, x.chunk_size=1; i<layout->ndims; i++) {
x.offset[i] = offset[i];
x.chunk_size *= layout->dim[i];
}
x.chunk = chunk;
H5F_istore_flush_entry (f, &x);
H5O_free (H5O_LAYOUT, x.layout);
H5O_free (H5O_COMPRESS, x.comp);
H5F_istore_flush_entry (f, &x, TRUE);
} else {
H5MM_xfree (chunk);
}
H5MM_xfree (chunk);
} else {
/*
* It's in the cache so unlock it.
@ -1272,7 +1317,7 @@ H5F_istore_unlock (H5F_t *f, const H5O_layout_t *layout,
*/
herr_t
H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, const hssize_t offset_f[],
const H5O_pline_t *pline, const hssize_t offset_f[],
const hsize_t size[], void *buf)
{
hssize_t offset_m[H5O_LAYOUT_NDIMS];
@ -1375,7 +1420,7 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
l.dim[i] = layout->dim[i];
l.addr = udata.addr;
if (H5F_arr_read(f, &l,
comp, NULL /* no efl */,
pline, NULL /* no efl */,
sub_size, size_m,
sub_offset_m, offset_wrt_chunk,
H5D_XFER_DFLT, buf)==FAIL){
@ -1397,15 +1442,15 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
* Lock the chunk, transfer data to the application, then unlock the
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
if (NULL==(chunk=H5F_istore_lock (f, layout, pline, chunk_offset,
FALSE, &idx_hint))) {
HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
"unable to read raw data chunk");
}
H5V_hyper_copy(layout->ndims, sub_size, size_m, sub_offset_m,
(void*)buf, layout->dim, offset_wrt_chunk, chunk);
if (H5F_istore_unlock (f, layout, comp, FALSE, chunk_offset, &idx_hint,
chunk, naccessed)<0) {
if (H5F_istore_unlock (f, layout, pline, FALSE, chunk_offset,
&idx_hint, chunk, naccessed)<0) {
HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL,
"unable to unlock raw data chunk");
}
@ -1443,7 +1488,7 @@ H5F_istore_read(H5F_t *f, const H5O_layout_t *layout,
*/
herr_t
H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
const H5O_compress_t *comp, const hssize_t offset_f[],
const H5O_pline_t *pline, const hssize_t offset_f[],
const hsize_t size[], const void *buf)
{
hssize_t offset_m[H5O_LAYOUT_NDIMS];
@ -1550,7 +1595,7 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
l.dim[i] = layout->dim[i];
l.addr = udata.addr;
if (H5F_arr_write(f, &l,
comp, NULL /* no efl */,
pline, NULL /* no efl */,
sub_size, size_m,
sub_offset_m, offset_wrt_chunk,
H5D_XFER_DFLT, buf)==FAIL){
@ -1571,7 +1616,7 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
if (NULL==(chunk=H5F_istore_lock (f, layout, pline, chunk_offset,
naccessed==chunk_size,
&idx_hint))) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
@ -1580,7 +1625,7 @@ H5F_istore_write(H5F_t *f, const H5O_layout_t *layout,
H5V_hyper_copy(layout->ndims, sub_size,
layout->dim, offset_wrt_chunk, chunk,
size_m, sub_offset_m, buf);
if (H5F_istore_unlock (f, layout, comp, TRUE, chunk_offset,
if (H5F_istore_unlock (f, layout, pline, TRUE, chunk_offset,
&idx_hint, chunk, naccessed)<0) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"uanble to unlock raw data chunk");
@ -1826,7 +1871,7 @@ H5F_istore_get_addr (H5F_t *f, const H5O_layout_t *layout,
*/
herr_t
H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
const hsize_t *space_dim, const H5O_compress_t *comp)
const hsize_t *space_dim, const H5O_pline_t *pline)
{
intn i, carry;
@ -1846,7 +1891,7 @@ H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
/* Check args */
assert(f);
assert(space_dim);
assert(comp);
assert(pline);
assert(layout && H5D_CHUNKED==layout->type);
assert(layout->ndims>0 && layout->ndims<=H5O_LAYOUT_NDIMS);
assert(H5F_addr_defined(&(layout->addr)));
@ -1885,13 +1930,13 @@ H5F_istore_allocate (H5F_t *f, const H5O_layout_t *layout,
* Lock the chunk, copy from application to chunk, then unlock the
* chunk.
*/
if (NULL==(chunk=H5F_istore_lock (f, layout, comp, chunk_offset,
if (NULL==(chunk=H5F_istore_lock (f, layout, pline, chunk_offset,
FALSE, &idx_hint))) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to read raw data chunk");
}
if (H5F_istore_unlock (f, layout, comp, TRUE, chunk_offset, &idx_hint,
chunk, chunk_size)<0) {
if (H5F_istore_unlock (f, layout, pline, TRUE, chunk_offset,
&idx_hint, chunk, chunk_size)<0) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"uanble to unlock raw data chunk");
}
@ -493,7 +493,7 @@ typedef struct H5F_t {
struct H5O_layout_t; /*forward decl for prototype arguments */
struct H5O_efl_t; /*forward decl for prototype arguments */
struct H5O_compress_t; /*forward decl for prototype arguments */
struct H5O_pline_t; /*forward decl for prototype arguments */
/* library variables */
extern const H5F_create_t H5F_create_dflt;
@ -514,13 +514,13 @@ herr_t H5F_istore_debug(H5F_t *f, const haddr_t *addr, FILE * stream,
/* Functions that operate on array storage */
herr_t H5F_arr_create(H5F_t *f, struct H5O_layout_t *layout /*in,out*/);
herr_t H5F_arr_read (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, const hsize_t _hslab_size[],
const hsize_t mem_size[], const hssize_t mem_offset[],
const hssize_t file_offset[],
const H5D_transfer_t xfer_mode, void *_buf/*out*/);
herr_t H5F_arr_write (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, const hsize_t _hslab_size[],
const hsize_t mem_size[], const hssize_t mem_offset[],
const hssize_t file_offset[],
@ -533,17 +533,17 @@ herr_t H5F_istore_dest (H5F_t *f);
herr_t H5F_istore_stats (H5F_t *f, hbool_t headers);
herr_t H5F_istore_create(H5F_t *f, struct H5O_layout_t *layout /*in,out*/);
herr_t H5F_istore_read(H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const hssize_t offset[], const hsize_t size[],
void *buf /*out */ );
herr_t H5F_istore_write(H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const hssize_t offset[], const hsize_t size[],
const void *buf);
herr_t H5F_istore_allocate (H5F_t *f,
const struct H5O_layout_t *layout,
const hsize_t *space_dim,
const struct H5O_compress_t *comp);
const struct H5O_pline_t *pline);
/* Functions that operate on contiguous storage wrt boot block */
herr_t H5F_block_read(H5F_t *f, const haddr_t *addr, hsize_t size,
@ -2176,7 +2176,7 @@ H5G_get_comment(H5G_t *loc, const char *name, size_t bufsize, char *buf)
retval = 0;
} else {
strncpy(buf, comment.s, bufsize);
retval = strlen(comment.s);
retval = (intn)strlen(comment.s);
H5O_reset(H5O_NAME, &comment);
}
@ -62,7 +62,7 @@ static const H5O_class_t *const message_type_g[] = {
H5O_LAYOUT, /*0x0008 Data Layout */
NULL, /*0x0009 Not assigned */
NULL, /*0x000A Not assigned */
H5O_COMPRESS, /*0x000B Data storage -- compressed object */
H5O_PLINE, /*0x000B Data storage -- filter pipeline */
H5O_ATTR, /*0x000C Attribute list */
H5O_NAME, /*0x000D Object name */
H5O_MTIME, /*0x000E Object modification date and time */
@ -5,48 +5,49 @@
* Programmer: Robb Matzke <matzke@llnl.gov>
* Wednesday, April 15, 1998
*
* Purpose: Data compression message.
* Purpose: Data filter pipeline message.
*/
#include <H5private.h>
#include <H5Eprivate.h>
#include <H5MMprivate.h>
#include <H5Oprivate.h>
#define PABLO_MASK H5O_comp_mask
/* PRIVATE PROTOTYPES */
static herr_t H5O_comp_encode (H5F_t *f, uint8 *p, const void *mesg);
static void *H5O_comp_decode (H5F_t *f, const uint8 *p, H5O_shared_t *sh);
static void *H5O_comp_copy (const void *_mesg, void *_dest);
static size_t H5O_comp_size (H5F_t *f, const void *_mesg);
static herr_t H5O_comp_reset (void *_mesg);
static herr_t H5O_comp_debug (H5F_t *f, const void *_mesg,
FILE * stream, intn indent, intn fwidth);
/* This message derives from H5O */
const H5O_class_t H5O_COMPRESS[1] = {{
H5O_COMPRESS_ID, /* message id number */
"compression", /* message name for debugging */
sizeof(H5O_compress_t), /* native message size */
H5O_comp_decode, /* decode message */
H5O_comp_encode, /* encode message */
H5O_comp_copy, /* copy the native value */
H5O_comp_size, /* size of raw message */
H5O_comp_reset, /* reset method */
NULL, /* get share method */
NULL, /* set share method */
H5O_comp_debug, /* debug the message */
}};
/* Interface initialization */
#define PABLO_MASK H5O_pline_mask
static hbool_t interface_initialize_g = FALSE;
#define INTERFACE_INIT NULL
#define H5O_PLINE_VERSION 1
static herr_t H5O_pline_encode (H5F_t *f, uint8 *p, const void *mesg);
static void *H5O_pline_decode (H5F_t *f, const uint8 *p, H5O_shared_t *sh);
static void *H5O_pline_copy (const void *_mesg, void *_dest);
static size_t H5O_pline_size (H5F_t *f, const void *_mesg);
static herr_t H5O_pline_reset (void *_mesg);
static herr_t H5O_pline_debug (H5F_t *f, const void *_mesg,
FILE * stream, intn indent, intn fwidth);
/* This message derives from H5O */
const H5O_class_t H5O_PLINE[1] = {{
H5O_PLINE_ID, /* message id number */
"filter pipeline", /* message name for debugging */
sizeof(H5O_pline_t), /* native message size */
H5O_pline_decode, /* decode message */
H5O_pline_encode, /* encode message */
H5O_pline_copy, /* copy the native value */
H5O_pline_size, /* size of raw message */
H5O_pline_reset, /* reset method */
NULL, /* get share method */
NULL, /* set share method */
H5O_pline_debug, /* debug the message */
}};
/*-------------------------------------------------------------------------
* Function: H5O_comp_decode
* Function: H5O_pline_decode
*
* Purpose: Decodes a compression message.
* Purpose: Decodes a filter pipeline message.
*
* Return: Success: Ptr to the native message.
*
@ -60,46 +61,97 @@ static hbool_t interface_initialize_g = FALSE;
*-------------------------------------------------------------------------
*/
static void *
H5O_comp_decode(H5F_t __unused__ *f, const uint8 *p,
H5O_pline_decode(H5F_t __unused__ *f, const uint8 *p,
H5O_shared_t __unused__ *sh)
{
H5O_compress_t *comp = NULL;
H5O_pline_t *pline = NULL;
void *ret_value = NULL;
uintn version;
size_t i, j, n, name_length;
FUNC_ENTER(H5O_comp_decode, NULL);
FUNC_ENTER(H5O_pline_decode, NULL);
/* check args */
assert(p);
/* Decode */
if (NULL==(comp = H5MM_calloc(sizeof *comp))) {
if (NULL==(pline = H5MM_calloc(sizeof *pline))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed");
}
comp->method = *p++;
comp->flags = *p++;
UINT16DECODE (p, comp->cd_size);
if (comp->cd_size>0) {
if (NULL==(comp->client_data = H5MM_malloc (comp->cd_size))) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed");
}
HDmemcpy (comp->client_data, p, comp->cd_size);
version = *p++;
if (version!=H5O_PLINE_VERSION) {
HGOTO_ERROR(H5E_PLINE, H5E_CANTLOAD, NULL,
"bad version number for filter pipeline message");
}
ret_value = comp;
pline->nfilters = *p++;
if (pline->nfilters>32) {
HGOTO_ERROR(H5E_PLINE, H5E_CANTLOAD, NULL,
"filter pipeline message has too many filters");
}
p += 6; /*reserved*/
pline->nalloc = pline->nfilters;
pline->filter = H5MM_calloc(pline->nalloc*sizeof(pline->filter[0]));
if (NULL==pline->filter) {
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed");
}
for (i=0; i<pline->nfilters; i++) {
UINT16DECODE(p, pline->filter[i].id);
UINT16DECODE(p, name_length);
if (name_length % 8) {
HGOTO_ERROR(H5E_PLINE, H5E_CANTLOAD, NULL,
"filter name length is not a multiple of eight");
}
UINT16DECODE(p, pline->filter[i].flags);
UINT16DECODE(p, pline->filter[i].cd_nelmts);
if (name_length) {
/*
* Get the name, allocating an extra byte for an extra null
* terminator just in case there isn't one in the file (there
* should be, but to be safe...)
*/
pline->filter[i].name = H5MM_malloc(name_length+1);
memcpy(pline->filter[i].name, p, name_length);
pline->filter[i].name[name_length] = '\0';
p += name_length;
}
if ((n=pline->filter[i].cd_nelmts)) {
/*
* Read the client data values and the padding
*/
pline->filter[i].cd_values = H5MM_malloc(n*sizeof(uintn));
if (NULL==pline->filter[i].cd_values) {
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed for client data");
}
for (j=0; j<pline->filter[i].cd_nelmts; j++) {
UINT32DECODE(p, pline->filter[i].cd_values[j]);
}
if (pline->filter[i].cd_nelmts % 2) {
p += 4; /*padding*/
}
}
}
ret_value = pline;
done:
if (NULL==ret_value && comp) {
H5MM_xfree (comp->client_data);
H5MM_xfree (comp);
if (NULL==ret_value && pline) {
if (pline->filter) {
for (i=0; i<pline->nfilters; i++) {
H5MM_xfree(pline->filter[i].name);
H5MM_xfree(pline->filter[i].cd_values);
}
H5MM_xfree(pline->filter);
}
H5MM_xfree(pline);
}
FUNC_LEAVE(ret_value);
}
/*-------------------------------------------------------------------------
* Function: H5O_comp_encode
* Function: H5O_pline_encode
*
* Purpose: Encodes message MESG into buffer P.
*
@ -115,21 +167,47 @@ H5O_comp_decode(H5F_t __unused__ *f, const uint8 *p,
*-------------------------------------------------------------------------
*/
static herr_t
H5O_comp_encode (H5F_t __unused__ *f, uint8 *p/*out*/, const void *mesg)
H5O_pline_encode (H5F_t __unused__ *f, uint8 *p/*out*/, const void *mesg)
{
const H5O_compress_t *comp = (const H5O_compress_t*)mesg;
const H5O_pline_t *pline = (const H5O_pline_t*)mesg;
size_t i, j, name_length;
FUNC_ENTER (H5O_comp_encode, FAIL);
FUNC_ENTER (H5O_pline_encode, FAIL);
/* Check args */
assert (p);
assert (mesg);
*p++ = comp->method;
*p++ = comp->flags;
UINT16ENCODE (p, comp->cd_size);
if (comp->cd_size) {
HDmemcpy (p, comp->client_data, comp->cd_size);
*p++ = H5O_PLINE_VERSION;
*p++ = pline->nfilters;
*p++ = 0; /*reserved 1*/
*p++ = 0; /*reserved 2*/
*p++ = 0; /*reserved 3*/
*p++ = 0; /*reserved 4*/
*p++ = 0; /*reserved 5*/
*p++ = 0; /*reserved 6*/
for (i=0; i<pline->nfilters; i++) {
if (pline->filter[i].name) {
name_length = strlen(pline->filter[i].name)+1;
} else {
name_length = 0;
}
UINT16ENCODE(p, pline->filter[i].id);
UINT16ENCODE(p, H5O_ALIGN(name_length));
UINT16ENCODE(p, pline->filter[i].flags);
UINT16ENCODE(p, pline->filter[i].cd_nelmts);
if (name_length>0) {
memcpy(p, pline->filter[i].name, name_length);
p += name_length;
while (name_length++ % 8) *p++ = 0;
}
for (j=0; j<pline->filter[i].cd_nelmts; j++) {
UINT32ENCODE(p, pline->filter[i].cd_values[j]);
}
if (pline->filter[i].cd_nelmts % 2) {
UINT32ENCODE(p, 0);
}
}
FUNC_LEAVE (SUCCEED);
@ -137,10 +215,10 @@ H5O_comp_encode (H5F_t __unused__ *f, uint8 *p/*out*/, const void *mesg)
/*-------------------------------------------------------------------------
* Function: H5O_comp_copy
* Function: H5O_pline_copy
*
* Purpose: Copies a compression message from SRC to DST allocating DST
* if necessary. If DST is already allocated then we assume
* Purpose: Copies a filter pipeline message from SRC to DST allocating
* DST if necessary. If DST is already allocated then we assume
* that it isn't initialized.
*
* Return: Success: Ptr to DST or allocated result.
@ -155,34 +233,70 @@ H5O_comp_encode (H5F_t __unused__ *f, uint8 *p/*out*/, const void *mesg)
*-------------------------------------------------------------------------
*/
static void *
H5O_comp_copy (const void *_src, void *_dst/*out*/)
H5O_pline_copy (const void *_src, void *_dst/*out*/)
{
const H5O_compress_t *src = (const H5O_compress_t *)_src;
H5O_compress_t *dst = (H5O_compress_t *)_dst;
const H5O_pline_t *src = (const H5O_pline_t *)_src;
H5O_pline_t *dst = (H5O_pline_t *)_dst;
size_t i;
H5O_pline_t *ret_value = NULL;
FUNC_ENTER (H5O_comp_copy, NULL);
FUNC_ENTER (H5O_pline_copy, NULL);
if (!dst && NULL==(dst = H5MM_malloc (sizeof *dst))) {
HRETURN_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed");
}
*dst = *src;
if (src->cd_size>0) {
if (NULL==(dst->client_data = H5MM_malloc (src->cd_size))) {
HRETURN_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed");
dst->nalloc = dst->nfilters;
if (dst->nalloc>0) {
dst->filter = H5MM_calloc(dst->nalloc * sizeof(dst->filter[0]));
if (NULL==dst->filter) {
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed");
}
HDmemcpy (dst->client_data, src->client_data, src->cd_size);
} else {
dst->filter = NULL;
}
for (i=0; i<src->nfilters; i++) {
dst->filter[i] = src->filter[i];
if (src->filter[i].name) {
dst->filter[i].name = H5MM_xstrdup(src->filter[i].name);
}
if (src->filter[i].cd_nelmts>0) {
dst->filter[i].cd_values = H5MM_malloc(src->filter[i].cd_nelmts*
sizeof(uintn));
if (NULL==dst->filter[i].cd_values) {
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,
"memory allocation failed");
}
HDmemcpy (dst->filter[i].cd_values, src->filter[i].cd_values,
src->filter[i].cd_nelmts * sizeof(uintn));
}
}
ret_value = dst;
done:
if (!ret_value && dst) {
if (dst->filter) {
for (i=0; i<dst->nfilters; i++) {
H5MM_xfree(dst->filter[i].name);
H5MM_xfree(dst->filter[i].cd_values);
}
H5MM_xfree(dst->filter);
}
if (!_dst) H5MM_xfree(dst);
}
FUNC_LEAVE (dst);
FUNC_LEAVE (ret_value);
}
/*-------------------------------------------------------------------------
* Function: H5O_comp_size
* Function: H5O_pline_size
*
* Purpose: Determines the size of a raw compression message.
* Purpose: Determines the size of a raw filter pipeline message.
*
* Return: Success: Size of message.
*
@ -196,20 +310,39 @@ H5O_comp_copy (const void *_src, void *_dst/*out*/)
*-------------------------------------------------------------------------
*/
static size_t
H5O_comp_size (H5F_t __unused__ *f, const void *mesg)
H5O_pline_size (H5F_t __unused__ *f, const void *mesg)
{
const H5O_compress_t *comp = (const H5O_compress_t*)mesg;
FUNC_ENTER (H5O_comp_size, 0);
FUNC_LEAVE (4+comp->cd_size);
const H5O_pline_t *pline = (const H5O_pline_t*)mesg;
size_t i, size;
FUNC_ENTER (H5O_pline_size, 0);
size = 1 + /*version */
1 + /*number of filters */
6; /*reserved */
for (i=0; i<pline->nfilters; i++) {
size += 2 + /*filter identification number */
2 + /*name length */
2 + /*flags */
2; /*number of client data values */
if (pline->filter[i].name) {
size_t n = strlen(pline->filter[i].name) + 1;
size += H5O_ALIGN(n);
}
size += pline->filter[i].cd_nelmts * 4;
if (pline->filter[i].cd_nelmts % 2) size += 4;
}
FUNC_LEAVE (size);
}
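Putting the encode, decode, and size routines together, the raw message is laid out as one version byte, one byte holding the number of filters, and six reserved bytes, followed for each filter by a 2-byte identifier, a 2-byte name length (a multiple of eight, zero for unnamed filters), 2 bytes of flags, a 2-byte client-data count, the null-padded name, and 4 bytes per client-data value plus 4 bytes of padding when the count is odd. Worked example: one filter with a seven-character name (eight bytes including its null terminator) and a single client-data value encodes as 8 + 8 + 8 + (4 + 4) = 32 bytes.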
/*-------------------------------------------------------------------------
* Function: H5O_comp_reset
* Function: H5O_pline_reset
*
* Purpose: Resets a compression message by freeing the client data and
* setting all fields to zero. The MESG buffer is not freed.
* Purpose: Resets a filter pipeline message by clearing all filters.
* The MESG buffer is not freed.
*
* Return: Success: SUCCEED
*
@ -223,25 +356,30 @@ H5O_comp_size (H5F_t __unused__ *f, const void *mesg)
*-------------------------------------------------------------------------
*/
static herr_t
H5O_comp_reset (void *mesg)
H5O_pline_reset (void *mesg)
{
H5O_compress_t *comp = (H5O_compress_t*)mesg;
H5O_pline_t *pline = (H5O_pline_t*)mesg;
size_t i;
FUNC_ENTER (H5O_comp_reset, FAIL);
FUNC_ENTER (H5O_pline_reset, FAIL);
assert (comp);
H5MM_xfree (comp->client_data);
HDmemset (comp, 0, sizeof *comp);
assert (pline);
for (i=0; i<pline->nfilters; i++) {
H5MM_xfree(pline->filter[i].name);
H5MM_xfree(pline->filter[i].cd_values);
}
H5MM_xfree(pline->filter);
HDmemset(pline, 0, sizeof *pline);
FUNC_LEAVE (SUCCEED);
}
/*-------------------------------------------------------------------------
* Function: H5O_comp_debug
* Function: H5O_pline_debug
*
* Purpose: Prints debugging information for compression message MESG on
* output stream STREAM. Each line is indented INDENT
* Purpose: Prints debugging information for filter pipeline message MESG
* on output stream STREAM. Each line is indented INDENT
* characters and the field name takes up FWIDTH characters.
*
* Return: Success: SUCCEED
@ -256,56 +394,54 @@ H5O_comp_reset (void *mesg)
*-------------------------------------------------------------------------
*/
static herr_t
H5O_comp_debug (H5F_t __unused__ *f, const void *mesg, FILE *stream,
H5O_pline_debug (H5F_t __unused__ *f, const void *mesg, FILE *stream,
intn indent, intn fwidth)
{
const H5O_compress_t *comp = (const H5O_compress_t *)mesg;
size_t i, j;
const H5O_pline_t *pline = (const H5O_pline_t *)mesg;
size_t i, j;
FUNC_ENTER(H5O_comp_debug, FAIL);
FUNC_ENTER(H5O_pline_debug, FAIL);
/* check args */
assert(f);
assert(comp);
assert(pline);
assert(stream);
assert(indent >= 0);
assert(fwidth >= 0);
fprintf (stream, "%*s%-*s %d\n", indent, "", fwidth,
"Method:",
(int)(comp->method));
fprintf (stream, "%*s%-*s 0x%02x\n", indent, "", fwidth,
"Flags:",
(unsigned)(comp->flags));
fprintf (stream, "%*s%-*s %lu\n", indent, "", fwidth,
"Size of client data:",
(unsigned long)(comp->cd_size));
fprintf(stream, "%*s%-*s %lu/%lu\n", indent, "", fwidth,
"Number of filters:",
(unsigned long)(pline->nfilters),
(unsigned long)(pline->nalloc));
if (comp->cd_size>0) {
fprintf (stream, "%*s%s\n", indent, "", "Client Data:");
for (i=0; i<comp->cd_size; i+=16) {
fprintf (stream, "%*s%04d: ", indent+3, "", i);
for (j=0; j<16; j++) {
if (8==j) putc (' ', stream);
if (i+j<comp->cd_size) {
fprintf (stream, "%02x ", comp->client_data[i+j]);
} else {
fputs (" ", stream);
}
}
for (j=0; j<16 && i+j<comp->cd_size; j++) {
if (8==j) putc (' ', stream);
if (comp->client_data[i+j]>' ' &&
comp->client_data[i+j]<='~') {
putc (comp->client_data[i+j], stream);
} else {
putc ('.', stream);
}
putc ('\n', stream);
}
for (i=0; i<pline->nfilters; i++) {
char name[32];
sprintf(name, "Filter at position %lu", (unsigned long)i);
fprintf(stream, "%*s%-*s\n", indent, "", fwidth, name);
fprintf(stream, "%*s%-*s 0x%04x\n", indent+3, "", MAX(0, fwidth-3),
"Filter identification:",
(unsigned)(pline->filter[i].id));
if (pline->filter[i].name) {
fprintf(stream, "%*s%-*s \"%s\"\n", indent+3, "", MAX(0, fwidth-3),
"Filter name:",
pline->filter[i].name);
} else {
fprintf(stream, "%*s%-*s NONE\n", indent+3, "", MAX(0, fwidth-3),
"Filter name:");
}
fprintf(stream, "%*s%-*s 0x%04x\n", indent+3, "", MAX(0, fwidth-3),
"Flags:",
(unsigned)(pline->filter[i].flags));
fprintf(stream, "%*s%-*s %lu\n", indent+3, "", MAX(0, fwidth-3),
"Num CD values:",
(unsigned long)(pline->filter[i].cd_nelmts));
for (j=0; j<pline->filter[i].cd_nelmts; j++) {
char field_name[32];
sprintf(field_name, "CD value %lu", (unsigned long)j);
fprintf(stream, "%*s%-*s %lu\n", indent+6, "", MAX(0, fwidth-6),
field_name,
(unsigned long)(pline->filter[i].cd_values[j]));
}
} else {
fprintf (stream, "%*s%-*s None\n", indent, "", fwidth, "Client Data:");
}
FUNC_LEAVE(SUCCEED);
@ -169,17 +169,22 @@ typedef struct H5O_layout_t {
} H5O_layout_t;
/*
* Compression message.
* Filter pipeline message.
*/
#define H5O_COMPRESS_ID 0x000b
extern const H5O_class_t H5O_COMPRESS[1];
#define H5O_PLINE_ID 0x000b
extern const H5O_class_t H5O_PLINE[1];
typedef struct H5O_compress_t {
H5Z_method_t method; /*algorithm ID */
uintn flags; /*flags */
size_t cd_size; /*client data size */
uint8 *client_data; /*client data passed to algorithm */
} H5O_compress_t;
typedef struct H5O_pline_t {
size_t nfilters; /*num filters defined */
size_t nalloc; /*num elements in `filter' array */
struct {
H5Z_filter_t id; /*filter identification number */
uintn flags; /*defn and invocation flags */
char *name; /*optional filter name */
size_t cd_nelmts; /*number of elements in cd_values[] */
uintn *cd_values; /*client data values */
} *filter; /*array of filters */
} H5O_pline_t;
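For example, after the new H5Pset_deflate() call later in this change, the permanent pipeline of a dataset creation property list holds nfilters==1 and a single entry whose id is H5Z_FILTER_DEFLATE, whose flags include H5Z_FLAG_OPTIONAL, and whose one cd_values element is the requested compression level.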
/*
* Attribute Message.
src/H5P.c
@ -2150,18 +2150,31 @@ H5Pget_preserve (hid_t plist_id)
/*-------------------------------------------------------------------------
* Function: H5Pset_compression
* Function: H5Pset_filter
*
* Purpose: Sets the compression method in a dataset creation property
* list. This sets default values for the compression
* attributes by setting the flags to zero and supplying no
* compression client data. It's probably better to use
* specific compression initialization functions like
* H5Pset_deflate().
* Purpose: Adds the specified FILTER and corresponding properties to the
* end of the transient or permanent output filter pipeline
* depending on whether PLIST is a dataset creation or dataset
* transfer property list. The FLAGS argument specifies certain
* general properties of the filter and is documented below.
* The CD_VALUES is an array of CD_NELMTS integers which are
* auxiliary data for the filter. The integer values will be
* stored in the dataset object header as part of the filter
* information.
*
* The FLAGS, CD_SIZE, and CLIENT_DATA are copied to the
* property list and eventually to the file and passed to the
* compression functions.
* The FLAGS argument is a bit vector of the following fields:
*
* H5Z_FLAG_OPTIONAL(0x0001)
* If this bit is set then the filter is optional. If the
* filter fails during an H5Dwrite() operation then the filter
* is just excluded from the pipeline for the chunk for which it
* failed; the filter will not participate in the pipeline
* during an H5Dread() of the chunk. If this bit is clear and
* the filter fails then the entire I/O operation fails.
*
* Note: This function currently supports only the permanent filter
* pipeline. That is, PLIST_ID must be a dataset creation
* property list.
*
* Return: Success: SUCCEED
*
@ -2175,181 +2188,230 @@ H5Pget_preserve (hid_t plist_id)
*-------------------------------------------------------------------------
*/
herr_t
H5Pset_compression (hid_t plist_id, H5Z_method_t method, unsigned int flags,
size_t cd_size, const void *client_data)
H5Pset_filter (hid_t plist_id, H5Z_filter_t filter, unsigned int flags,
size_t cd_nelmts, const unsigned int cd_values[/*cd_nelmts*/])
{
H5D_create_t *plist = NULL;
FUNC_ENTER (H5Pset_compression, FAIL);
H5TRACE5("e","iZmIuzx",plist_id,method,flags,cd_size,client_data);
FUNC_ENTER (H5Pset_filter, FAIL);
H5TRACE5("e","iZfIuz*[a3]Iu",plist_id,filter,flags,cd_nelmts,cd_values);
/* Check arguments */
if (H5P_DATASET_XFER==H5P_get_class(plist_id)) {
HRETURN_ERROR(H5E_PLINE, H5E_UNSUPPORTED, FAIL,
"transient pipelines are not supported yet");
}
if (H5P_DATASET_CREATE!=H5P_get_class (plist_id) ||
NULL==(plist=H5I_object (plist_id))) {
HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
"not a dataset creation property list");
}
if (method<0 || method>H5Z_USERDEF_MAX) {
if (filter<0 || filter>H5Z_FILTER_MAX) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"invalid compression method");
"invalid filter identifier");
}
if (flags & ~H5Z_FLAG_DEFMASK) {
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"invalid flags");
}
if (cd_nelmts>0 && !cd_values) {
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"no client data values supplied");
}
/* Clear any previous compression method info, then set new value */
H5O_reset (H5O_COMPRESS, &(plist->compress));
plist->compress.method = method;
plist->compress.flags = flags;
plist->compress.cd_size = cd_size;
if (cd_size) {
if (NULL==(plist->compress.client_data = H5MM_malloc (cd_size))) {
HRETURN_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed");
}
HDmemcpy (plist->compress.client_data, client_data, cd_size);
/* Do it */
if (H5Z_append(&(plist->pline), filter, flags, cd_nelmts, cd_values)<0) {
HRETURN_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL,
"unable to add filter to pipeline");
}
FUNC_LEAVE (SUCCEED);
}
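A minimal usage sketch (not part of this change set; error checking omitted). The chunk dimensions are made up, and the H5Pcreate()/H5Pset_chunk() calls and the <hdf5.h> header are assumed from the existing public API:

    #include <hdf5.h>

    /* Build a chunked dataset creation property list whose filter
     * pipeline holds an optional deflate filter at level 6. */
    static hid_t
    make_dcpl(void)
    {
        hsize_t      chunk[2] = {64, 64};  /* filters operate on chunked storage */
        unsigned int level = 6;            /* client data for the filter */
        hid_t        dcpl = H5Pcreate(H5P_DATASET_CREATE);

        H5Pset_chunk(dcpl, 2, chunk);
        H5Pset_filter(dcpl, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, &level);
        return dcpl;
    }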
/*-------------------------------------------------------------------------
* Function: H5Pget_compression
*
* Purpose: Gets the compression method information from a dataset
* creation property list. The CLIENT_DATA buffer is initially
* CD_SIZE bytes. On return, CLIENT_DATA will be initialized
* with at most that many bytes, and CD_SIZE will contain the
* actual size of the client data, which might be larger than
* its original value.
*
* Return: Success: Compression method.
*
* Failure: FAIL
*
* Programmer: Robb Matzke
* Wednesday, April 15, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
H5Z_method_t
H5Pget_compression (hid_t plist_id, unsigned int *flags/*out*/,
size_t *cd_size/*in_out*/, void *client_data/*out*/)
{
H5D_create_t *plist = NULL;
FUNC_ENTER (H5Pget_compression, FAIL);
H5TRACE4("Zm","ix*zx",plist_id,flags,cd_size,client_data);
/* Check arguments */
if (H5P_DATASET_CREATE!=H5P_get_class (plist_id) ||
NULL==(plist=H5I_object (plist_id))) {
HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
"not a dataset creation property list");
}
/* Output values */
if (flags) *flags = plist->compress.flags;
if (cd_size) {
if (*cd_size>0 && client_data) {
HDmemcpy (client_data, plist->compress.client_data,
MIN(plist->compress.cd_size, *cd_size));
}
*cd_size = plist->compress.cd_size;
}
FUNC_LEAVE (plist->compress.method);
}
/*-------------------------------------------------------------------------
* Function: H5Pset_deflate
*
* Purpose: Sets the compression method for a dataset creation property
* list to H5D_COMPRESS_DEFLATE and the compression level to
* LEVEL which should be a value between zero and nine,
* inclusive. Lower compression levels are faster but result in
* less compression. This is the same algorithm as used by the
* GNU gzip program.
*
* Return: Success: SUCCEED
*
* Failure: FAIL
*
* Programmer: Robb Matzke
* Wednesday, April 15, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
herr_t
H5Pset_deflate (hid_t plist_id, int level)
{
H5D_create_t *plist = NULL;
FUNC_ENTER (H5Pset_deflate, FAIL);
H5TRACE2("e","iIs",plist_id,level);
/* Check arguments */
if (H5P_DATASET_CREATE!=H5P_get_class (plist_id) ||
NULL==(plist=H5I_object (plist_id))) {
HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
"not a dataset creation property list");
}
if (level<0 || level>9) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"invalid deflate level");
}
/* Clear previous compression parameters */
H5O_reset (H5O_COMPRESS, &(plist->compress));
plist->compress.method = H5Z_DEFLATE;
plist->compress.flags = level;
FUNC_LEAVE (SUCCEED);
}
/*-------------------------------------------------------------------------
* Function: H5Pget_deflate
* Function: H5Pget_nfilters
*
* Purpose: Returns the deflate compression level from a dataset creation
* property list that uses that method.
* Purpose: Returns the number of filters in the permanent or transient
* pipeline depending on whether PLIST_ID is a dataset creation
* or dataset transfer property list. In each pipeline the
* filters are numbered from zero through N-1 where N is the
* value returned by this function. During output to the file
* the filters of a pipeline are applied in increasing order
* (the inverse is true for input).
*
* Return: Success: A value between zero and nine, inclusive.
* Smaller values indicate faster compression
* while higher values indicate better
* compression ratios.
* Note: Only permanent filters are supported at this time.
*
* Return: Success: Number of filters or zero if there are none.
*
* Failure: FAIL
*
* Programmer: Robb Matzke
* Wednesday, April 15, 1998
* Tuesday, August 4, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
int
H5Pget_deflate (hid_t plist_id)
H5Pget_nfilters (hid_t plist_id)
{
H5D_create_t *plist = NULL;
FUNC_ENTER (H5Pget_deflate, FAIL);
FUNC_ENTER(H5Pget_nfilters, FAIL);
H5TRACE1("Is","i",plist_id);
if (H5P_DATASET_XFER==H5P_get_class(plist_id)) {
HRETURN_ERROR(H5E_PLINE, H5E_UNSUPPORTED, FAIL,
"transient pipelines are not supported yet");
}
if (H5P_DATASET_CREATE!=H5P_get_class(plist_id) ||
NULL==(plist=H5I_object(plist_id))) {
HRETURN_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL,
"not a dataset creation property list");
}
FUNC_LEAVE(plist->pline.nfilters);
}
/*-------------------------------------------------------------------------
* Function: H5Pget_filter
*
* Purpose: This is the query counterpart of H5Pset_filter() and returns
* information about a particular filter number in a permanent
* or transient pipeline depending on whether PLIST_ID is a
* dataset creation or transfer property list. On input,
* CD_NELMTS indicates the number of entries in the CD_VALUES
* array allocated by the caller while on exit it contains the
* number of values defined by the filter. The IDX should be a
* value between zero and N-1 as described for H5Pget_nfilters()
* and the function will return failure if the filter number is
* out of range.
*
* Return: Success: Filter identification number.
*
* Failure: H5Z_FILTER_ERROR (-1)
*
* Programmer: Robb Matzke
* Wednesday, April 15, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
H5Z_filter_t
H5Pget_filter (hid_t plist_id, int idx, unsigned int *flags/*out*/,
size_t *cd_nelmts/*in_out*/, unsigned cd_values[]/*out*/)
{
H5D_create_t *plist = NULL;
size_t i;
FUNC_ENTER (H5Pget_filter, FAIL);
H5TRACE5("Zf","iIsx*zx",plist_id,idx,flags,cd_nelmts,cd_values);
/* Check arguments */
if (H5P_DATASET_XFER==H5P_get_class(plist_id)) {
HRETURN_ERROR(H5E_PLINE, H5E_UNSUPPORTED, FAIL,
"transient filters are not supported yet");
}
if (H5P_DATASET_CREATE!=H5P_get_class (plist_id) ||
NULL==(plist=H5I_object (plist_id))) {
HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
"not a dataset creation property list");
}
if (H5Z_DEFLATE!=plist->compress.method) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"deflate method is not being used");
if (idx<0 || (size_t)idx>=plist->pline.nfilters) {
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"filter number is invalid");
}
if (cd_nelmts || cd_values) {
if (cd_nelmts && *cd_nelmts>256) {
/*
* It's likely that users forget to initialize this on input, so
* we'll check that it has a reasonable value. The actual number
* is unimportant because the H5O layer will detect when a message
* is too large.
*/
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"probable uninitialized *cd_nelmts argument");
}
if (cd_nelmts && *cd_nelmts>0 && !cd_values) {
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"client data values not supplied");
}
/*
* If cd_nelmts is null but cd_values is non-null then just ignore
* cd_values
*/
if (!cd_nelmts) cd_values = NULL;
}
FUNC_LEAVE (plist->compress.flags % 10);
/* Output values */
if (flags) *flags = plist->pline.filter[idx].flags;
if (cd_values) {
for (i=0; i<plist->pline.filter[idx].cd_nelmts && i<*cd_nelmts; i++) {
cd_values[i] = plist->pline.filter[idx].cd_values[i];
}
}
if (cd_nelmts) *cd_nelmts = plist->pline.filter[idx].cd_nelmts;
FUNC_LEAVE (plist->pline.filter[idx].id);
}
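A sketch of the query side (illustrative only; `dcpl' is any dataset creation property list id and the eight-element cd_values buffer is an arbitrary size):

    #include <stdio.h>
    #include <hdf5.h>

    /* Print every filter in the permanent pipeline of DCPL. */
    static void
    print_pipeline(hid_t dcpl)
    {
        int nf = H5Pget_nfilters(dcpl);
        int i;

        for (i = 0; i < nf; i++) {
            unsigned int flags;
            unsigned int cd_values[8];
            size_t       cd_nelmts = 8;  /* in: room available; out: values defined */
            H5Z_filter_t id = H5Pget_filter(dcpl, i, &flags, &cd_nelmts, cd_values);

            printf("filter %d: id=%d flags=0x%04x cd_nelmts=%lu\n",
                   i, (int)id, flags, (unsigned long)cd_nelmts);
        }
    }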
/*-------------------------------------------------------------------------
* Function: H5Pset_deflate
*
* Purpose: Sets the compression method for a permanent or transient
* filter pipeline (depending on whether PLIST_ID is a dataset
* creation or transfer property list) to H5Z_FILTER_DEFLATE
* and the compression level to LEVEL which should be a value
* between zero and nine, inclusive. Lower compression levels
* are faster but result in less compression. This is the same
* algorithm as used by the GNU gzip program.
*
* Return: Success: SUCCEED
*
* Failure: FAIL
*
* Programmer: Robb Matzke
* Wednesday, April 15, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
herr_t
H5Pset_deflate (hid_t plist_id, unsigned level)
{
H5D_create_t *plist = NULL;
FUNC_ENTER (H5Pset_deflate, FAIL);
H5TRACE2("e","iIu",plist_id,level);
/* Check arguments */
if (H5P_DATASET_XFER==H5P_get_class(plist_id)) {
HRETURN_ERROR(H5E_PLINE, H5E_UNSUPPORTED, FAIL,
"transient filter pipelines are not supported yet");
}
if (H5P_DATASET_CREATE!=H5P_get_class (plist_id) ||
NULL==(plist=H5I_object (plist_id))) {
HRETURN_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL,
"not a dataset creation property list");
}
if (level>9) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"invalid deflate level");
}
/* Add the filter */
if (H5Z_append(&(plist->pline), H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL,
1, &level)<0) {
HRETURN_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL,
"unable to add deflate filter to pipeline");
}
FUNC_LEAVE (SUCCEED);
}
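So a caller now writes simply H5Pset_deflate(dcpl, 6) with an unsigned level; as the body above shows, this has become a thin wrapper that appends H5Z_FILTER_DEFLATE with H5Z_FLAG_OPTIONAL and the level as the filter's single client-data value.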
@ -92,14 +92,14 @@ herr_t H5Pset_buffer (hid_t plist_id, size_t size, void *tconv, void *bkg);
size_t H5Pget_buffer (hid_t plist_id, void **tconv/*out*/, void **bkg/*out*/);
herr_t H5Pset_preserve (hid_t plist_id, hbool_t status);
int H5Pget_preserve (hid_t plist_id);
herr_t H5Pset_compression (hid_t plist_id, H5Z_method_t method,
unsigned int flags, size_t cd_size,
const void *client_data);
H5Z_method_t H5Pget_compression (hid_t plist_id, unsigned int *flags/*out*/,
size_t *cd_size/*in,out*/,
void *client_data/*out*/);
herr_t H5Pset_deflate (hid_t plist_id, int level);
int H5Pget_deflate (hid_t plist_id);
herr_t H5Pset_filter (hid_t plist_id, H5Z_filter_t filter, unsigned int flags,
size_t cd_nelmts, const unsigned int cd_values[]);
int H5Pget_nfilters(hid_t plist_id);
H5Z_filter_t H5Pget_filter(hid_t plist_id, int filter,
unsigned int *flags/*out*/,
size_t *cd_nelmts/*out*/,
unsigned cd_values[]/*out*/);
herr_t H5Pset_deflate (hid_t plist_id, unsigned aggression);
herr_t H5Pset_cache (hid_t plist_id, int mdc_nelmts, size_t rdcc_nbytes,
double rdcc_w0);
herr_t H5Pget_cache (hid_t plist_id, int *mdc_nelmts, size_t *rdcc_nbytes,
@ -125,7 +125,7 @@ H5S_all_favail (const H5S_t *space, const H5S_sel_iter_t *sel_iter, size_t max)
*/
size_t
H5S_all_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const struct H5O_pline_t *pline, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space,
H5S_sel_iter_t *file_iter, size_t nelmts,
const H5D_transfer_t xfer_mode, void *_buf/*out*/)
@ -181,7 +181,7 @@ H5S_all_fgath (H5F_t *f, const struct H5O_layout_t *layout,
/*
* Gather from file.
*/
if (H5F_arr_read (f, layout, comp, efl, hsize, hsize, zero, file_offset,
if (H5F_arr_read (f, layout, pline, efl, hsize, hsize, zero, file_offset,
xfer_mode, buf/*out*/)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_READERROR, 0, "read error");
}
@ -215,7 +215,7 @@ H5S_all_fgath (H5F_t *f, const struct H5O_layout_t *layout,
*/
herr_t
H5S_all_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const struct H5O_pline_t *pline, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space,
H5S_sel_iter_t *file_iter, size_t nelmts,
const H5D_transfer_t xfer_mode, const void *_buf)
@ -267,7 +267,7 @@ H5S_all_fscat (H5F_t *f, const struct H5O_layout_t *layout,
/*
* Scatter to file.
*/
if (H5F_arr_write (f, layout, comp, efl, hsize, hsize, zero,
if (H5F_arr_write (f, layout, pline, efl, hsize, hsize, zero,
file_offset, xfer_mode, buf)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error");
}
@ -479,7 +479,7 @@ H5S_all_mscat (const void *_tconv_buf, size_t elmt_size,
REVISION LOG
--------------------------------------------------------------------------*/
herr_t
H5S_all_release (H5S_t *space)
H5S_all_release (H5S_t __unused__ *space)
{
FUNC_ENTER (H5S_all_release, FAIL);
@ -23,7 +23,7 @@ static intn interface_initialize_g = FALSE;
typedef struct {
H5F_t *f;
const struct H5O_layout_t *layout;
const struct H5O_compress_t *comp;
const struct H5O_pline_t *pline;
const struct H5O_efl_t *efl;
size_t elmt_size;
const H5S_t *space;
@ -291,21 +291,29 @@ H5S_hyper_get_regions (size_t *num_regions, intn dim, size_t bound_count,
ret_value[0].start=MAX(node->start[next_dim],pos[next_dim])+offset[next_dim];
ret_value[0].end=node->end[next_dim]+offset[next_dim];
#ifdef QAK
printf("%s: check 3.2, lo_bounds=%d, start=%d, hi_bounds=%d, end=%d\n",
FUNC, (int)node->start[next_dim], (int)ret_value[curr_reg].start,
(int)node->end[next_dim], (int)ret_value[curr_reg].end);
printf("%s: check 3.2, lo_bounds=%d, start=%d, "
"hi_bounds=%d, end=%d\n",
FUNC, (int)node->start[next_dim],
(int)ret_value[curr_reg].start,
(int)node->end[next_dim],
(int)ret_value[curr_reg].end);
#endif /* QAK */
/* Increment the number of regions */
num_reg++;
} else {
#ifdef QAK
printf("%s: check 4.0, lo_bounds=%d, start=%d, hi_bounds=%d, end=%d\n",
FUNC, (int)node->start[next_dim], (int)ret_value[curr_reg].start,
(int)node->end[next_dim], (int)ret_value[curr_reg].end);
printf("%s: check 4.0, lo_bounds=%d, start=%d, "
"hi_bounds=%d, end=%d\n",
FUNC, (int)node->start[next_dim],
(int)ret_value[curr_reg].start,
(int)node->end[next_dim],
(int)ret_value[curr_reg].end);
#endif /* QAK */
/* Enlarge array */
ret_value=H5MM_realloc(ret_value,(sizeof(H5S_hyper_region_t)*(num_reg+1)));
ret_value=H5MM_realloc(ret_value,
(sizeof(H5S_hyper_region_t)*
(num_reg+1)));
/* Initialize with new region */
ret_value[num_reg].start=node->start[next_dim]+offset[next_dim];
@ -320,7 +328,8 @@ H5S_hyper_get_regions (size_t *num_regions, intn dim, size_t bound_count,
/* Sort region list and eliminate duplicates if necessary */
if(num_reg>1) {
qsort(ret_value,num_reg,sizeof(H5S_hyper_region_t),H5S_hyper_compare_regions);
qsort(ret_value,num_reg,sizeof(H5S_hyper_region_t),
H5S_hyper_compare_regions);
for(i=1,curr_reg=0,uniq_reg=1; i<num_reg; i++) {
if(ret_value[curr_reg].start!=ret_value[i].start &&
ret_value[curr_reg].end!=ret_value[i].end) {
@ -410,12 +419,15 @@ H5S_hyper_fread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
/* Set up hyperslab I/O parameters which apply to all regions */
/* Copy the location of the region in the file */
HDmemcpy(file_offset, fhyper_info->iter->hyp.pos, (fhyper_info->space->extent.u.simple.rank * sizeof(hssize_t)));
HDmemcpy(file_offset, fhyper_info->iter->hyp.pos,
(fhyper_info->space->extent.u.simple.rank *
sizeof(hssize_t)));
file_offset[fhyper_info->space->extent.u.simple.rank]=0;
/* Set the hyperslab size to copy */
hsize[0]=1;
H5V_array_fill(hsize, hsize, sizeof(hsize[0]), fhyper_info->space->extent.u.simple.rank);
H5V_array_fill(hsize, hsize, sizeof(hsize[0]),
fhyper_info->space->extent.u.simple.rank);
hsize[fhyper_info->space->extent.u.simple.rank]=fhyper_info->elmt_size;
/* Set the memory offset to the origin */
@ -426,7 +438,8 @@ H5S_hyper_fread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
#ifdef QAK
printf("%s: check 2.2, i=%d\n",FUNC,(int)i);
#endif /* QAK */
region_size=MIN(fhyper_info->nelmts, (regions[i].end-regions[i].start)+1);
region_size=MIN(fhyper_info->nelmts,
(regions[i].end-regions[i].start)+1);
hsize[fhyper_info->space->extent.u.simple.rank-1]=region_size;
file_offset[fhyper_info->space->extent.u.simple.rank-1]=regions[i].start;
@ -434,9 +447,12 @@ H5S_hyper_fread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
* Gather from file.
*/
if (H5F_arr_read (fhyper_info->f, fhyper_info->layout,
fhyper_info->comp, fhyper_info->efl, hsize, hsize, zero,
file_offset, fhyper_info->xfer_mode, fhyper_info->dst/*out*/)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_READERROR, 0, "read error");
fhyper_info->pline, fhyper_info->efl,
hsize, hsize, zero, file_offset,
fhyper_info->xfer_mode,
fhyper_info->dst/*out*/)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_READERROR, 0,
"read error");
}
#ifdef QAK
printf("%s: check 2.3, region #%d\n",FUNC,(int)i);
@ -446,7 +462,8 @@ H5S_hyper_fread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
#endif /* QAK */
/* Advance the pointer in the buffer */
fhyper_info->dst = ((uint8 *)fhyper_info->dst) + region_size*fhyper_info->elmt_size;
fhyper_info->dst = ((uint8 *)fhyper_info->dst) +
region_size*fhyper_info->elmt_size;
/* Increment the number of elements read */
num_read+=region_size;
@ -458,7 +475,8 @@ H5S_hyper_fread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
if(region_size==(hsize_t)((regions[i].end-regions[i].start)+1))
fhyper_info->iter->hyp.pos[dim+1]=(-1);
else
fhyper_info->iter->hyp.pos[dim+1] = regions[i].start + region_size;
fhyper_info->iter->hyp.pos[dim+1] = regions[i].start +
region_size;
/* Decrement the iterator count */
fhyper_info->iter->hyp.elmt_left-=region_size;
@ -474,15 +492,17 @@ H5S_hyper_fread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
/* Step through each region in this dimension */
for(i=0; i<num_regions && fhyper_info->nelmts>0; i++) {
/* Step through each location in each region */
for(j=regions[i].start; j<=regions[i].end && fhyper_info->nelmts>0; j++) {
for(j=regions[i].start;
j<=regions[i].end && fhyper_info->nelmts>0;
j++) {
#ifdef QAK
printf("%s: check 4.0, dim=%d, location=%d\n",FUNC,dim,j);
#endif /* QAK */
/*
* If we are moving to a new position in this dim, reset
* the next lower dim. location.
*/
* If we are moving to a new position in this dim, reset
* the next lower dim. location.
*/
if(fhyper_info->iter->hyp.pos[dim]!=j)
fhyper_info->iter->hyp.pos[dim+1]=(-1);
@ -527,7 +547,7 @@ H5S_hyper_fread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
*/
size_t
H5S_hyper_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
@ -571,7 +591,7 @@ H5S_hyper_fgath (H5F_t *f, const struct H5O_layout_t *layout,
/* Initialize parameter block for recursive calls */
fhyper_info.f=f;
fhyper_info.layout=layout;
fhyper_info.comp=comp;
fhyper_info.pline=pline;
fhyper_info.efl=efl;
fhyper_info.elmt_size=elmt_size;
fhyper_info.space=file_space;
@ -674,7 +694,7 @@ H5S_hyper_fwrite (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
* Scatter from file.
*/
if (H5F_arr_write (fhyper_info->f, fhyper_info->layout,
fhyper_info->comp, fhyper_info->efl,
fhyper_info->pline, fhyper_info->efl,
hsize, hsize, zero, file_offset,
fhyper_info->xfer_mode,
fhyper_info->src)<0) {
@ -758,7 +778,7 @@ H5S_hyper_fwrite (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
*/
herr_t
H5S_hyper_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
@ -803,7 +823,7 @@ H5S_hyper_fscat (H5F_t *f, const struct H5O_layout_t *layout,
/* Initialize parameter block for recursive calls */
fhyper_info.f=f;
fhyper_info.layout=layout;
fhyper_info.comp=comp;
fhyper_info.pline=pline;
fhyper_info.efl=efl;
fhyper_info.elmt_size=elmt_size;
fhyper_info.space=file_space;
@ -889,24 +909,30 @@ H5S_hyper_mread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
/* Set up hyperslab I/O parameters which apply to all regions */
/* Set up the size of the memory space */
HDmemcpy(mem_size, fhyper_info->space->extent.u.simple.size, fhyper_info->space->extent.u.simple.rank*sizeof(hsize_t));
HDmemcpy(mem_size, fhyper_info->space->extent.u.simple.size,
fhyper_info->space->extent.u.simple.rank*sizeof(hsize_t));
mem_size[fhyper_info->space->extent.u.simple.rank]=fhyper_info->elmt_size;
/* Copy the location of the region in the file */
HDmemcpy(mem_offset, fhyper_info->iter->hyp.pos, (fhyper_info->space->extent.u.simple.rank * sizeof(hssize_t)));
HDmemcpy(mem_offset, fhyper_info->iter->hyp.pos,
(fhyper_info->space->extent.u.simple.rank *
sizeof(hssize_t)));
mem_offset[fhyper_info->space->extent.u.simple.rank]=0;
/* Set the hyperslab size to copy */
hsize[0]=1;
H5V_array_fill(hsize, hsize, sizeof(hsize[0]), fhyper_info->space->extent.u.simple.rank);
H5V_array_fill(hsize, hsize, sizeof(hsize[0]),
fhyper_info->space->extent.u.simple.rank);
hsize[fhyper_info->space->extent.u.simple.rank]=fhyper_info->elmt_size;
/* Set the memory offset to the origin */
HDmemset (zero, 0, ((fhyper_info->space->extent.u.simple.rank+1)* sizeof(*zero)));
HDmemset (zero, 0, ((fhyper_info->space->extent.u.simple.rank+1)*
sizeof(*zero)));
/* perform I/O on data from regions */
for(i=0; i<num_regions && fhyper_info->nelmts>0; i++) {
region_size=MIN(fhyper_info->nelmts, (regions[i].end-regions[i].start)+1);
region_size=MIN(fhyper_info->nelmts,
(regions[i].end-regions[i].start)+1);
hsize[fhyper_info->space->extent.u.simple.rank-1]=region_size;
mem_offset[fhyper_info->space->extent.u.simple.rank-1]=regions[i].start;
#ifdef QAK
@ -925,7 +951,8 @@ H5S_hyper_mread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
}
/* Advance the pointer in the buffer */
fhyper_info->dst = ((uint8 *)fhyper_info->dst) + region_size*fhyper_info->elmt_size;
fhyper_info->dst = ((uint8 *)fhyper_info->dst) +
region_size*fhyper_info->elmt_size;
/* Increment the number of elements read */
num_read+=region_size;
@ -954,15 +981,17 @@ H5S_hyper_mread (intn dim, H5S_hyper_fhyper_info_t *fhyper_info)
/* Step through each region in this dimension */
for(i=0; i<num_regions && fhyper_info->nelmts>0; i++) {
/* Step through each location in each region */
for(j=regions[i].start; j<=regions[i].end && fhyper_info->nelmts>0; j++) {
for(j=regions[i].start;
j<=regions[i].end && fhyper_info->nelmts>0;
j++) {
#ifdef QAK
printf("%s: check 4.0, dim=%d, location=%d\n",FUNC,dim,j);
#endif /* QAK */
/*
* If we are moving to a new position in this dim, reset
* the next lower dim. location.
*/
* If we are moving to a new position in this dim, reset
* the next lower dim. location.
*/
if(fhyper_info->iter->hyp.pos[dim]!=j)
fhyper_info->iter->hyp.pos[dim+1]=(-1);


@ -215,7 +215,7 @@ H5S_point_favail (const H5S_t __unused__ *space, const H5S_sel_iter_t *sel_iter,
*/
size_t
H5S_point_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
@ -259,7 +259,8 @@ H5S_point_fgath (H5F_t *f, const struct H5O_layout_t *layout,
while(num_read<nelmts) {
if(file_iter->pnt.elmt_left>0) {
/* Copy the location of the point to get */
HDmemcpy(file_offset, file_iter->pnt.curr->pnt,ndims*sizeof(hssize_t));
HDmemcpy(file_offset, file_iter->pnt.curr->pnt,
ndims*sizeof(hssize_t));
file_offset[ndims] = 0;
/* Add in the offset */
@ -267,7 +268,7 @@ H5S_point_fgath (H5F_t *f, const struct H5O_layout_t *layout,
file_offset[i] += file_space->select.offset[i];
/* Go read the point */
if (H5F_arr_read (f, layout, comp, efl, hsize, hsize, zero,
if (H5F_arr_read (f, layout, pline, efl, hsize, hsize, zero,
file_offset, xfer_mode, buf/*out*/)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_READERROR, 0, "read error");
}
@ -323,7 +324,7 @@ H5S_point_fgath (H5F_t *f, const struct H5O_layout_t *layout,
*/
herr_t
H5S_point_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
@ -395,7 +396,7 @@ H5S_point_fscat (H5F_t *f, const struct H5O_layout_t *layout,
}
#endif /* QAK */
/* Go write the point */
if (H5F_arr_write (f, layout, comp, efl, hsize, hsize, zero,
if (H5F_arr_write (f, layout, pline, efl, hsize, hsize, zero,
file_offset, xfer_mode, buf)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_WRITEERROR, 0, "write error");
}


@ -145,23 +145,26 @@ typedef struct H5S_t {
typedef struct H5S_tconv_t {
/* Initialize file element numbering information */
herr_t (*finit)(const struct H5O_layout_t *layout, const H5S_t *space,
H5S_sel_iter_t *iter);
H5S_sel_iter_t *iter);
/* Initialize memory element numbering information */
herr_t (*minit)(const struct H5O_layout_t *layout, const H5S_t *space,
H5S_sel_iter_t *iter);
H5S_sel_iter_t *iter);
/* Initialize background element numbering information */
herr_t (*binit)(const struct H5O_layout_t *layout, const H5S_t *space,
H5S_sel_iter_t *iter);
H5S_sel_iter_t *iter);
/* Figure out the optimal number of elements to transfer to/from the file */
/*
* Figure out the optimal number of elements to transfer to/from the
* file.
*/
size_t (*favail)(const H5S_t *file_space, const H5S_sel_iter_t *file_iter,
size_t max);
size_t max);
/* Gather elements from disk to type conversion buffer */
size_t (*fgath)(H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
@ -169,7 +172,7 @@ typedef struct H5S_tconv_t {
/* Scatter elements from type conversion buffer to disk */
herr_t (*fscat)(H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *compress,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
@ -187,14 +190,14 @@ typedef struct H5S_tconv_t {
/* Read from file to application w/o intermediate scratch buffer */
herr_t (*read)(H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
const H5D_transfer_t xfer_mode, void *buf/*out*/);
/* Write directly from app buffer to file */
herr_t (*write)(H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
const H5D_transfer_t xfer_mode, const void *buf);
@ -236,7 +239,7 @@ size_t H5S_simp_init (const struct H5O_layout_t *layout,
const H5S_t *mem_space, const H5S_t *file_space,
size_t desired_nelmts);
size_t H5S_simp_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space,
size_t start, size_t nelmts,
@ -248,18 +251,18 @@ size_t H5S_simp_mgath (const void *buf, size_t elmt_size,
const H5S_t *mem_space,
size_t start, size_t nelmts, void *tconv_buf/*out*/);
herr_t H5S_simp_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space,
size_t start, size_t nelmts,
const H5D_transfer_t xfer_mode, const void *tconv_buf);
herr_t H5S_simp_read (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
const H5D_transfer_t xfer_mode, void *buf/*out*/);
herr_t H5S_simp_write (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp,
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, const H5S_t *mem_space,
const H5D_transfer_t xfer_mode, const void *buf);
@ -268,23 +271,25 @@ herr_t H5S_simp_write (H5F_t *f, const struct H5O_layout_t *layout,
herr_t H5S_point_init (const struct H5O_layout_t *layout,
const H5S_t *space, H5S_sel_iter_t *iter);
size_t H5S_point_favail (const H5S_t *space, const H5S_sel_iter_t *iter,
size_t max);
size_t max);
size_t H5S_point_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
const H5D_transfer_t xfer_mode, void *buf/*out*/);
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
void *buf/*out*/);
herr_t H5S_point_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
const H5D_transfer_t xfer_mode, const void *buf);
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
const void *buf);
size_t H5S_point_mgath (const void *_buf, size_t elmt_size,
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_tconv_buf/*out*/);
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_tconv_buf/*out*/);
herr_t H5S_point_mscat (const void *_tconv_buf, size_t elmt_size,
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_buf/*out*/);
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_buf/*out*/);
herr_t H5S_point_add (H5S_t *space, size_t num_elemn, const hssize_t **coord);
herr_t H5S_point_release (H5S_t *space);
hsize_t H5S_point_npoints (const H5S_t *space);
@ -295,23 +300,25 @@ hbool_t H5S_point_select_valid (const H5S_t *space);
herr_t H5S_all_init (const struct H5O_layout_t *layout,
const H5S_t *space, H5S_sel_iter_t *iter);
size_t H5S_all_favail (const H5S_t *space, const H5S_sel_iter_t *iter,
size_t max);
size_t max);
size_t H5S_all_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
const H5D_transfer_t xfer_mode, void *buf/*out*/);
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
void *buf/*out*/);
herr_t H5S_all_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
const H5D_transfer_t xfer_mode, const void *buf);
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
const void *buf);
size_t H5S_all_mgath (const void *_buf, size_t elmt_size,
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_tconv_buf/*out*/);
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_tconv_buf/*out*/);
herr_t H5S_all_mscat (const void *_tconv_buf, size_t elmt_size,
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_buf/*out*/);
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_buf/*out*/);
herr_t H5S_all_release (H5S_t *space);
hsize_t H5S_all_npoints (const H5S_t *space);
@ -321,22 +328,25 @@ herr_t H5S_hyper_init (const struct H5O_layout_t *layout,
size_t H5S_hyper_favail (const H5S_t *space, const H5S_sel_iter_t *iter,
size_t max);
size_t H5S_hyper_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
const H5D_transfer_t xfer_mode, void *buf/*out*/);
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
void *buf/*out*/);
herr_t H5S_hyper_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts,
const H5D_transfer_t xfer_mode, const void *buf);
const struct H5O_pline_t *pline,
const struct H5O_efl_t *efl, size_t elmt_size,
const H5S_t *file_space, H5S_sel_iter_t *file_iter,
size_t nelmts, const H5D_transfer_t xfer_mode,
const void *buf);
size_t H5S_hyper_mgath (const void *_buf, size_t elmt_size,
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_tconv_buf/*out*/);
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_tconv_buf/*out*/);
herr_t H5S_hyper_mscat (const void *_tconv_buf, size_t elmt_size,
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_buf/*out*/);
herr_t H5S_hyper_add (H5S_t *space, const hssize_t *start, const hsize_t *size);
const H5S_t *mem_space, H5S_sel_iter_t *mem_iter,
size_t nelmts, void *_buf/*out*/);
herr_t H5S_hyper_add (H5S_t *space, const hssize_t *start,
const hsize_t *size);
herr_t H5S_hyper_release (H5S_t *space);
herr_t H5S_hyper_sel_iter_release (H5S_sel_iter_t *sel_iter);
hsize_t H5S_hyper_npoints (const H5S_t *space);


@ -121,7 +121,7 @@ H5S_simp_init (const struct H5O_layout_t __unused__ *layout,
*/
size_t
H5S_simp_fgath (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const struct H5O_pline_t *pline, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space,
size_t start, size_t nelmts,
const H5D_transfer_t xfer_mode, void *buf/*out*/)
@ -180,7 +180,7 @@ H5S_simp_fgath (H5F_t *f, const struct H5O_layout_t *layout,
/*
* Gather from file.
*/
if (H5F_arr_read (f, layout, comp, efl, hsize, hsize, zero, file_offset,
if (H5F_arr_read (f, layout, pline, efl, hsize, hsize, zero, file_offset,
xfer_mode, buf/*out*/)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_READERROR, 0, "read error");
}
@ -402,7 +402,7 @@ H5S_simp_mgath (const void *buf, size_t elmt_size,
*/
herr_t
H5S_simp_fscat (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const struct H5O_pline_t *pline, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space,
size_t start, size_t nelmts,
const H5D_transfer_t xfer_mode, const void *buf)
@ -461,7 +461,7 @@ H5S_simp_fscat (H5F_t *f, const struct H5O_layout_t *layout,
/*
* Scatter to file.
*/
if (H5F_arr_write (f, layout, comp, efl, hsize, hsize, zero,
if (H5F_arr_write (f, layout, pline, efl, hsize, hsize, zero,
file_offset, xfer_mode, buf)<0) {
HRETURN_ERROR (H5E_DATASPACE, H5E_WRITEERROR, FAIL, "write error");
}
@ -494,7 +494,7 @@ H5S_simp_fscat (H5F_t *f, const struct H5O_layout_t *layout,
*/
herr_t
H5S_simp_read (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const struct H5O_pline_t *pline, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space,
const H5S_t *mem_space, const H5D_transfer_t xfer_mode,
void *buf/*out*/)
@ -613,7 +613,7 @@ H5S_simp_read (H5F_t *f, const struct H5O_layout_t *layout,
mem_offset[file_space->extent.u.simple.rank] = 0;
/* Read the hyperslab */
if (H5F_arr_read (f, layout, comp, efl, hslab_size,
if (H5F_arr_read (f, layout, pline, efl, hslab_size,
mem_size, mem_offset, file_offset, xfer_mode, buf)<0) {
HRETURN_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read dataset");
}
@ -646,7 +646,7 @@ H5S_simp_read (H5F_t *f, const struct H5O_layout_t *layout,
*/
herr_t
H5S_simp_write (H5F_t *f, const struct H5O_layout_t *layout,
const struct H5O_compress_t *comp, const struct H5O_efl_t *efl,
const struct H5O_pline_t *pline, const struct H5O_efl_t *efl,
size_t elmt_size, const H5S_t *file_space,
const H5S_t *mem_space, const H5D_transfer_t xfer_mode,
const void *buf)
@ -765,7 +765,7 @@ H5S_simp_write (H5F_t *f, const struct H5O_layout_t *layout,
mem_offset[file_space->extent.u.simple.rank] = 0;
/* Write the hyperslab */
if (H5F_arr_write (f, layout, comp, efl, hslab_size,
if (H5F_arr_write (f, layout, pline, efl, hslab_size,
mem_size, mem_offset, file_offset, xfer_mode, buf)<0) {
HRETURN_ERROR (H5E_IO, H5E_WRITEERROR, FAIL,
"unable to write dataset");

src/H5Z.c

@ -5,7 +5,7 @@
* Programmer: Robb Matzke <matzke@llnl.gov>
* Thursday, April 16, 1998
*
* Purpose: Functions for data compression.
* Purpose: Functions for data filters.
*/
#include <H5private.h>
#include <H5Eprivate.h>
@ -25,48 +25,39 @@ static herr_t H5Z_init_interface (void);
static void H5Z_term_interface (void);
/*
* The compression table maps compression method number to a struct that
* contains pointers to the compress and uncompress methods along with timing
* statistics.
* The filter table maps filter identification numbers to structs that
* contain a pointer to the filter function and timing statistics.
*/
typedef struct H5Z_class_t {
char *name; /*method name for debugging */
H5Z_func_t compress; /*compression function */
H5Z_func_t uncompress; /*uncompression function */
H5Z_filter_t id; /*filter ID number */
char *comment; /*comment for debugging */
H5Z_func_t func; /*the filter function */
#ifdef H5Z_DEBUG
struct {
hsize_t nbytes; /*bytes compressed including overruns */
hsize_t over; /*bytes of overrun */
H5_timer_t timer; /*total compression time inc. overruns */
hsize_t failed; /*bytes of failure (not overruns) */
} comp;
struct {
hsize_t nbytes; /*bytes uncompressed, including overruns*/
hsize_t over; /*bytes of overrun */
H5_timer_t timer; /*total uncompression time */
hsize_t failed; /*bytes of failure (not overruns) */
} uncomp;
hsize_t total; /*total number of bytes processed */
hsize_t errors; /*bytes of total attributable to errors */
H5_timer_t timer; /*execution time including errors */
} stats[2]; /*0=output, 1=input */
#endif
} H5Z_class_t;
static H5Z_class_t H5Z_g[H5Z_USERDEF_MAX+1];
/* Compression and uncompression methods */
static size_t H5Z_zlib_c (unsigned int flags, size_t __unused__ cd_size,
const void __unused__ *client_data,
size_t src_nbytes, const void *_src,
size_t dst_nbytes, void *dst/*out*/);
static size_t H5Z_zlib_u (unsigned int flags, size_t __unused__ cd_size,
const void __unused__ *client_data,
size_t src_nbytes, const void *_src,
size_t dst_nbytes, void *dst/*out*/);
static size_t H5Z_table_alloc_g = 0;
static size_t H5Z_table_used_g = 0;
static H5Z_class_t *H5Z_table_g = NULL;
static H5Z_class_t *H5Z_find(H5Z_filter_t id);
/* Predefined filters */
static size_t H5Z_filter_deflate(uintn flags, size_t cd_nelmts,
const uintn cd_values[], size_t nbytes,
size_t *buf_size, void **buf);
/*-------------------------------------------------------------------------
* Function: H5Z_init_interface
*
* Purpose: Initializes the data compression layer.
* Purpose: Initializes the data filter layer.
*
* Return: Success: SUCCEED
*
@ -83,11 +74,10 @@ static herr_t
H5Z_init_interface (void)
{
FUNC_ENTER (H5Z_init_interface, FAIL);
H5_add_exit (H5Z_term_interface);
H5Z_register (H5Z_NONE, "none", NULL, NULL);
H5Z_register (H5Z_DEFLATE, "deflate", H5Z_zlib_c, H5Z_zlib_u);
H5Z_register (H5Z_FILTER_DEFLATE, "deflate",
H5Z_filter_deflate);
FUNC_LEAVE (SUCCEED);
}
@ -110,75 +100,69 @@ H5Z_init_interface (void)
static void
H5Z_term_interface (void)
{
size_t i;
#ifdef H5Z_DEBUG
int i, nprint=0;
char name[16];
int dir, nprint=0;
char comment[16], bandwidth[32];
for (i=0; i<=H5Z_table_used_g; i++) {
for (dir=0; dir<2; dir++) {
if (0==H5Z_table_g[i].stats[dir].total) continue;
for (i=0; i<=H5Z_USERDEF_MAX; i++) {
if (H5Z_g[i].comp.nbytes || H5Z_g[i].uncomp.nbytes) {
if (0==nprint++) {
HDfprintf (stderr, "H5Z: compression statistics accumulated "
/* Print column headers */
HDfprintf (stderr, "H5Z: filter statistics accumulated "
"over life of library:\n");
HDfprintf (stderr, " %-10s %10s %7s %7s %8s %8s %8s %9s\n",
"Method", "Total", "Overrun", "Errors", "User",
HDfprintf (stderr, " %-16s %10s %10s %8s %8s %8s %10s\n",
"Filter", "Total", "Errors", "User",
"System", "Elapsed", "Bandwidth");
HDfprintf (stderr, " %-10s %10s %7s %7s %8s %8s %8s %9s\n",
"------", "-----", "-------", "------", "----",
HDfprintf (stderr, " %-16s %10s %10s %8s %8s %8s %10s\n",
"------", "-----", "------", "----",
"------", "-------", "---------");
}
sprintf (name, "%s-c", H5Z_g[i].name);
/* Truncate the comment to fit in the field */
strncpy(comment, H5Z_table_g[i].comment, sizeof comment);
comment[sizeof(comment)-1] = '\0';
/*
* Format bandwidth to have four significant digits and units
* of `B/s', `kB/s', `MB/s', `GB/s', or `TB/s' or the word
* `Inf' if the elapsed time is zero.
*/
H5_bandwidth(bandwidth,
(double)(H5Z_table_g[i].stats[dir].total),
H5Z_table_g[i].stats[dir].timer.etime);
/* Print the statistics */
HDfprintf (stderr,
" %-12s %10Hd %7Hd %7Hd %8.2f %8.2f %8.2f ",
name,
H5Z_g[i].comp.nbytes,
H5Z_g[i].comp.over,
H5Z_g[i].comp.failed,
H5Z_g[i].comp.timer.utime,
H5Z_g[i].comp.timer.stime,
H5Z_g[i].comp.timer.etime);
if (H5Z_g[i].comp.timer.etime>0) {
HDfprintf (stderr, "%9.3e\n",
H5Z_g[i].comp.nbytes / H5Z_g[i].comp.timer.etime);
} else {
HDfprintf (stderr, "%9s\n", "NaN");
}
sprintf (name, "%s-u", H5Z_g[i].name);
HDfprintf (stderr,
" %-12s %10Hd %7Hd %7Hd %8.2f %8.2f %8.2f ",
name,
H5Z_g[i].uncomp.nbytes,
H5Z_g[i].uncomp.over,
H5Z_g[i].uncomp.failed,
H5Z_g[i].uncomp.timer.utime,
H5Z_g[i].uncomp.timer.stime,
H5Z_g[i].uncomp.timer.etime);
if (H5Z_g[i].uncomp.timer.etime>0) {
HDfprintf (stderr, "%9.3e\n",
H5Z_g[i].uncomp.nbytes/H5Z_g[i].uncomp.timer.etime);
} else {
HDfprintf (stderr, "%9s\n", "NaN");
}
" %c%-15s %10Hd %10Hd %8.2f %8.2f %8.2f "
"%10s\n", dir?'<':'>', comment,
H5Z_table_g[i].stats[dir].total,
H5Z_table_g[i].stats[dir].errors,
H5Z_table_g[i].stats[dir].timer.utime,
H5Z_table_g[i].stats[dir].timer.stime,
H5Z_table_g[i].stats[dir].timer.etime,
bandwidth);
}
H5MM_xfree (H5Z_g[i].name);
}
HDmemset (H5Z_g, 0, sizeof H5Z_g);
#endif
/* Free the table */
for (i=0; i<H5Z_table_used_g; i++) {
H5MM_xfree(H5Z_table_g[i].comment);
}
H5Z_table_g = H5MM_xfree(H5Z_table_g);
H5Z_table_used_g = H5Z_table_alloc_g = 0;
}
/*-------------------------------------------------------------------------
* Function: H5Zregister
*
* Purpose: This function registers new compression and uncompression
* methods for a method number. The NAME argument is used for
* debugging and may be the null pointer. Either or both of
* CFUNC (the compression function) and UFUNC (the uncompression
* method) may be null pointers.
*
* The statistics associated with a method number are not reset
* by this function; they accumulate over the life of the
* library.
* Purpose: This function registers a new filter. The COMMENT argument is
* used for debugging and may be the null pointer.
*
* Return: Success: SUCCEED
*
@ -192,178 +176,40 @@ H5Z_term_interface (void)
*-------------------------------------------------------------------------
*/
herr_t
H5Zregister (H5Z_method_t method, const char *name, H5Z_func_t cfunc,
H5Z_func_t ufunc)
H5Zregister (H5Z_filter_t id, const char *comment, H5Z_func_t func)
{
FUNC_ENTER (H5Zregister, FAIL);
H5TRACE4("e","Zmsxx",method,name,cfunc,ufunc);
H5TRACE3("e","Zfsx",id,comment,func);
/* Check args */
if (method<0 || method>H5Z_USERDEF_MAX) {
if (id<0 || id>H5Z_FILTER_MAX) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"invalid data compression method number");
"invalid filter identification number");
}
if (method<16) {
if (id<256) {
HRETURN_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL,
"unable to modify predefined compression methods");
"unable to modify predefined filters");
}
if (!func) {
HRETURN_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL,
"no function specified");
}
/* Do it */
if (H5Z_register (method, name, cfunc, ufunc)<0) {
HRETURN_ERROR (H5E_COMP, H5E_CANTINIT, FAIL,
"unable to register compression methods");
if (H5Z_register (id, comment, func)<0) {
HRETURN_ERROR (H5E_PLINE, H5E_CANTINIT, FAIL,
"unable to register filter");
}
FUNC_LEAVE (SUCCEED);
}
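For illustration only (not part of this change): under the new one-function interface a user-defined filter might be registered as sketched below; the identifier 305 and all names are hypothetical.

#include <hdf5.h>

#define EXAMPLE_FILTER 305      /* values 256-511 are available for testing */

/* Hypothetical do-nothing filter matching the new H5Z_func_t signature */
static size_t
example_filter(unsigned int flags, size_t cd_nelmts,
               const unsigned int cd_values[], size_t nbytes,
               size_t *buf_size, void **buf)
{
    return nbytes;              /* data in *buf is returned unchanged */
}

/* ...during application start-up... */
if (H5Zregister(EXAMPLE_FILTER, "example", example_filter) < 0) {
    /* registration failed */
}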
/*-------------------------------------------------------------------------
* Function: H5Z_compress
*
* Purpose: Compress NBYTES from SRC into at most NBYTES of DST.
*
* Return: Success: Number of bytes in DST
*
* Failure: 0 if the DST buffer overflowed or something
* else went wrong.
*
* Programmer: Robb Matzke
* Thursday, April 16, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
size_t
H5Z_compress (const H5O_compress_t *comp, size_t nbytes, const void *src,
void *dst/*out*/)
{
size_t ret_value = 0;
H5Z_method_t method = comp ? comp->method : H5Z_NONE;
H5Z_func_t cfunc = NULL;
#ifdef H5Z_DEBUG
intn over = 0;
H5_timer_t timer;
#endif
FUNC_ENTER (H5Z_compress, 0);
#ifdef H5Z_DEBUG
H5_timer_begin (&timer);
#endif
if (H5Z_NONE==method) {
/* No compression method */
HGOTO_DONE (0);
} else if (NULL==(cfunc=H5Z_g[method].compress)) {
/* No compress function */
HGOTO_ERROR (H5E_COMP, H5E_UNSUPPORTED, 0,
"compression method is not supported");
} else if (0==(ret_value=(cfunc)(comp->flags, comp->cd_size,
comp->client_data, nbytes,
src, nbytes, dst))) {
/* Compress failed */
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0, "compression failed");
} else if (ret_value>=nbytes) {
/* Output is not smaller than input */
#ifdef H5Z_DEBUG
H5Z_g[method].comp.over += nbytes;
over = 1;
#endif
HGOTO_DONE (0);
}
done:
#ifdef H5Z_DEBUG
H5Z_g[method].comp.nbytes += nbytes;
if (0==ret_value && !over) H5Z_g[method].comp.failed += nbytes;
H5_timer_end (&(H5Z_g[method].comp.timer), &timer);
#endif
FUNC_LEAVE (ret_value);
}
/*-------------------------------------------------------------------------
* Function: H5Z_uncompress
*
* Purpose: Uncompress SRC_NBYTES from SRC into at most DST_NBYTES of
* DST.
*
* Return: Success: Number of bytes in DST buffer.
*
* Failure: 0 if the uncompression failed or DST wasn't
* big enough to hold the result.
*
* Programmer: Robb Matzke
* Thursday, April 16, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
size_t
H5Z_uncompress (const H5O_compress_t *comp, size_t src_nbytes, const void *src,
size_t dst_nbytes, void *dst/*out*/)
{
size_t ret_value = 0;
H5Z_func_t ufunc = NULL;
H5Z_method_t method = comp ? comp->method : H5Z_NONE;
#ifdef H5Z_DEBUG
H5_timer_t timer;
#endif
FUNC_ENTER (H5Z_uncompress, 0);
#ifdef H5Z_DEBUG
H5_timer_begin (&timer);
#endif
if (H5Z_NONE==method) {
/* No compression method */
assert (src_nbytes<=dst_nbytes);
HDmemcpy (dst, src, src_nbytes);
ret_value = src_nbytes;
} else if (src_nbytes==dst_nbytes) {
/* Data is not compressed */
#ifdef H5Z_DEBUG
H5Z_g[method].uncomp.over += src_nbytes;
#endif
HDmemcpy (dst, src, src_nbytes);
ret_value = src_nbytes;
} else if (NULL==(ufunc=H5Z_g[method].uncompress)) {
/* No uncompress function */
HGOTO_ERROR (H5E_COMP, H5E_UNSUPPORTED, 0,
"uncompression method is not supported");
} else if (0==(ret_value=(ufunc)(comp->flags, comp->cd_size,
comp->client_data, src_nbytes,
src, dst_nbytes, dst))) {
/* Uncompress failed */
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0, "uncompression failed");
}
done:
#ifdef H5Z_DEBUG
H5Z_g[method].uncomp.nbytes += dst_nbytes;
if (0==ret_value) H5Z_g[method].uncomp.failed += dst_nbytes;
H5_timer_end (&(H5Z_g[method].uncomp.timer), &timer);
#endif
FUNC_LEAVE (ret_value);
}
/*-------------------------------------------------------------------------
* Function: H5Z_register
*
* Purpose: Same as the public version except this one allows compression
* methods to be set for predefined method numbers <16.
* Purpose: Same as the public version except this one allows filters
* to be set for predefined filter numbers <256.
*
* Return: Success: SUCCEED
*
@ -377,86 +223,293 @@ H5Z_uncompress (const H5O_compress_t *comp, size_t src_nbytes, const void *src,
*-------------------------------------------------------------------------
*/
herr_t
H5Z_register (H5Z_method_t method, const char *name, H5Z_func_t cfunc,
H5Z_func_t ufunc)
H5Z_register (H5Z_filter_t id, const char *comment, H5Z_func_t func)
{
size_t i;
FUNC_ENTER (H5Z_register, FAIL);
assert (method>=0 && method<=H5Z_USERDEF_MAX);
H5MM_xfree (H5Z_g[method].name);
H5Z_g[method].name = H5MM_xstrdup (name);
H5Z_g[method].compress = cfunc;
H5Z_g[method].uncompress = ufunc;
assert (id>=0 && id<=H5Z_FILTER_MAX);
/* Is the filter already registered? */
for (i=0; i<H5Z_table_used_g; i++) {
if (H5Z_table_g[i].id==id) break;
}
if (i>=H5Z_table_used_g) {
if (H5Z_table_used_g>=H5Z_table_alloc_g) {
size_t n = MAX(32, 2*H5Z_table_alloc_g);
H5Z_class_t *table = H5MM_realloc(H5Z_table_g,
n*sizeof(H5Z_class_t));
if (!table) {
HRETURN_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
"unable to extend filter table");
}
H5Z_table_g = table;
H5Z_table_alloc_g = n;
}
/* Initialize */
i = H5Z_table_used_g++;
HDmemset(H5Z_table_g+i, 0, sizeof(H5Z_class_t));
H5Z_table_g[i].id = id;
H5Z_table_g[i].comment = H5MM_xstrdup(comment);
H5Z_table_g[i].func = func;
} else {
/* Replace old contents */
H5MM_xfree(H5Z_table_g[i].comment);
H5Z_table_g[i].comment = H5MM_xstrdup(comment);
H5Z_table_g[i].func = func;
}
FUNC_LEAVE (SUCCEED);
}
/*-------------------------------------------------------------------------
* Function: H5Z_zlib_c
* Function: H5Z_append
*
* Purpose: Compress SRC_NBYTES bytes from SRC into at most DST_NBYTES of
* DST using the compression level as specified in FLAGS.
* Purpose: Append another filter to the specified pipeline.
*
* Return: Success: Number of bytes compressed into DST limited
* by DST_NBYTES.
* Return: Success: SUCCEED
*
* Failure: 0
* Failure: FAIL
*
* Programmer: Robb Matzke
* Thursday, April 16, 1998
* Tuesday, August 4, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
static size_t
H5Z_zlib_c (unsigned int flags, size_t __unused__ cd_size,
const void __unused__ *client_data, size_t src_nbytes,
const void *src, size_t dst_nbytes, void *dst/*out*/)
herr_t
H5Z_append(H5O_pline_t *pline, H5Z_filter_t filter, uintn flags,
size_t cd_nelmts, const unsigned int cd_values[/*cd_nelmts*/])
{
size_t ret_value = 0;
#if defined(HAVE_LIBZ) && defined(HAVE_ZLIB_H)
const Bytef *z_src = (const Bytef*)src;
Bytef *z_dst = (Bytef*)dst;
uLongf z_dst_nbytes = (uLongf)dst_nbytes;
uLong z_src_nbytes = (uLong)src_nbytes;
int level = flags % 10;
int status;
#endif
size_t idx, i;
FUNC_ENTER (H5Z_zlib_c, 0);
FUNC_ENTER(H5Z_append, FAIL);
assert(pline);
assert(filter>=0 && filter<=H5Z_FILTER_MAX);
assert(0==(flags & ~H5Z_FLAG_DEFMASK));
assert(0==cd_nelmts || cd_values);
#if defined(HAVE_LIBZ) && defined (HAVE_ZLIB_H)
status = compress2 (z_dst, &z_dst_nbytes, z_src, z_src_nbytes, level);
if (Z_BUF_ERROR==status) {
ret_value = dst_nbytes;
} else if (Z_MEM_ERROR==status) {
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0, "deflate memory error");
} else if (Z_OK!=status) {
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0, "deflate error");
} else {
ret_value = z_dst_nbytes;
/*
* Check filter limit. We do it here for early warnings although we may
* decide to relax this restriction in the future.
*/
if (pline->nfilters>=32) {
HRETURN_ERROR(H5E_PLINE, H5E_CANTINIT, FAIL,
"too many filters in pipeline");
}
#else
HGOTO_ERROR (H5E_COMP, H5E_UNSUPPORTED, 0,
"hdf5 was not compiled with zlib-1.0.2 or better");
#endif
done:
FUNC_LEAVE (ret_value);
/* Allocate additional space in the pipeline if it's full */
if (pline->nfilters>=pline->nalloc) {
H5O_pline_t x;
x.nalloc = MAX(32, 2*pline->nalloc);
x.filter = H5MM_realloc(pline->filter, x.nalloc*sizeof(x.filter[0]));
if (NULL==x.filter) {
HRETURN_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for filter pipeline");
}
pline->nalloc = x.nalloc;
pline->filter = x.filter;
}
/* Add the new filter to the pipeline */
idx = pline->nfilters;
pline->filter[idx].id = filter;
pline->filter[idx].flags = flags;
pline->filter[idx].name = NULL; /*we'll pick it up later*/
pline->filter[idx].cd_nelmts = cd_nelmts;
if (cd_nelmts>0) {
pline->filter[idx].cd_values = H5MM_malloc(cd_nelmts*sizeof(uintn));
if (NULL==pline->filter[idx].cd_values) {
HRETURN_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL,
"memory allocation failed for filter");
}
for (i=0; i<cd_nelmts; i++) {
pline->filter[idx].cd_values[i] = cd_values[i];
}
}
pline->nfilters++;
FUNC_LEAVE(SUCCEED);
}
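For illustration only: an internal caller such as the property-list code might use H5Z_append() roughly as sketched below to add the deflate filter to a dataset creation pipeline; the variable names are hypothetical and this is not the actual H5P.c code.

H5O_pline_t  pline;                 /* pipeline message being built */
unsigned int cd_values[1];

HDmemset(&pline, 0, sizeof pline);  /* start with an empty pipeline */
cd_values[0] = 6;                   /* deflate aggression level, 0..9 */
if (H5Z_append(&pline, H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL,
               1, cd_values) < 0) {
    /* unable to add the deflate filter to the pipeline */
}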
/*-------------------------------------------------------------------------
* Function: H5Z_zlib_u
* Function: H5Z_find
*
* Purpose: Uncompress SRC_NBYTES from SRC into at most DST_NBYTES of
* DST.
* Purpose: Given a filter ID return a pointer to a global struct that
* defines the filter.
*
* Return: Success: Number of bytes returned in DST.
* Return: Success: Ptr to entry in global filter table.
*
* Failure: 0
* Failure: NULL
*
* Programmer: Robb Matzke
* Wednesday, August 5, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
static H5Z_class_t *
H5Z_find(H5Z_filter_t id)
{
size_t i;
FUNC_ENTER(H5Z_find, NULL);
for (i=0; i<H5Z_table_used_g; i++) {
if (H5Z_table_g[i].id == id) {
return H5Z_table_g + i;
}
}
FUNC_LEAVE(NULL);
}
/*-------------------------------------------------------------------------
* Function: H5Z_pipeline
*
* Purpose: Process data through the filter pipeline. The FLAGS argument
* is the filter invocation flags (definition flags come from
* the PLINE->filter[].flags). The filters are processed in
* definition order unless the H5Z_FLAG_REVERSE is set. The
* FILTER_MASK is a bit-mask to indicate which filters to skip
* and on exit will indicate which filters failed. Each
* filter has an index number in the pipeline and that index
* number is the filter's bit in the FILTER_MASK. NBYTES is the
* number of bytes of data to filter and on exit should be the
* number of resulting bytes while BUF_SIZE holds the total
* allocated size of the buffer, which is pointed to BUF.
*
* If the buffer must grow during processing of the pipeline
* then the pipeline function should free the original buffer
* and return a fresh buffer, adjusting BUF_SIZE accordingly.
*
* Return: Success: 0
*
* Failure: -1
*
* Programmer: Robb Matzke
* Tuesday, August 4, 1998
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
herr_t
H5Z_pipeline(H5F_t *f, const H5O_pline_t *pline, uintn flags,
uintn *filter_mask/*in,out*/, size_t *nbytes/*in,out*/,
size_t *buf_size/*in,out*/, void **buf/*in,out*/)
{
size_t i, idx, new_nbytes;
H5Z_class_t *fclass=NULL;
uintn failed = 0;
#ifdef H5Z_DEBUG
H5_timer_t timer;
#endif
FUNC_ENTER(H5Z_pipeline, FAIL);
assert(f);
assert(0==(flags & ~H5Z_FLAG_INVMASK));
assert(filter_mask);
assert(nbytes && *nbytes>0);
assert(buf_size && *buf_size>0);
assert(buf && *buf);
assert(!pline || pline->nfilters<32);
if (pline && (flags & H5Z_FLAG_REVERSE)) {
for (i=pline->nfilters; i>0; --i) {
idx = i-1;
if (*filter_mask & ((unsigned)1<<idx)) {
failed |= (unsigned)1 << idx;
continue;/*filter excluded*/
}
if (NULL==(fclass=H5Z_find(pline->filter[idx].id))) {
failed |= (unsigned)1 << idx;
HRETURN_ERROR(H5E_PLINE, H5E_READERROR, FAIL,
"required filter is not registered");
}
#ifdef H5Z_DEBUG
H5_timer_begin(&timer);
#endif
new_nbytes = (fclass->func)(flags|(pline->filter[idx].flags),
pline->filter[idx].cd_nelmts,
pline->filter[idx].cd_values,
*nbytes, buf_size, buf);
#ifdef H5Z_DEBUG
H5_timer_end(&(fclass->stats[1].timer), &timer);
fclass->stats[1].total += MAX(*nbytes, new_nbytes);
if (0==new_nbytes) fclass->stats[1].errors += *nbytes;
#endif
if (0==new_nbytes) {
failed |= (unsigned)1 << idx;
HRETURN_ERROR(H5E_PLINE, H5E_READERROR, FAIL,
"filter returned failure");
}
*nbytes = new_nbytes;
}
} else if (pline) {
for (idx=0; idx<pline->nfilters; idx++) {
if (*filter_mask & ((unsigned)1<<idx)) {
failed |= (unsigned)1 << idx;
continue; /*filter excluded*/
}
if (NULL==(fclass=H5Z_find(pline->filter[idx].id))) {
failed |= (unsigned)1 << idx;
if (pline->filter[idx].flags & H5Z_FLAG_OPTIONAL) {
H5E_clear();
continue;
} else {
HRETURN_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL,
"required filter is not registered");
}
}
#ifdef H5Z_DEBUG
H5_timer_begin(&timer);
#endif
new_nbytes = (fclass->func)(flags|(pline->filter[idx].flags),
pline->filter[idx].cd_nelmts,
pline->filter[idx].cd_values,
*nbytes, buf_size, buf);
#ifdef H5Z_DEBUG
H5_timer_end(&(fclass->stats[0].timer), &timer);
fclass->stats[0].total += MAX(*nbytes, new_nbytes);
if (0==new_nbytes) fclass->stats[0].errors += *nbytes;
#endif
if (0==new_nbytes) {
failed |= (unsigned)1 << idx;
if (0==(pline->filter[idx].flags & H5Z_FLAG_OPTIONAL)) {
HRETURN_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL,
"filter returned failure");
} else {
H5E_clear();
}
} else {
*nbytes = new_nbytes;
}
}
}
*filter_mask = failed;
FUNC_LEAVE(SUCCEED);
}
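For illustration only: a chunked-storage write path might push a chunk through the pipeline as sketched below before allocating file space; on read the same call is made with H5Z_FLAG_REVERSE and the filter mask that was stored with the chunk. The chunk_* names are hypothetical.

unsigned int filter_mask = 0;        /* in: filters to skip; out: filters that failed */
size_t nbytes   = chunk_nbytes;      /* in: valid bytes; out: size after filtering */
size_t buf_size = chunk_alloc_size;  /* total allocated size of buf */
void  *buf      = chunk_buf;         /* the filters may replace this buffer */

if (H5Z_pipeline(f, &pline, 0 /*forward*/, &filter_mask,
                 &nbytes, &buf_size, &buf) < 0) {
    /* a required filter failed */
}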
/*-------------------------------------------------------------------------
* Function: H5Z_filter_deflate
*
* Purpose:	Implements the deflate (zlib) filter: compresses data on
*		output and uncompresses it on input when H5Z_FLAG_REVERSE
*		is set.
*
* Return:	Success:	Number of bytes of data in the output buffer.
*
*		Failure:	0
*
* Programmer: Robb Matzke
* Thursday, April 16, 1998
@ -466,39 +519,99 @@ H5Z_zlib_c (unsigned int flags, size_t __unused__ cd_size,
*-------------------------------------------------------------------------
*/
static size_t
H5Z_zlib_u (unsigned int __unused__ flags, size_t __unused__ cd_size,
const void __unused__ *client_data, size_t src_nbytes,
const void *src, size_t dst_nbytes, void *dst/*out*/)
H5Z_filter_deflate (uintn flags, size_t cd_nelmts, const uintn cd_values[],
size_t nbytes, size_t *buf_size, void **buf)
{
size_t ret_value = 0;
#ifdef HAVE_ZLIB_H
const Bytef *z_src = (const Bytef*)src;
Bytef *z_dst = (Bytef*)dst;
uLongf z_dst_nbytes = (uLongf)dst_nbytes;
uLong z_src_nbytes = (uLong)src_nbytes;
int aggression = 6;
void *outbuf = NULL;
#if defined(HAVE_LIBZ) && defined(HAVE_ZLIB_H)
int status;
#endif
FUNC_ENTER (H5Z_zlib_u, 0);
FUNC_ENTER (H5Z_filter_deflate, 0);
/* Get aggression level */
if (cd_nelmts!=1 || cd_values[0]>9) {
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, 0,
"invalid deflate aggression level");
}
aggression = cd_values[0];
#if defined(HAVE_LIBZ) && defined (HAVE_ZLIB_H)
status = uncompress (z_dst, &z_dst_nbytes, z_src, z_src_nbytes);
if (Z_BUF_ERROR==status) {
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0,
"deflate destination buffer was too small");
} else if (Z_MEM_ERROR==status) {
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0, "deflate memory error");
} else if (Z_DATA_ERROR==status) {
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0, "deflate corrupted data");
} else if (Z_OK!=status) {
HGOTO_ERROR (H5E_COMP, H5E_CANTINIT, 0, "deflate error");
if (flags & H5Z_FLAG_REVERSE) {
/* Input; uncompress */
z_stream z_strm;
size_t nalloc = *buf_size;
outbuf = H5MM_malloc(nalloc);
HDmemset(&z_strm, 0, sizeof(z_strm));
z_strm.next_in = *buf;
z_strm.avail_in = nbytes;
z_strm.next_out = outbuf;
z_strm.avail_out = nalloc;
if (Z_OK!=inflateInit(&z_strm)) {
HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, 0, "inflateInit() failed");
}
while (1) {
status = inflate(&z_strm, Z_SYNC_FLUSH);
if (Z_STREAM_END==status) break; /*done*/
if (Z_OK!=status) {
inflateEnd(&z_strm);
HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, 0, "inflate() failed");
}
if (Z_OK==status && 0==z_strm.avail_out) {
nalloc *= 2;
outbuf = H5MM_realloc(outbuf, nalloc);
z_strm.next_out = (char*)outbuf + z_strm.total_out;
z_strm.avail_out = nalloc - z_strm.total_out;
}
}
H5MM_xfree(*buf);
*buf = outbuf;
outbuf = NULL;
*buf_size = nalloc;
ret_value = z_strm.total_out;
inflateEnd(&z_strm);
} else {
/*
* Output; compress but fail if the result would be larger than the
* input. The library doesn't provide in-place compression, so we
* must allocate a separate buffer for the result.
*/
const Bytef *z_src = (const Bytef*)(*buf);
Bytef *z_dst; /*destination buffer */
uLongf z_dst_nbytes = (uLongf)nbytes;
uLong z_src_nbytes = (uLong)nbytes;
if (NULL==(z_dst=outbuf=H5MM_malloc(nbytes))) {
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0,
"unable to allocate deflate destination buffer");
}
status = compress2 (z_dst, &z_dst_nbytes, z_src, z_src_nbytes,
aggression);
if (Z_BUF_ERROR==status) {
HGOTO_ERROR(H5E_PLINE, H5E_CANTINIT, 0, "overflow");
} else if (Z_MEM_ERROR==status) {
HGOTO_ERROR (H5E_PLINE, H5E_CANTINIT, 0, "deflate memory error");
} else if (Z_OK!=status) {
HGOTO_ERROR (H5E_PLINE, H5E_CANTINIT, 0, "deflate error");
} else {
H5MM_xfree(*buf);
*buf = outbuf;
outbuf = NULL;
*buf_size = nbytes;
ret_value = z_dst_nbytes;
}
}
ret_value = z_dst_nbytes;
#else
HGOTO_ERROR (H5E_COMP, H5E_UNSUPPORTED, 0,
HGOTO_ERROR (H5E_PLINE, H5E_UNSUPPORTED, 0,
"hdf5 was not compiled with zlib-1.0.2 or better");
#endif
done:
H5MM_xfree(outbuf);
FUNC_LEAVE (ret_value);
}


@ -10,14 +10,13 @@
#include <H5Zpublic.h>
struct H5O_compress_t; /*forward decl*/
struct H5O_pline_t; /*forward decl*/
herr_t H5Z_register (H5Z_method_t method, const char *name,
H5Z_func_t compress, H5Z_func_t uncompress);
size_t H5Z_compress (const struct H5O_compress_t *compress, size_t nbytes,
const void *src, void *dst/*out*/);
size_t H5Z_uncompress (const struct H5O_compress_t *compress,
size_t src_nbytes, const void *src, size_t dst_nbytes,
void *dst/*out*/);
herr_t H5Z_register(H5Z_filter_t id, const char *comment, H5Z_func_t filter);
herr_t H5Z_append(struct H5O_pline_t *pline, H5Z_filter_t filter, uintn flags,
size_t cd_nelmts, const unsigned int cd_values[]);
herr_t H5Z_pipeline(H5F_t *f, const struct H5O_pline_t *pline, uintn flags,
uintn *filter_mask/*in,out*/, size_t *nbytes/*in,out*/,
size_t *buf_size/*in,out*/, void **buf/*in,out*/);
#endif


@ -9,59 +9,50 @@
#define _H5Zpublic_H
/*
* Compression methods. Method zero means no compression. Methods 1 through
* 15 are defined by the library. Methods 16-255 are user-defined.
* Filter identifiers. Values 0 through 255 are for filters defined by the
* HDF5 library. Values 256 through 511 are available for testing new
* filters. Subsequent values should be obtained from the HDF5 development
* team at hdf5dev@ncsa.uiuc.edu. These values will never change because they
* appear in the HDF5 files.
*/
typedef int H5Z_method_t;
#define H5Z_NONE 0 /*no compression, must be zero */
#define H5Z_DEFLATE 1 /*deflation like gzip */
#define H5Z_RES_2 2 /*reserved for internal use */
#define H5Z_RES_3 3 /*reserved for internal use */
#define H5Z_RES_4 4 /*reserved for internal use */
#define H5Z_RES_5 5 /*reserved for internal use */
#define H5Z_RES_6 6 /*reserved for internal use */
#define H5Z_RES_7 7 /*reserved for internal use */
#define H5Z_RES_8 8 /*reserved for internal use */
#define H5Z_RES_9 9 /*reserved for internal use */
#define H5Z_RES_10 10 /*reserved for internal use */
#define H5Z_RES_11 11 /*reserved for internal use */
#define H5Z_RES_12 12 /*reserved for internal use */
#define H5Z_RES_13 13 /*reserved for internal use */
#define H5Z_RES_14 14 /*reserved for internal use */
#define H5Z_RES_15 15 /*reserved for internal use */
/* user-defined 16-255 */
#define H5Z_USERDEF_MIN 16 /*first user-defined method */
#define H5Z_USERDEF_MAX 255 /*last user-defined method */
typedef int H5Z_filter_t;
#define H5Z_FILTER_ERROR (-1) /*no filter */
#define H5Z_FILTER_NONE 0 /*reserved indefinitely */
#define H5Z_FILTER_DEFLATE 1 /*deflation like gzip */
#define H5Z_FILTER_MAX 65535 /*maximum filter id */
/* Flags for filter definition */
#define H5Z_FLAG_DEFMASK 0x00ff /*definition flag mask */
#define H5Z_FLAG_OPTIONAL 0x0001 /*filter is optional */
/* Additional flags for filter invocation */
#define H5Z_FLAG_INVMASK 0xff00 /*invocation flag mask */
#define H5Z_FLAG_REVERSE 0x0100 /*reverse direction; read */
/*
* A compression function takes some configuration data which comes from the
* compression message, namely FLAGS, CD_SIZE, and CLIENT_DATA. It should
* read SRC_NBYTES from SRC and compress them into at most DST_NBYTES of DST.
* If the compressed data would be larger than DST_NBYTES the function should
* return a value greater than or equal to DST_NBYTES. On failure the
* function may return zero.
* A filter gets definition flags and invocation flags (defined above), the
* client data array and size defined when the filter was added to the
* pipeline, the size in bytes of the data on which to operate, and pointers
* to a buffer and its allocated size.
*
* The uncompression function is the inverse of compression and takes the
* same arguments. The SRC_NBYTES argument is the number of compressed bytes
* in SRC. The function should uncompress SRC into DST. For redundancy,
* DST_NBYTES contains the size of the DST buffer although if the algorithm
* is operating properly and the file has not been corrupted the uncompressed
* data will never be larger than DST_NBYTES. The function should return the
* number of bytes in the DST buffer or zero on failure. Failure includes
* the overflow of the DST buffer.
* The filter should store the result in the supplied buffer if possible,
* otherwise it can allocate a new buffer, freeing the original. The
* allocated size of the new buffer should be returned through the BUF_SIZE
* pointer and the new buffer through the BUF pointer.
*
* The return value from the filter is the number of bytes in the output
* buffer. If an error occurs then the function should return zero and leave
* all pointer arguments unchanged.
*/
typedef size_t (*H5Z_func_t)(unsigned int flags, size_t cd_size,
const void *client_data, size_t src_nbytes,
const void *src, size_t dst_nbytes,
void *dst/*out*/);
typedef size_t (*H5Z_func_t)(unsigned int flags, size_t cd_nelmts,
const unsigned int cd_values[], size_t nbytes,
size_t *buf_size, void **buf);
#ifdef __cplusplus
extern "C" {
#endif
herr_t H5Zregister (H5Z_method_t method, const char *name, H5Z_func_t compress,
H5Z_func_t uncompress);
herr_t H5Zregister(H5Z_filter_t id, const char *comment, H5Z_func_t filter);
#ifdef __cplusplus
}
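For illustration only: a filter that XORs every byte with cd_values[0] satisfies the contract above. It operates in place, so *buf and *buf_size never change, and because XOR is its own inverse the same code serves both directions; the function name is hypothetical.

static size_t
xor_filter(unsigned int flags, size_t cd_nelmts,
           const unsigned int cd_values[], size_t nbytes,
           size_t *buf_size, void **buf)
{
    /* illustrative filter, not part of the library */
    unsigned char key = cd_nelmts > 0 ? (unsigned char)cd_values[0] : 0;
    unsigned char *p  = (unsigned char *)*buf;
    size_t i;

    for (i = 0; i < nbytes; i++) p[i] ^= key;  /* same work on input and output */
    return nbytes;                             /* bytes now valid in *buf */
}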


@ -83,6 +83,9 @@
/* Define if you have the BSDgettimeofday function. */
#undef HAVE_BSDGETTIMEOFDAY
/* Define if you have the difftime function. */
#undef HAVE_DIFFTIME
/* Define if you have the fork function. */
#undef HAVE_FORK


@ -215,6 +215,7 @@ typedef struct {
void H5_timer_reset (H5_timer_t *timer);
void H5_timer_begin (H5_timer_t *timer);
void H5_timer_end (H5_timer_t *sum/*in,out*/, H5_timer_t *timer/*in,out*/);
void H5_bandwidth(char *buf/*out*/, double nbytes, double nseconds);
/*
* Redefine all the POSIX functions. We should never see a POSIX
@ -254,7 +255,11 @@ void H5_timer_end (H5_timer_t *sum/*in,out*/, H5_timer_t *timer/*in,out*/);
#define HDctermid(S) ctermid(S)
#define HDctime(T) ctime(T)
#define HDcuserid(S) cuserid(S)
#ifdef HAVE_DIFFTIME
#define HDdifftime(X,Y) difftime(X,Y)
#else
#define HDdifftime(X,Y) ((double)(X)-(double)(Y))
#endif
#define HDdiv(X,Y) div(X,Y)
#define HDdup(F) dup(F)
#define HDdup2(F,I) dup2(F,I)


@ -35,6 +35,6 @@
#include <H5Ppublic.h> /* Property lists */
#include <H5Spublic.h> /* Dataspaces */
#include <H5Tpublic.h> /* Datatypes */
#include <H5Zpublic.h> /* Data compression */
#include <H5Zpublic.h> /* Data filters */
#endif


@ -492,4 +492,5 @@ mtime.o: \
../src/H5Ppublic.h \
../src/H5Zpublic.h \
../src/H5Spublic.h \
../src/H5Tpublic.h
../src/H5Tpublic.h \
../src/H5private.h


@ -40,7 +40,7 @@
#define DSET_COMPRESS_NAME "compressed"
#define DSET_BOGUS_NAME "bogus"
#define H5Z_BOGUS 255
#define H5Z_BOGUS 305
/*-------------------------------------------------------------------------
@ -354,9 +354,9 @@ test_tconv(hid_t file)
/*-------------------------------------------------------------------------
* Function: bogus
*
* Purpose: A bogus compression method.
* Purpose: A bogus compression method that doesn't do anything.
*
* Return: Success: SRC_NBYTES, see compression documentation.
* Return: Success: Data chunk size
*
* Failure: 0
*
@ -368,12 +368,11 @@ test_tconv(hid_t file)
*-------------------------------------------------------------------------
*/
static size_t
bogus (unsigned int __unused__ flags, size_t __unused__ cd_size,
const void __unused__ *client_data, size_t src_nbytes, const void *src,
size_t __unused__ dst_nbytes, void *dst/*out*/)
bogus(unsigned int __unused__ flags, size_t __unused__ cd_nelmts,
const unsigned int __unused__ cd_values[], size_t nbytes,
size_t __unused__ *buf_size, void __unused__ **buf)
{
memcpy (dst, src, src_nbytes);
return src_nbytes;
return nbytes;
}
@ -586,7 +585,7 @@ test_compression(hid_t file)
points[hs_offset[0]+i][hs_offset[1]+j] = rand ();
}
}
H5Sselect_hyperslab (space, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL);
H5Sselect_hyperslab(space, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL);
status = H5Dwrite (dataset, H5T_NATIVE_INT, space, space, xfer, points);
if (status<0) goto error;
@ -603,8 +602,10 @@ test_compression(hid_t file)
printf(" At index %lu,%lu\n",
(unsigned long)(hs_offset[0]+i),
(unsigned long)(hs_offset[1]+j));
printf(" At original: %d\n", (int)points[hs_offset[0]+i][hs_offset[1]+j]);
printf(" At returned: %d\n", (int)check[hs_offset[0]+i][hs_offset[1]+j]);
printf(" At original: %d\n",
(int)points[hs_offset[0]+i][hs_offset[1]+j]);
printf(" At returned: %d\n",
(int)check[hs_offset[0]+i][hs_offset[1]+j]);
goto error;
}
}
@ -619,11 +620,11 @@ test_compression(hid_t file)
printf ("%-70s", "Testing compression (app-defined method)");
fflush (stdout);
H5Zregister (H5Z_BOGUS, "bogus", bogus, bogus);
H5Pset_compression (dc, H5Z_BOGUS, 0, 0, NULL);
H5Dclose (dataset);
H5Sclose (space);
space = H5Screate_simple (2, size, NULL);
if (H5Zregister (H5Z_BOGUS, "bogus", bogus)<0) goto error;
if (H5Pset_filter (dc, H5Z_BOGUS, 0, 0, NULL)<0) goto error;
if (H5Dclose (dataset)<0) goto error;
if (H5Sclose (space)<0) goto error;
if ((space = H5Screate_simple (2, size, NULL))<0) goto error;
dataset = H5Dcreate (file, DSET_BOGUS_NAME, H5T_NATIVE_INT, space, dc);
assert (dataset>=0);
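For illustration only: attaching the predefined deflate filter to a dataset creation property list looks much the same, except that deflate takes one client-data value, the aggression level. The chunk dimensions below are hypothetical; filters act on chunked data.

hsize_t      chunk_size[2] = {32, 32};   /* hypothetical chunk dimensions */
unsigned int cd_values[1]  = {6};        /* deflate level 0..9 */

dc = H5Pcreate(H5P_DATASET_CREATE);
H5Pset_chunk(dc, 2, chunk_size);
H5Pset_filter(dc, H5Z_FILTER_DEFLATE, 0, 1, cd_values);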


@ -20,7 +20,7 @@
#define TRUE 1
#define FILE_NAME_1 "mtime.h5"
#include <H5config.h>
#include <H5private.h>
#ifndef HAVE_ATTRIBUTE
# undef __attribute__
# define __attribute__(X) /*void*/
@ -130,7 +130,7 @@ main(void)
puts(" Modification times will be mantained in the file bug cannot");
puts(" be queried on this system. See H5O_mtime_decode().");
return 1;
} else if (fabs(difftime(now, sb.mtime))>60.0) {
} else if (fabs(HDdifftime(now, sb.mtime))>60.0) {
puts("*FAILED*");
tm = localtime(&(sb.mtime));
strftime(buf1, sizeof buf1, "%Y-%m-%d %H:%M:%S", tm);


@ -175,7 +175,7 @@ list (hid_t group, const char *name, void __unused__ *op_data)
hid_t obj;
hid_t (*func)(void*);
void *edata;
int i;
int i, nf;
char buf[512], comment[50];
H5G_stat_t statbuf;
struct tm *tm;
@ -204,6 +204,30 @@ list (hid_t group, const char *name, void __unused__ *op_data)
printf ("}\n");
H5Dclose (space);
H5Aiterate (obj, NULL, list_attr, NULL);
/* Print additional information about datasets */
if (verbose_g>0) {
hid_t dcpl = H5Dget_create_plist(obj);
if ((nf = H5Pget_nfilters(dcpl))>0) {
for (i=0; i<nf; i++) {
unsigned filt_flags;
H5Z_filter_t filt_id;
unsigned cd_values[20];
size_t cd_nelmts = NELMTS(cd_values);
size_t cd_num;
filt_id = H5Pget_filter(dcpl, i, &filt_flags, &cd_nelmts,
cd_values);
sprintf(comment, "Filter-%d:", i);
printf(" %-10s %u %s {", comment, (unsigned)filt_id,
filt_flags & H5Z_FLAG_OPTIONAL?"OPT":"");
for (cd_num=0; cd_num<cd_nelmts; cd_num++) {
printf("%s%u", cd_num?", ":"", cd_values[cd_num]);
}
printf("}\n");
}
}
H5Pclose(dcpl);
}
H5Dclose (obj);
} else if ((obj=H5Gopen (group, name))>=0) {
printf ("Group\n");
@ -240,7 +264,7 @@ list (hid_t group, const char *name, void __unused__ *op_data)
if (comment[0]) printf(" %-10s %s\n", "Comment:", comment);
}
if (dump_g && (obj=H5Dopen(group, name))) {
if (dump_g && (obj=H5Dopen(group, name))>=0) {
/* Turn on error reporting before dumping the data */
H5Eset_auto(func, edata);
dump_dataset_values(obj);


@ -293,8 +293,10 @@ h5dump_simple(FILE *stream, const h5dump_t *info, hid_t dset, hid_t p_type)
hs_size, NULL);
H5Sselect_hyperslab(sm_space, H5S_SELECT_SET, zero, NULL,
&hs_nelmts, NULL);
H5Dread(dset, p_type, sm_space, f_space, H5P_DEFAULT, sm_buf);
if (H5Dread(dset, p_type, sm_space, f_space, H5P_DEFAULT, sm_buf)<0) {
return -1;
}
/* Print the data */
for (i=0; i<hs_nelmts; i++) {
/* Render the element */
@ -375,7 +377,7 @@ h5dump_fixtype(hid_t f_type)
hid_t m_type=-1, f_memb;
hid_t *memb=NULL;
char **name=NULL;
int nmembs, i, j, *ndims=NULL;
int nmembs=0, i, j, *ndims=NULL;
size_t size, offset, *dims=NULL, nelmts;
size = H5Tget_size(f_type);