mirror of
https://github.com/HDFGroup/hdf5.git
synced 2025-03-19 16:50:46 +08:00
[svn-r9329]
Purpose: Feature. Description: Datatypes and groups now use H5FO "file object" code that was previously only used by datasets. These objects will hold a file open if the file is closed but they have not yet been closed. If these objects are unlinked then relinked, they will not be destroyed. If they are opened twice (even by two different names), both IDs will "see" changes made to the object using the other ID. When an object is opened using two different names (e.g., if a dataset was opened under one name, then mounted and opened under its new name), calling H5Iget_name() on a given hid_t will return the name used to open that hid_t, not the current name of the object (this is a feature, and a change from the previous behavior of datasets). Solution: Used H5FO code that was already in place for datasets. Broke H5D_t's, H5T_t's, and H5G_t's into a "shared" struct and a private struct. The shared structs (H5D_shared_t, etc.) hold the object's information and are used by all IDs that point to a given object in the file. The private structs are pointed to by the hid_t and contain the object's group entry information (including its name) and a pointer to the shared struct for that object. This changed the naming of structs throughout the library (e.g., datatype->size is now datatype->shared->size). I added an updated H5Tinit.c to windows.zip. Platforms tested: Visual Studio 7, sleipnir, arabica, verbena. Misc. update:
This commit is contained in:
parent
a841ea3529
commit
5c0011a713
@ -69,7 +69,7 @@ H5D_compact_readvv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, const H5D_t *dset,
|
||||
assert(dset);
|
||||
|
||||
/* Use the vectorized memory copy routine to do actual work */
|
||||
if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,dset->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
|
||||
if((ret_value=H5V_memcpyvv(buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr,dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr))<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
|
||||
|
||||
done:
|
||||
@ -113,10 +113,10 @@ H5D_compact_writevv(H5F_t UNUSED *f, hid_t UNUSED dxpl_id, H5D_t *dset,
|
||||
assert(dset);
|
||||
|
||||
/* Use the vectorized memory copy routine to do actual work */
|
||||
if((ret_value=H5V_memcpyvv(dset->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
|
||||
if((ret_value=H5V_memcpyvv(dset->shared->layout.u.compact.buf,dset_max_nseq,dset_curr_seq,dset_size_arr,dset_offset_arr,buf,mem_max_nseq,mem_curr_seq,mem_size_arr,mem_offset_arr))<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vectorized memcpy failed");
|
||||
|
||||
dset->layout.u.compact.dirty = TRUE;
|
||||
dset->shared->layout.u.compact.dirty = TRUE;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
|
168
src/H5Dcontig.c
168
src/H5Dcontig.c
@ -80,7 +80,7 @@ H5D_contig_create(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
assert(dset);
|
||||
|
||||
/* Allocate space for the contiguous data */
|
||||
if (HADDR_UNDEF==(dset->layout.u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, dset->layout.u.contig.size)))
|
||||
if (HADDR_UNDEF==(dset->shared->layout.u.contig.addr=H5MF_alloc(f, H5FD_MEM_DRAW, dxpl_id, dset->shared->layout.u.contig.size)))
|
||||
HGOTO_ERROR (H5E_IO, H5E_NOSPACE, FAIL, "unable to reserve file space");
|
||||
|
||||
done:
|
||||
@ -131,10 +131,10 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
/* Check args */
|
||||
assert(f);
|
||||
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
|
||||
assert(dset && H5D_CONTIGUOUS==dset->layout.type);
|
||||
assert(H5F_addr_defined(dset->layout.u.contig.addr));
|
||||
assert(dset->layout.u.contig.size>0);
|
||||
assert(dset->space);
|
||||
assert(dset && H5D_CONTIGUOUS==dset->shared->layout.type);
|
||||
assert(H5F_addr_defined(dset->shared->layout.u.contig.addr));
|
||||
assert(dset->shared->layout.u.contig.size>0);
|
||||
assert(dset->shared->space);
|
||||
|
||||
#ifdef H5_HAVE_PARALLEL
|
||||
/* Retrieve MPI parameters */
|
||||
@ -153,17 +153,17 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
#endif /* H5_HAVE_PARALLEL */
|
||||
|
||||
/* Get size of elements */
|
||||
elmt_size=H5T_get_size(dset->type);
|
||||
elmt_size=H5T_get_size(dset->shared->type);
|
||||
assert(elmt_size>0);
|
||||
|
||||
/* Get the number of elements in the dataset's dataspace */
|
||||
snpoints = H5S_GET_EXTENT_NPOINTS(dset->space);
|
||||
snpoints = H5S_GET_EXTENT_NPOINTS(dset->shared->space);
|
||||
assert(snpoints>=0);
|
||||
H5_ASSIGN_OVERFLOW(npoints,snpoints,hssize_t,size_t);
|
||||
|
||||
/* If fill value is not library default, use it to set the element size */
|
||||
if(dset->fill.buf)
|
||||
elmt_size=dset->fill.size;
|
||||
if(dset->shared->fill.buf)
|
||||
elmt_size=dset->shared->fill.size;
|
||||
|
||||
/*
|
||||
* Fill the entire current extent with the fill value. We can do
|
||||
@ -174,12 +174,12 @@ H5D_contig_fill(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
bufsize = ptsperbuf*elmt_size;
|
||||
|
||||
/* Fill the buffer with the user's fill value */
|
||||
if(dset->fill.buf) {
|
||||
if(dset->shared->fill.buf) {
|
||||
/* Allocate temporary buffer */
|
||||
if ((buf=H5FL_BLK_MALLOC(non_zero_fill,bufsize))==NULL)
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for fill buffer");
|
||||
|
||||
H5V_array_fill(buf, dset->fill.buf, elmt_size, ptsperbuf);
|
||||
H5V_array_fill(buf, dset->shared->fill.buf, elmt_size, ptsperbuf);
|
||||
|
||||
/* Indicate that a non-zero fill buffer was used */
|
||||
non_zero_fill_f=1;
|
||||
@ -318,9 +318,9 @@ H5D_contig_get_addr(const H5D_t *dset)
|
||||
|
||||
/* check args */
|
||||
assert(dset);
|
||||
assert(dset->layout.type==H5D_CONTIGUOUS);
|
||||
assert(dset->shared->layout.type==H5D_CONTIGUOUS);
|
||||
|
||||
FUNC_LEAVE_NOAPI(dset->layout.u.contig.addr);
|
||||
FUNC_LEAVE_NOAPI(dset->shared->layout.u.contig.addr);
|
||||
} /* end H5D_contig_get_addr */
|
||||
|
||||
|
||||
@ -359,7 +359,7 @@ H5D_contig_write(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
assert (dset);
|
||||
assert (buf);
|
||||
|
||||
if (H5D_contig_writevv(f, dxpl_id, dset, dset->layout.u.contig.addr, dset->layout.u.contig.size,
|
||||
if (H5D_contig_writevv(f, dxpl_id, dset, dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size,
|
||||
1, &dset_curr_seq, &dset_len, &dset_off, 1, &mem_curr_seq, &mem_len, &mem_off, buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "vector write failed");
|
||||
|
||||
@ -423,9 +423,9 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
v=*mem_curr_seq;
|
||||
|
||||
/* Stash local copies of these value */
|
||||
if(dset->cache.contig.sieve_buf!=NULL) {
|
||||
sieve_start=dset->cache.contig.sieve_loc;
|
||||
sieve_size=dset->cache.contig.sieve_size;
|
||||
if(dset->shared->cache.contig.sieve_buf!=NULL) {
|
||||
sieve_start=dset->shared->cache.contig.sieve_loc;
|
||||
sieve_size=dset->shared->cache.contig.sieve_size;
|
||||
sieve_end=sieve_start+sieve_size;
|
||||
} /* end if */
|
||||
|
||||
@ -444,19 +444,19 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
buf = (unsigned char *)_buf + mem_offset_arr[v];
|
||||
|
||||
/* Check if the sieve buffer is allocated yet */
|
||||
if(dset->cache.contig.sieve_buf==NULL) {
|
||||
if(dset->shared->cache.contig.sieve_buf==NULL) {
|
||||
/* Check if we can actually hold the I/O request in the sieve buffer */
|
||||
if(size>dset->cache.contig.sieve_buf_size) {
|
||||
if(size>dset->shared->cache.contig.sieve_buf_size) {
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
|
||||
} /* end if */
|
||||
else {
|
||||
/* Allocate room for the data sieve buffer */
|
||||
if (NULL==(dset->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->cache.contig.sieve_buf_size)))
|
||||
if (NULL==(dset->shared->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->shared->cache.contig.sieve_buf_size)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
|
||||
|
||||
/* Determine the new sieve buffer size & location */
|
||||
dset->cache.contig.sieve_loc=addr;
|
||||
dset->shared->cache.contig.sieve_loc=addr;
|
||||
|
||||
/* Make certain we don't read off the end of the file */
|
||||
if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
|
||||
@ -469,21 +469,21 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
max_data=dset_size-dset_offset_arr[u];
|
||||
|
||||
/* Compute the size of the sieve buffer */
|
||||
H5_ASSIGN_OVERFLOW(dset->cache.contig.sieve_size,MIN3(rel_eoa-dset->cache.contig.sieve_loc,max_data,dset->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
|
||||
/* Read the new sieve buffer */
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->cache.contig.sieve_loc, dset->cache.contig.sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
|
||||
|
||||
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
|
||||
HDmemcpy(buf,dset->cache.contig.sieve_buf,size);
|
||||
HDmemcpy(buf,dset->shared->cache.contig.sieve_buf,size);
|
||||
|
||||
/* Reset sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=0;
|
||||
dset->shared->cache.contig.sieve_dirty=0;
|
||||
|
||||
/* Stash local copies of these value */
|
||||
sieve_start=dset->cache.contig.sieve_loc;
|
||||
sieve_size=dset->cache.contig.sieve_size;
|
||||
sieve_start=dset->shared->cache.contig.sieve_loc;
|
||||
sieve_size=dset->shared->cache.contig.sieve_size;
|
||||
sieve_end=sieve_start+sieve_size;
|
||||
} /* end else */
|
||||
} /* end if */
|
||||
@ -493,7 +493,7 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
|
||||
/* If entire read is within the sieve buffer, read it from the buffer */
|
||||
if(addr>=sieve_start && contig_end<sieve_end) {
|
||||
unsigned char *base_sieve_buf=dset->cache.contig.sieve_buf+(addr-sieve_start);
|
||||
unsigned char *base_sieve_buf=dset->shared->cache.contig.sieve_buf+(addr-sieve_start);
|
||||
|
||||
/* Grab the data out of the buffer */
|
||||
HDmemcpy(buf,base_sieve_buf,size);
|
||||
@ -501,18 +501,18 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
/* Entire request is not within this data sieve buffer */
|
||||
else {
|
||||
/* Check if we can actually hold the I/O request in the sieve buffer */
|
||||
if(size>dset->cache.contig.sieve_buf_size) {
|
||||
if(size>dset->shared->cache.contig.sieve_buf_size) {
|
||||
/* Check for any overlap with the current sieve buffer */
|
||||
if((sieve_start>=addr && sieve_start<(contig_end+1))
|
||||
|| ((sieve_end-1)>=addr && (sieve_end-1)<(contig_end+1))) {
|
||||
/* Flush the sieve buffer, if it's dirty */
|
||||
if(dset->cache.contig.sieve_dirty) {
|
||||
if(dset->shared->cache.contig.sieve_dirty) {
|
||||
/* Write to file */
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
|
||||
|
||||
/* Reset sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=0;
|
||||
dset->shared->cache.contig.sieve_dirty=0;
|
||||
} /* end if */
|
||||
} /* end if */
|
||||
|
||||
@ -523,17 +523,17 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
/* Element size fits within the buffer size */
|
||||
else {
|
||||
/* Flush the sieve buffer if it's dirty */
|
||||
if(dset->cache.contig.sieve_dirty) {
|
||||
if(dset->shared->cache.contig.sieve_dirty) {
|
||||
/* Write to file */
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
|
||||
|
||||
/* Reset sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=0;
|
||||
dset->shared->cache.contig.sieve_dirty=0;
|
||||
} /* end if */
|
||||
|
||||
/* Determine the new sieve buffer size & location */
|
||||
dset->cache.contig.sieve_loc=addr;
|
||||
dset->shared->cache.contig.sieve_loc=addr;
|
||||
|
||||
/* Make certain we don't read off the end of the file */
|
||||
if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
|
||||
@ -547,22 +547,22 @@ H5D_contig_readvv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
|
||||
/* Compute the size of the sieve buffer */
|
||||
/* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
|
||||
H5_ASSIGN_OVERFLOW(dset->cache.contig.sieve_size,MIN3(rel_eoa-dset->cache.contig.sieve_loc,max_data,dset->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
|
||||
/* Update local copies of sieve information */
|
||||
sieve_start=dset->cache.contig.sieve_loc;
|
||||
sieve_size=dset->cache.contig.sieve_size;
|
||||
sieve_start=dset->shared->cache.contig.sieve_loc;
|
||||
sieve_size=dset->shared->cache.contig.sieve_size;
|
||||
sieve_end=sieve_start+sieve_size;
|
||||
|
||||
/* Read the new sieve buffer */
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->cache.contig.sieve_loc, dset->cache.contig.sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
|
||||
|
||||
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
|
||||
HDmemcpy(buf,dset->cache.contig.sieve_buf,size);
|
||||
HDmemcpy(buf,dset->shared->cache.contig.sieve_buf,size);
|
||||
|
||||
/* Reset sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=0;
|
||||
dset->shared->cache.contig.sieve_dirty=0;
|
||||
} /* end else */
|
||||
} /* end else */
|
||||
} /* end else */
|
||||
@ -683,9 +683,9 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
v=*mem_curr_seq;
|
||||
|
||||
/* Stash local copies of these values */
|
||||
if(dset->cache.contig.sieve_buf!=NULL) {
|
||||
sieve_start=dset->cache.contig.sieve_loc;
|
||||
sieve_size=dset->cache.contig.sieve_size;
|
||||
if(dset->shared->cache.contig.sieve_buf!=NULL) {
|
||||
sieve_start=dset->shared->cache.contig.sieve_loc;
|
||||
sieve_size=dset->shared->cache.contig.sieve_size;
|
||||
sieve_end=sieve_start+sieve_size;
|
||||
} /* end if */
|
||||
|
||||
@ -704,19 +704,19 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
buf = (const unsigned char *)_buf + mem_offset_arr[v];
|
||||
|
||||
/* No data sieve buffer yet, go allocate one */
|
||||
if(dset->cache.contig.sieve_buf==NULL) {
|
||||
if(dset->shared->cache.contig.sieve_buf==NULL) {
|
||||
/* Check if we can actually hold the I/O request in the sieve buffer */
|
||||
if(size>dset->cache.contig.sieve_buf_size) {
|
||||
if(size>dset->shared->cache.contig.sieve_buf_size) {
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, addr, size, dxpl_id, buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
|
||||
} /* end if */
|
||||
else {
|
||||
/* Allocate room for the data sieve buffer */
|
||||
if (NULL==(dset->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->cache.contig.sieve_buf_size)))
|
||||
if (NULL==(dset->shared->cache.contig.sieve_buf=H5FL_BLK_MALLOC(sieve_buf,dset->shared->cache.contig.sieve_buf_size)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
|
||||
|
||||
/* Determine the new sieve buffer size & location */
|
||||
dset->cache.contig.sieve_loc=addr;
|
||||
dset->shared->cache.contig.sieve_loc=addr;
|
||||
|
||||
/* Make certain we don't read off the end of the file */
|
||||
if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
|
||||
@ -729,24 +729,24 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
max_data=dset_size-dset_offset_arr[u];
|
||||
|
||||
/* Compute the size of the sieve buffer */
|
||||
H5_ASSIGN_OVERFLOW(dset->cache.contig.sieve_size,MIN3(rel_eoa-dset->cache.contig.sieve_loc,max_data,dset->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
|
||||
/* Check if there is any point in reading the data from the file */
|
||||
if(dset->cache.contig.sieve_size>size) {
|
||||
if(dset->shared->cache.contig.sieve_size>size) {
|
||||
/* Read the new sieve buffer */
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->cache.contig.sieve_loc, dset->cache.contig.sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
|
||||
} /* end if */
|
||||
|
||||
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
|
||||
HDmemcpy(dset->cache.contig.sieve_buf,buf,size);
|
||||
HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size);
|
||||
|
||||
/* Set sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=1;
|
||||
dset->shared->cache.contig.sieve_dirty=1;
|
||||
|
||||
/* Stash local copies of these values */
|
||||
sieve_start=dset->cache.contig.sieve_loc;
|
||||
sieve_size=dset->cache.contig.sieve_size;
|
||||
sieve_start=dset->shared->cache.contig.sieve_loc;
|
||||
sieve_size=dset->shared->cache.contig.sieve_size;
|
||||
sieve_end=sieve_start+sieve_size;
|
||||
} /* end else */
|
||||
} /* end if */
|
||||
@ -756,35 +756,35 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
|
||||
/* If entire write is within the sieve buffer, write it to the buffer */
|
||||
if(addr>=sieve_start && contig_end<sieve_end) {
|
||||
unsigned char *base_sieve_buf=dset->cache.contig.sieve_buf+(addr-sieve_start);
|
||||
unsigned char *base_sieve_buf=dset->shared->cache.contig.sieve_buf+(addr-sieve_start);
|
||||
|
||||
/* Put the data into the sieve buffer */
|
||||
HDmemcpy(base_sieve_buf,buf,size);
|
||||
|
||||
/* Set sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=1;
|
||||
dset->shared->cache.contig.sieve_dirty=1;
|
||||
|
||||
} /* end if */
|
||||
/* Entire request is not within this data sieve buffer */
|
||||
else {
|
||||
/* Check if we can actually hold the I/O request in the sieve buffer */
|
||||
if(size>dset->cache.contig.sieve_buf_size) {
|
||||
if(size>dset->shared->cache.contig.sieve_buf_size) {
|
||||
/* Check for any overlap with the current sieve buffer */
|
||||
if((sieve_start>=addr && sieve_start<(contig_end+1))
|
||||
|| ((sieve_end-1)>=addr && (sieve_end-1)<(contig_end+1))) {
|
||||
/* Flush the sieve buffer, if it's dirty */
|
||||
if(dset->cache.contig.sieve_dirty) {
|
||||
if(dset->shared->cache.contig.sieve_dirty) {
|
||||
/* Write to file */
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
|
||||
|
||||
/* Reset sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=0;
|
||||
dset->shared->cache.contig.sieve_dirty=0;
|
||||
} /* end if */
|
||||
|
||||
/* Force the sieve buffer to be re-read the next time */
|
||||
dset->cache.contig.sieve_loc=HADDR_UNDEF;
|
||||
dset->cache.contig.sieve_size=0;
|
||||
dset->shared->cache.contig.sieve_loc=HADDR_UNDEF;
|
||||
dset->shared->cache.contig.sieve_size=0;
|
||||
} /* end if */
|
||||
|
||||
/* Write directly from the user's buffer */
|
||||
@ -795,49 +795,49 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
else {
|
||||
/* Check if it is possible to (exactly) prepend or append to existing (dirty) sieve buffer */
|
||||
if(((addr+size)==sieve_start || addr==sieve_end) &&
|
||||
(size+sieve_size)<=dset->cache.contig.sieve_buf_size &&
|
||||
dset->cache.contig.sieve_dirty) {
|
||||
(size+sieve_size)<=dset->shared->cache.contig.sieve_buf_size &&
|
||||
dset->shared->cache.contig.sieve_dirty) {
|
||||
/* Prepend to existing sieve buffer */
|
||||
if((addr+size)==sieve_start) {
|
||||
/* Move existing sieve information to correct location */
|
||||
HDmemmove(dset->cache.contig.sieve_buf+size,dset->cache.contig.sieve_buf,sieve_size);
|
||||
HDmemmove(dset->shared->cache.contig.sieve_buf+size,dset->shared->cache.contig.sieve_buf,sieve_size);
|
||||
|
||||
/* Copy in new information (must be first in sieve buffer) */
|
||||
HDmemcpy(dset->cache.contig.sieve_buf,buf,size);
|
||||
HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size);
|
||||
|
||||
/* Adjust sieve location */
|
||||
dset->cache.contig.sieve_loc=addr;
|
||||
dset->shared->cache.contig.sieve_loc=addr;
|
||||
|
||||
} /* end if */
|
||||
/* Append to existing sieve buffer */
|
||||
else {
|
||||
/* Copy in new information */
|
||||
HDmemcpy(dset->cache.contig.sieve_buf+sieve_size,buf,size);
|
||||
HDmemcpy(dset->shared->cache.contig.sieve_buf+sieve_size,buf,size);
|
||||
} /* end else */
|
||||
|
||||
/* Adjust sieve size */
|
||||
dset->cache.contig.sieve_size += size;
|
||||
dset->shared->cache.contig.sieve_size += size;
|
||||
|
||||
/* Update local copies of sieve information */
|
||||
sieve_start=dset->cache.contig.sieve_loc;
|
||||
sieve_size=dset->cache.contig.sieve_size;
|
||||
sieve_start=dset->shared->cache.contig.sieve_loc;
|
||||
sieve_size=dset->shared->cache.contig.sieve_size;
|
||||
sieve_end=sieve_start+sieve_size;
|
||||
|
||||
} /* end if */
|
||||
/* Can't add the new data onto the existing sieve buffer */
|
||||
else {
|
||||
/* Flush the sieve buffer if it's dirty */
|
||||
if(dset->cache.contig.sieve_dirty) {
|
||||
if(dset->shared->cache.contig.sieve_dirty) {
|
||||
/* Write to file */
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, sieve_start, sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "block write failed");
|
||||
|
||||
/* Reset sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=0;
|
||||
dset->shared->cache.contig.sieve_dirty=0;
|
||||
} /* end if */
|
||||
|
||||
/* Determine the new sieve buffer size & location */
|
||||
dset->cache.contig.sieve_loc=addr;
|
||||
dset->shared->cache.contig.sieve_loc=addr;
|
||||
|
||||
/* Make certain we don't read off the end of the file */
|
||||
if (HADDR_UNDEF==(abs_eoa=H5F_get_eoa(f)))
|
||||
@ -851,25 +851,25 @@ H5D_contig_writevv(H5F_t *f, hid_t dxpl_id, H5D_t *dset,
|
||||
|
||||
/* Compute the size of the sieve buffer */
|
||||
/* Don't read off the end of the file, don't read past the end of the data element and don't read more than the buffer size */
|
||||
H5_ASSIGN_OVERFLOW(dset->cache.contig.sieve_size,MIN3(rel_eoa-dset->cache.contig.sieve_loc,max_data,dset->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
H5_ASSIGN_OVERFLOW(dset->shared->cache.contig.sieve_size,MIN3(rel_eoa-dset->shared->cache.contig.sieve_loc,max_data,dset->shared->cache.contig.sieve_buf_size),hsize_t,size_t);
|
||||
|
||||
/* Update local copies of sieve information */
|
||||
sieve_start=dset->cache.contig.sieve_loc;
|
||||
sieve_size=dset->cache.contig.sieve_size;
|
||||
sieve_start=dset->shared->cache.contig.sieve_loc;
|
||||
sieve_size=dset->shared->cache.contig.sieve_size;
|
||||
sieve_end=sieve_start+sieve_size;
|
||||
|
||||
/* Check if there is any point in reading the data from the file */
|
||||
if(dset->cache.contig.sieve_size>size) {
|
||||
if(dset->shared->cache.contig.sieve_size>size) {
|
||||
/* Read the new sieve buffer */
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->cache.contig.sieve_loc, dset->cache.contig.sieve_size, dxpl_id, dset->cache.contig.sieve_buf)<0)
|
||||
if (H5F_block_read(f, H5FD_MEM_DRAW, dset->shared->cache.contig.sieve_loc, dset->shared->cache.contig.sieve_size, dxpl_id, dset->shared->cache.contig.sieve_buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "block read failed");
|
||||
} /* end if */
|
||||
|
||||
/* Grab the data out of the buffer (must be first piece of data in buffer ) */
|
||||
HDmemcpy(dset->cache.contig.sieve_buf,buf,size);
|
||||
HDmemcpy(dset->shared->cache.contig.sieve_buf,buf,size);
|
||||
|
||||
/* Set sieve buffer dirty flag */
|
||||
dset->cache.contig.sieve_dirty=1;
|
||||
dset->shared->cache.contig.sieve_dirty=1;
|
||||
|
||||
} /* end else */
|
||||
} /* end else */
|
||||
|
148
src/H5Dio.c
148
src/H5Dio.c
@ -661,7 +661,7 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
|
||||
if (!file_space)
|
||||
file_space = dataset->space;
|
||||
file_space = dataset->shared->space;
|
||||
if (!mem_space)
|
||||
mem_space = file_space;
|
||||
if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space))<0)
|
||||
@ -702,26 +702,26 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
* fill time is NEVER, there is no way to tell whether part of data
|
||||
* has been overwritten. So just proceed in reading.
|
||||
*/
|
||||
if(nelmts > 0 && dataset->efl.nused==0 &&
|
||||
((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr))
|
||||
|| (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr)))) {
|
||||
if(nelmts > 0 && dataset->shared->efl.nused==0 &&
|
||||
((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|
||||
|| (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))) {
|
||||
H5D_fill_value_t fill_status; /* Whether/How the fill value is defined */
|
||||
|
||||
/* Retrieve dataset's fill-value properties */
|
||||
if(H5P_is_fill_value_defined(&dataset->dcpl_cache.fill, &fill_status)<0)
|
||||
if(H5P_is_fill_value_defined(&dataset->shared->dcpl_cache.fill, &fill_status)<0)
|
||||
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't tell if fill value defined")
|
||||
|
||||
/* Should be impossible, but check anyway... */
|
||||
if(fill_status == H5D_FILL_VALUE_UNDEFINED &&
|
||||
(dataset->dcpl_cache.fill_time == H5D_FILL_TIME_ALLOC || dataset->dcpl_cache.fill_time == H5D_FILL_TIME_IFSET))
|
||||
(dataset->shared->dcpl_cache.fill_time == H5D_FILL_TIME_ALLOC || dataset->shared->dcpl_cache.fill_time == H5D_FILL_TIME_IFSET))
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "read failed: dataset doesn't exist, no data can be read")
|
||||
|
||||
/* If we're never going to fill this dataset, just leave the junk in the user's buffer */
|
||||
if(dataset->dcpl_cache.fill_time == H5D_FILL_TIME_NEVER)
|
||||
if(dataset->shared->dcpl_cache.fill_time == H5D_FILL_TIME_NEVER)
|
||||
HGOTO_DONE(SUCCEED)
|
||||
|
||||
/* Go fill the user's selection with the dataset's fill value */
|
||||
if(H5D_fill(dataset->dcpl_cache.fill.buf,dataset->type,buf,mem_type,mem_space,dxpl_id)<0)
|
||||
if(H5D_fill(dataset->shared->dcpl_cache.fill.buf,dataset->shared->type,buf,mem_type,mem_space,dxpl_id)<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "filling buf failed")
|
||||
else
|
||||
HGOTO_DONE(SUCCEED)
|
||||
@ -735,11 +735,11 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
* enough value in xfer_parms since turning off data type conversion also
|
||||
* turns off background preservation.
|
||||
*/
|
||||
if (NULL==(tpath=H5T_path_find(dataset->type, mem_type, NULL, NULL, dxpl_id)))
|
||||
if (NULL==(tpath=H5T_path_find(dataset->shared->type, mem_type, NULL, NULL, dxpl_id)))
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dest data types")
|
||||
|
||||
/* Set the storage flags for the space conversion check */
|
||||
switch(dataset->layout.type) {
|
||||
switch(dataset->shared->layout.type) {
|
||||
case H5D_COMPACT:
|
||||
sconv_flags |= H5S_CONV_STORAGE_COMPACT;
|
||||
break;
|
||||
@ -757,7 +757,7 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
} /* end switch */
|
||||
|
||||
/* Get dataspace functions */
|
||||
if (NULL==(sconv=H5S_find(dataset->ent.file, mem_space, file_space, sconv_flags, &use_par_opt_io, &dataset->layout)))
|
||||
if (NULL==(sconv=H5S_find(dataset->ent.file, mem_space, file_space, sconv_flags, &use_par_opt_io, &dataset->shared->layout)))
|
||||
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert from file to memory data space")
|
||||
|
||||
#ifdef H5_HAVE_PARALLEL
|
||||
@ -767,7 +767,7 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
a more general collective chunk IO algorithm is applied.
|
||||
*/
|
||||
|
||||
if(dataset->layout.type == H5D_CHUNKED) { /*only check for chunking storage */
|
||||
if(dataset->shared->layout.type == H5D_CHUNKED) { /*only check for chunking storage */
|
||||
check_prop = H5Pexist(dxpl_id,H5D_XFER_COLL_CHUNK_NAME);
|
||||
if(check_prop < 0)
|
||||
HGOTO_ERROR(H5E_PLIST, H5E_UNSUPPORTED, FAIL, "unable to check property list");
|
||||
@ -790,14 +790,14 @@ H5D_read(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
#endif /*H5_HAVE_PARALLEL*/
|
||||
|
||||
/* Determine correct I/O routine to invoke */
|
||||
if(dataset->layout.type!=H5D_CHUNKED) {
|
||||
if(dataset->shared->layout.type!=H5D_CHUNKED) {
|
||||
if(H5D_contig_read(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
|
||||
dxpl_cache, dxpl_id, dataset->type_id, mem_type_id, buf)<0)
|
||||
dxpl_cache, dxpl_id, dataset->shared->type_id, mem_type_id, buf)<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data")
|
||||
} /* end if */
|
||||
else {
|
||||
if(H5D_chunk_read(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
|
||||
dxpl_cache, dxpl_id, dataset->type_id, mem_type_id, buf)<0)
|
||||
dxpl_cache, dxpl_id, dataset->shared->type_id, mem_type_id, buf)<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data")
|
||||
} /* end else */
|
||||
|
||||
@ -888,12 +888,12 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
|
||||
/* All filters in the DCPL must have encoding enabled. */
|
||||
if(! dataset->checked_filters)
|
||||
if(! dataset->shared->checked_filters)
|
||||
{
|
||||
if(H5Z_can_apply(dataset->dcpl_id, dataset->type_id) <0)
|
||||
if(H5Z_can_apply(dataset->shared->dcpl_id, dataset->shared->type_id) <0)
|
||||
HGOTO_ERROR(H5E_PLINE, H5E_CANAPPLY, FAIL, "can't apply filters")
|
||||
|
||||
dataset->checked_filters = TRUE;
|
||||
dataset->shared->checked_filters = TRUE;
|
||||
}
|
||||
|
||||
/* If MPI based VFD is used, no VL datatype support yet. */
|
||||
@ -921,7 +921,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
|
||||
|
||||
if (!file_space)
|
||||
file_space = dataset->space;
|
||||
file_space = dataset->shared->space;
|
||||
if (!mem_space)
|
||||
mem_space = file_space;
|
||||
if((snelmts = H5S_GET_SELECT_NPOINTS(mem_space))<0)
|
||||
@ -953,9 +953,9 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
/* <none needed currently> */
|
||||
|
||||
/* Allocate data space and initialize it if it hasn't been. */
|
||||
if(nelmts > 0 && dataset->efl.nused==0 &&
|
||||
((dataset->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->layout.u.contig.addr))
|
||||
|| (dataset->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->layout.u.chunk.addr)))) {
|
||||
if(nelmts > 0 && dataset->shared->efl.nused==0 &&
|
||||
((dataset->shared->layout.type==H5D_CONTIGUOUS && !H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|
||||
|| (dataset->shared->layout.type==H5D_CHUNKED && !H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))) {
|
||||
hssize_t file_nelmts; /* Number of elements in file dataset's dataspace */
|
||||
hbool_t full_overwrite; /* Whether we are over-writing all the elements */
|
||||
|
||||
@ -964,7 +964,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
HGOTO_ERROR (H5E_DATASET, H5E_BADVALUE, FAIL, "can't retrieve number of elements in file dataset")
|
||||
|
||||
/* Always allow fill values to be written if the dataset has a VL datatype */
|
||||
if(H5T_detect_class(dataset->type, H5T_VLEN))
|
||||
if(H5T_detect_class(dataset->shared->type, H5T_VLEN))
|
||||
full_overwrite=FALSE;
|
||||
else
|
||||
full_overwrite=(hsize_t)file_nelmts==nelmts ? TRUE : FALSE;
|
||||
@ -982,11 +982,11 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
* enough value in xfer_parms since turning off data type conversion also
|
||||
* turns off background preservation.
|
||||
*/
|
||||
if (NULL==(tpath=H5T_path_find(mem_type, dataset->type, NULL, NULL, dxpl_id)))
|
||||
if (NULL==(tpath=H5T_path_find(mem_type, dataset->shared->type, NULL, NULL, dxpl_id)))
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert between src and dest data types")
|
||||
|
||||
/* Set the storage flags for the space conversion check */
|
||||
switch(dataset->layout.type) {
|
||||
switch(dataset->shared->layout.type) {
|
||||
case H5D_COMPACT:
|
||||
sconv_flags |= H5S_CONV_STORAGE_COMPACT;
|
||||
break;
|
||||
@ -1004,7 +1004,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
} /* end switch */
|
||||
|
||||
/* Get dataspace functions */
|
||||
if (NULL==(sconv=H5S_find(dataset->ent.file, mem_space, file_space, sconv_flags, &use_par_opt_io, &dataset->layout)))
|
||||
if (NULL==(sconv=H5S_find(dataset->ent.file, mem_space, file_space, sconv_flags, &use_par_opt_io, &dataset->shared->layout)))
|
||||
HGOTO_ERROR (H5E_DATASET, H5E_UNSUPPORTED, FAIL, "unable to convert from memory to file data space")
|
||||
|
||||
#ifdef H5_HAVE_PARALLEL
|
||||
@ -1014,7 +1014,7 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
a more general collective chunk IO algorithm is applied.
|
||||
*/
|
||||
|
||||
if(dataset->layout.type == H5D_CHUNKED) { /*only check for chunking storage */
|
||||
if(dataset->shared->layout.type == H5D_CHUNKED) { /*only check for chunking storage */
|
||||
|
||||
check_prop = H5Pexist(dxpl_id,H5D_XFER_COLL_CHUNK_NAME);
|
||||
if(check_prop < 0)
|
||||
@ -1037,14 +1037,14 @@ H5D_write(H5D_t *dataset, hid_t mem_type_id, const H5S_t *mem_space,
|
||||
#endif /*H5_HAVE_PARALLEL*/
|
||||
|
||||
/* Determine correct I/O routine to invoke */
|
||||
if(dataset->layout.type!=H5D_CHUNKED) {
|
||||
if(dataset->shared->layout.type!=H5D_CHUNKED) {
|
||||
if(H5D_contig_write(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
|
||||
dxpl_cache, dxpl_id, mem_type_id, dataset->type_id, buf)<0)
|
||||
dxpl_cache, dxpl_id, mem_type_id, dataset->shared->type_id, buf)<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
|
||||
} /* end if */
|
||||
else {
|
||||
if(H5D_chunk_write(nelmts, dataset, mem_type, mem_space, file_space, tpath, sconv,
|
||||
dxpl_cache, dxpl_id, mem_type_id, dataset->type_id, buf)<0)
|
||||
dxpl_cache, dxpl_id, mem_type_id, dataset->shared->type_id, buf)<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
|
||||
} /* end else */
|
||||
|
||||
@ -1132,19 +1132,19 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset,
|
||||
H5_timer_begin(&timer);
|
||||
#endif
|
||||
/* Sanity check dataset, then read it */
|
||||
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|
||||
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|
||||
|| dataset->efl.nused>0 || 0 == nelmts
|
||||
|| dataset->layout.type==H5D_COMPACT);
|
||||
assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|
||||
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|
||||
|| dataset->shared->efl.nused>0 || 0 == nelmts
|
||||
|| dataset->shared->layout.type==H5D_COMPACT);
|
||||
H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t);
|
||||
status = (sconv->read)(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, (H5D_storage_t *)&(dataset->efl),
|
||||
(size_t)nelmts, H5T_get_size(dataset->type),
|
||||
dataset, (H5D_storage_t *)&(dataset->shared->efl),
|
||||
(size_t)nelmts, H5T_get_size(dataset->shared->type),
|
||||
file_space, mem_space,
|
||||
buf/*out*/);
|
||||
#ifdef H5S_DEBUG
|
||||
H5_timer_end(&(sconv->stats[1].read_timer), &timer);
|
||||
sconv->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->type);
|
||||
sconv->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->shared->type);
|
||||
sconv->stats[1].read_ncalls++;
|
||||
#endif
|
||||
|
||||
@ -1163,7 +1163,7 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset,
|
||||
HGOTO_DONE(SUCCEED)
|
||||
|
||||
/* Compute element sizes and other parameters */
|
||||
src_type_size = H5T_get_size(dataset->type);
|
||||
src_type_size = H5T_get_size(dataset->shared->type);
|
||||
dst_type_size = H5T_get_size(mem_type);
|
||||
max_type_size = MAX(src_type_size, dst_type_size);
|
||||
target_size = dxpl_cache->max_temp_buf;
|
||||
@ -1240,12 +1240,12 @@ H5D_contig_read(hsize_t nelmts, H5D_t *dataset,
|
||||
H5_timer_begin(&timer);
|
||||
#endif
|
||||
/* Sanity check that space is allocated, then read data from it */
|
||||
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|
||||
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|
||||
|| dataset->efl.nused>0 || 0 == nelmts
|
||||
|| dataset->layout.type==H5D_COMPACT);
|
||||
assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|
||||
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|
||||
|| dataset->shared->efl.nused>0 || 0 == nelmts
|
||||
|| dataset->shared->layout.type==H5D_COMPACT);
|
||||
n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, (H5D_storage_t *)&(dataset->efl),
|
||||
dataset, (H5D_storage_t *)&(dataset->shared->efl),
|
||||
file_space, &file_iter, smine_nelmts,
|
||||
tconv_buf/*out*/);
|
||||
|
||||
@ -1384,8 +1384,8 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
|
||||
#endif
|
||||
H5_CHECK_OVERFLOW(nelmts,hsize_t,size_t);
|
||||
status = (sconv->write)(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, (H5D_storage_t *)&(dataset->efl),
|
||||
(size_t)nelmts, H5T_get_size(dataset->type),
|
||||
dataset, (H5D_storage_t *)&(dataset->shared->efl),
|
||||
(size_t)nelmts, H5T_get_size(dataset->shared->type),
|
||||
file_space, mem_space,
|
||||
buf);
|
||||
#ifdef H5S_DEBUG
|
||||
@ -1410,7 +1410,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
|
||||
|
||||
/* Compute element sizes and other parameters */
|
||||
src_type_size = H5T_get_size(mem_type);
|
||||
dst_type_size = H5T_get_size(dataset->type);
|
||||
dst_type_size = H5T_get_size(dataset->shared->type);
|
||||
max_type_size = MAX(src_type_size, dst_type_size);
|
||||
target_size = dxpl_cache->max_temp_buf;
|
||||
/* XXX: This could cause a problem if the user sets their buffer size
|
||||
@ -1449,7 +1449,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
|
||||
* malloc() is usually less resource-intensive if we allocate/free the
|
||||
* same size over and over.
|
||||
*/
|
||||
if(H5T_detect_class(dataset->type, H5T_VLEN)) {
|
||||
if(H5T_detect_class(dataset->shared->type, H5T_VLEN)) {
|
||||
/* Old data is retrieved into background buffer for VL datatype. The
|
||||
* data is used later for freeing heap objects. */
|
||||
need_bkg = H5T_BKG_YES;
|
||||
@ -1504,7 +1504,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
|
||||
H5_timer_begin(&timer);
|
||||
#endif
|
||||
n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, (H5D_storage_t *)&(dataset->efl),
|
||||
dataset, (H5D_storage_t *)&(dataset->shared->efl),
|
||||
file_space, &bkg_iter, smine_nelmts,
|
||||
bkg_buf/*out*/);
|
||||
|
||||
@ -1523,9 +1523,9 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
|
||||
if (H5T_convert(tpath, src_id, dst_id, smine_nelmts, 0, 0, tconv_buf, bkg_buf, dxpl_id)<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed")
|
||||
|
||||
/* Do the data transform after the type conversion (since we're using dataset->type). */
|
||||
/* Do the data transform after the type conversion (since we're using dataset->shared->type). */
|
||||
if(!H5Z_xform_noop(dxpl_cache->data_xform_prop))
|
||||
if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->type) < 0)
|
||||
if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->shared->type) < 0)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform")
|
||||
|
||||
/*
|
||||
@ -1535,7 +1535,7 @@ H5D_contig_write(hsize_t nelmts, H5D_t *dataset,
|
||||
H5_timer_begin(&timer);
|
||||
#endif
|
||||
status = H5S_select_fscat(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, (H5D_storage_t *)&(dataset->efl),
|
||||
dataset, (H5D_storage_t *)&(dataset->shared->efl),
|
||||
file_space, &file_iter, smine_nelmts,
|
||||
tconv_buf);
|
||||
#ifdef H5S_DEBUG
|
||||
@ -1636,10 +1636,10 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
|
||||
H5_timer_begin(&timer);
|
||||
#endif
|
||||
/* Sanity check dataset, then read it */
|
||||
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|
||||
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|
||||
|| dataset->efl.nused>0 || 0 == nelmts
|
||||
|| dataset->layout.type==H5D_COMPACT);
|
||||
assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|
||||
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|
||||
|| dataset->shared->efl.nused>0 || 0 == nelmts
|
||||
|| dataset->shared->layout.type==H5D_COMPACT);
|
||||
|
||||
/* Get first node in chunk tree */
|
||||
chunk_node=H5TB_first(fm.fsel->root);
|
||||
@ -1658,7 +1658,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
|
||||
/* Perform the actual read operation */
|
||||
status = (sconv->read)(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, &store,
|
||||
chunk_info->chunk_points, H5T_get_size(dataset->type),
|
||||
chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
|
||||
chunk_info->fspace, chunk_info->mspace,
|
||||
buf);
|
||||
|
||||
@ -1672,7 +1672,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
|
||||
|
||||
#ifdef H5S_DEBUG
|
||||
H5_timer_end(&(sconv->stats[1].read_timer), &timer);
|
||||
sconv->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->type);
|
||||
sconv->stats[1].read_nbytes += nelmts * H5T_get_size(dataset->shared->type);
|
||||
sconv->stats[1].read_ncalls++;
|
||||
#endif
|
||||
|
||||
@ -1687,7 +1687,7 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
|
||||
HGOTO_DONE(SUCCEED)
|
||||
|
||||
/* Compute element sizes and other parameters */
|
||||
src_type_size = H5T_get_size(dataset->type);
|
||||
src_type_size = H5T_get_size(dataset->shared->type);
|
||||
dst_type_size = H5T_get_size(mem_type);
|
||||
max_type_size = MAX(src_type_size, dst_type_size);
|
||||
target_size = dxpl_cache->max_temp_buf;
|
||||
@ -1779,9 +1779,9 @@ H5D_chunk_read(hsize_t nelmts, H5D_t *dataset,
|
||||
H5_timer_begin(&timer);
|
||||
#endif
|
||||
/* Sanity check that space is allocated, then read data from it */
|
||||
assert(((dataset->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->layout.u.contig.addr))
|
||||
|| (dataset->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->layout.u.chunk.addr)))
|
||||
|| dataset->efl.nused>0 || dataset->layout.type==H5D_COMPACT);
|
||||
assert(((dataset->shared->layout.type==H5D_CONTIGUOUS && H5F_addr_defined(dataset->shared->layout.u.contig.addr))
|
||||
|| (dataset->shared->layout.type==H5D_CHUNKED && H5F_addr_defined(dataset->shared->layout.u.chunk.addr)))
|
||||
|| dataset->shared->efl.nused>0 || dataset->shared->layout.type==H5D_COMPACT);
|
||||
n = H5S_select_fgath(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, &store,
|
||||
chunk_info->fspace, &file_iter, smine_nelmts,
|
||||
@ -1996,7 +1996,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
|
||||
/* Perform the actual write operation */
|
||||
status = (sconv->write)(dataset->ent.file, dxpl_cache, dxpl_id,
|
||||
dataset, &store,
|
||||
chunk_info->chunk_points, H5T_get_size(dataset->type),
|
||||
chunk_info->chunk_points, H5T_get_size(dataset->shared->type),
|
||||
chunk_info->fspace, chunk_info->mspace,
|
||||
buf);
|
||||
|
||||
@ -2042,7 +2042,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
|
||||
|
||||
/* Compute element sizes and other parameters */
|
||||
src_type_size = H5T_get_size(mem_type);
|
||||
dst_type_size = H5T_get_size(dataset->type);
|
||||
dst_type_size = H5T_get_size(dataset->shared->type);
|
||||
max_type_size = MAX(src_type_size, dst_type_size);
|
||||
target_size = dxpl_cache->max_temp_buf;
|
||||
/* XXX: This could cause a problem if the user sets their buffer size
|
||||
@ -2070,7 +2070,7 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
|
||||
* malloc() is usually less resource-intensive if we allocate/free the
|
||||
* same size over and over.
|
||||
*/
|
||||
if(H5T_detect_class(dataset->type, H5T_VLEN)) {
|
||||
if(H5T_detect_class(dataset->shared->type, H5T_VLEN)) {
|
||||
/* Old data is retrieved into background buffer for VL datatype. The
|
||||
* data is used later for freeing heap objects. */
|
||||
need_bkg = H5T_BKG_YES;
|
||||
@ -2172,9 +2172,9 @@ H5D_chunk_write(hsize_t nelmts, H5D_t *dataset,
|
||||
tconv_buf, bkg_buf, dxpl_id)<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "data type conversion failed")
|
||||
|
||||
/* Do the data transform after the type conversion (since we're using dataset->type) */
|
||||
/* Do the data transform after the type conversion (since we're using dataset->shared->type) */
|
||||
if(!H5Z_xform_noop(dxpl_cache->data_xform_prop))
|
||||
if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->type) < 0)
|
||||
if( H5Z_xform_eval(dxpl_cache->data_xform_prop, tconv_buf, smine_nelmts, dataset->shared->type) < 0)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Error performing data transform")
|
||||
|
||||
/*
|
||||
@ -2388,7 +2388,7 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
|
||||
#endif /* QAK */
|
||||
|
||||
/* Get layout for dataset */
|
||||
fm->layout = &(dataset->layout);
|
||||
fm->layout = &(dataset->shared->layout);
|
||||
|
||||
/* Check if the memory space is scalar & make equivalent memory space */
|
||||
if((sm_ndims = H5S_GET_EXTENT_NDIMS(mem_space))<0)
|
||||
@ -2397,16 +2397,16 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
|
||||
hsize_t dims[H5O_LAYOUT_NDIMS]; /* Temporary dimension information */
|
||||
|
||||
/* Set up "equivalent" n-dimensional dataspace with size '1' in each dimension */
|
||||
for(u=0; u<dataset->layout.u.chunk.ndims-1; u++)
|
||||
for(u=0; u<dataset->shared->layout.u.chunk.ndims-1; u++)
|
||||
dims[u]=1;
|
||||
if((equiv_mspace = H5S_create_simple(dataset->layout.u.chunk.ndims-1,dims,NULL))==NULL)
|
||||
if((equiv_mspace = H5S_create_simple(dataset->shared->layout.u.chunk.ndims-1,dims,NULL))==NULL)
|
||||
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTCREATE, FAIL, "unable to create equivalent dataspace for scalar space")
|
||||
|
||||
/* Indicate that this space needs to be released */
|
||||
equiv_mspace_init=1;
|
||||
|
||||
/* Set the number of dimensions for the memory dataspace */
|
||||
fm->m_ndims=dataset->layout.u.chunk.ndims-1;
|
||||
fm->m_ndims=dataset->shared->layout.u.chunk.ndims-1;
|
||||
} /* end else */
|
||||
else {
|
||||
equiv_mspace=(H5S_t *)mem_space; /* Casting away 'const' OK... */
|
||||
@ -2416,7 +2416,7 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
|
||||
} /* end else */
|
||||
|
||||
/* Get dim number and dimensionality for each dataspace */
|
||||
fm->f_ndims=f_ndims=dataset->layout.u.chunk.ndims-1;
|
||||
fm->f_ndims=f_ndims=dataset->shared->layout.u.chunk.ndims-1;
|
||||
|
||||
if(H5S_get_simple_extent_dims(file_space, fm->f_dims, NULL)<0)
|
||||
HGOTO_ERROR (H5E_DATASPACE, H5E_CANTGET, FAIL, "unable to get dimensionality")
|
||||
@ -2427,7 +2427,7 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
|
||||
fm->chunk_dim[u]=fm->layout->u.chunk.dim[u];
|
||||
|
||||
/* Round up to the next integer # of chunks, to accomodate partial chunks */
|
||||
fm->chunks[u] = ((fm->f_dims[u]+dataset->layout.u.chunk.dim[u])-1) / dataset->layout.u.chunk.dim[u];
|
||||
fm->chunks[u] = ((fm->f_dims[u]+dataset->shared->layout.u.chunk.dim[u])-1) / dataset->shared->layout.u.chunk.dim[u];
|
||||
} /* end for */
|
||||
|
||||
/* Compute the "down" size of 'chunks' information */
|
||||
@ -2456,7 +2456,7 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
|
||||
/* Check if file selection is a point selection */
|
||||
if(fsel_type==H5S_SEL_POINTS) {
|
||||
/* Create temporary datatypes for selection iteration */
|
||||
if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->type, H5T_COPY_ALL)))<0)
|
||||
if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->shared->type, H5T_COPY_ALL)))<0)
|
||||
HGOTO_ERROR (H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register file datatype")
|
||||
|
||||
/* Spaces aren't the same shape, iterate over the memory selection directly */
|
||||
@ -2536,7 +2536,7 @@ H5D_create_chunk_map(H5D_t *dataset, const H5T_t *mem_type, const H5S_t *file_sp
|
||||
|
||||
/* Create temporary datatypes for selection iteration */
|
||||
if(f_tid<0) {
|
||||
if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->type, H5T_COPY_ALL)))<0)
|
||||
if((f_tid = H5I_register(H5I_DATATYPE, H5T_copy(dataset->shared->type, H5T_COPY_ALL)))<0)
|
||||
HGOTO_ERROR (H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register file datatype")
|
||||
} /* end if */
|
||||
|
||||
|
227
src/H5Distore.c
227
src/H5Distore.c
@ -928,7 +928,10 @@ H5D_istore_iter_dump (H5F_t UNUSED *f, hid_t UNUSED dxpl_id, void *_lt_key, hadd
|
||||
herr_t
|
||||
H5D_istore_init (H5F_t *f, H5D_t *dset)
|
||||
{
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);
|
||||
H5D_istore_ud1_t udata;
|
||||
H5B_shared_t *shared; /* Shared B-tree node info */
|
||||
size_t u; /* Local index variable */
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
|
||||
herr_t ret_value=SUCCEED; /* Return value */
|
||||
|
||||
FUNC_ENTER_NOAPI(H5D_istore_init, FAIL);
|
||||
@ -942,7 +945,7 @@ H5D_istore_init (H5F_t *f, H5D_t *dset)
|
||||
} /* end if */
|
||||
|
||||
/* Allocate the shared structure */
|
||||
if(H5D_istore_shared_create(f, &dset->layout)<0)
|
||||
if(H5D_istore_shared_create(f, &dset->shared->layout)<0)
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't create wrapper for shared B-tree info")
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
@ -986,16 +989,16 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
if (ent->dirty) {
|
||||
H5D_istore_ud1_t udata; /*pass through B-tree */
|
||||
|
||||
udata.mesg = &dset->layout;
|
||||
udata.mesg = &dset->shared->layout;
|
||||
udata.key.filter_mask = 0;
|
||||
udata.addr = HADDR_UNDEF;
|
||||
udata.key.nbytes = ent->chunk_size;
|
||||
for (u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
udata.key.offset[u] = ent->offset[u];
|
||||
alloc = ent->alloc_size;
|
||||
|
||||
/* Should the chunk be filtered before writing it to disk? */
|
||||
if (dset->dcpl_cache.pline.nused) {
|
||||
if (dset->shared->dcpl_cache.pline.nused) {
|
||||
if (!reset) {
|
||||
/*
|
||||
* Copy the chunk to a new buffer before running it through
|
||||
@ -1017,7 +1020,7 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
point_of_no_return = TRUE;
|
||||
ent->chunk = NULL;
|
||||
}
|
||||
if (H5Z_pipeline(&(dset->dcpl_cache.pline), 0, &(udata.key.filter_mask), dxpl_cache->err_detect,
|
||||
if (H5Z_pipeline(&(dset->shared->dcpl_cache.pline), 0, &(udata.key.filter_mask), dxpl_cache->err_detect,
|
||||
dxpl_cache->filter_cb, &(udata.key.nbytes), &alloc, &buf)<0)
|
||||
HGOTO_ERROR(H5E_PLINE, H5E_WRITEERROR, FAIL, "output pipeline failed")
|
||||
}
|
||||
@ -1026,14 +1029,14 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
* Create the chunk it if it doesn't exist, or reallocate the chunk if
|
||||
* its size changed. Then write the data into the file.
|
||||
*/
|
||||
if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->layout.u.chunk.addr, &udata)<0)
|
||||
if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
|
||||
if (H5F_block_write(f, H5FD_MEM_DRAW, udata.addr, udata.key.nbytes, dxpl_id, buf)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
|
||||
|
||||
/* Mark cache entry as clean */
|
||||
ent->dirty = FALSE;
|
||||
dset->cache.chunk.nflushes++;
|
||||
dset->shared->cache.chunk.nflushes++;
|
||||
} /* end if */
|
||||
|
||||
/* Reset, but do not free or removed from list */
|
||||
@ -1042,7 +1045,7 @@ H5D_istore_flush_entry(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
if(buf==ent->chunk)
|
||||
buf = NULL;
|
||||
if(ent->chunk!=NULL)
|
||||
ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->dcpl_cache.pline));
|
||||
ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline));
|
||||
} /* end if */
|
||||
|
||||
done:
|
||||
@ -1058,7 +1061,7 @@ done:
|
||||
*/
|
||||
if (ret_value<0 && point_of_no_return) {
|
||||
if(ent->chunk)
|
||||
ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->dcpl_cache.pline));
|
||||
ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline));
|
||||
} /* end if */
|
||||
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
@ -1087,7 +1090,7 @@ static herr_t
|
||||
H5D_istore_preempt(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
|
||||
H5D_t *dset, H5D_rdcc_ent_t * ent, hbool_t flush)
|
||||
{
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
|
||||
herr_t ret_value=SUCCEED; /* Return value */
|
||||
|
||||
FUNC_ENTER_NOAPI_NOINIT(H5D_istore_preempt);
|
||||
@ -1105,7 +1108,7 @@ H5D_istore_preempt(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
|
||||
else {
|
||||
/* Don't flush, just free chunk */
|
||||
if(ent->chunk != NULL)
|
||||
ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->dcpl_cache.pline));
|
||||
ent->chunk = H5D_istore_chunk_xfree(ent->chunk,&(dset->shared->dcpl_cache.pline));
|
||||
}
|
||||
|
||||
/* Unlink from list */
|
||||
@ -1155,7 +1158,7 @@ H5D_istore_flush (H5F_t *f, hid_t dxpl_id, H5D_t *dset, unsigned flags)
|
||||
{
|
||||
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
|
||||
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
|
||||
int nerrors=0;
|
||||
H5D_rdcc_ent_t *ent=NULL, *next=NULL;
|
||||
herr_t ret_value=SUCCEED; /* Return value */
|
||||
@ -1211,7 +1214,7 @@ H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
{
|
||||
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
|
||||
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
|
||||
int nerrors=0;
|
||||
H5D_rdcc_ent_t *ent=NULL, *next=NULL;
|
||||
herr_t ret_value=SUCCEED; /* Return value */
|
||||
@ -1239,7 +1242,7 @@ H5D_istore_dest (H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
HDmemset (rdcc, 0, sizeof(H5D_rdcc_t));
|
||||
|
||||
/* Free the raw B-tree node buffer */
|
||||
if(H5RC_DEC(dset->layout.u.chunk.btree_shared)<0)
|
||||
if(H5RC_DEC(dset->shared->layout.u.chunk.btree_shared)<0)
|
||||
HGOTO_ERROR (H5E_IO, H5E_CANTFREE, FAIL, "unable to decrement ref-counted page");
|
||||
|
||||
done:
|
||||
@ -1363,7 +1366,7 @@ H5D_istore_prune (H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id, H
|
||||
size_t size)
|
||||
{
|
||||
int i, j, nerrors=0;
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
|
||||
size_t total = rdcc->nbytes;
|
||||
const int nmeth=2; /*number of methods */
|
||||
int w[1]; /*weighting as an interval */
|
||||
@ -1500,11 +1503,11 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
|
||||
{
|
||||
unsigned idx=0; /*hash index number */
|
||||
hbool_t found = FALSE; /*already in cache? */
|
||||
const H5O_pline_t *pline=&(dset->dcpl_cache.pline); /* I/O pipeline info */
|
||||
const H5O_layout_t *layout=&(dset->layout); /* Dataset layout */
|
||||
const H5O_fill_t *fill=&(dset->dcpl_cache.fill); /* Fill value info */
|
||||
H5D_fill_time_t fill_time=dset->dcpl_cache.fill_time; /* Fill time */
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);/*raw data chunk cache*/
|
||||
const H5O_pline_t *pline=&(dset->shared->dcpl_cache.pline); /* I/O pipeline info */
|
||||
const H5O_layout_t *layout=&(dset->shared->layout); /* Dataset layout */
|
||||
const H5O_fill_t *fill=&(dset->shared->dcpl_cache.fill); /* Fill value info */
|
||||
H5D_fill_time_t fill_time=dset->shared->dcpl_cache.fill_time; /* Fill time */
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);/*raw data chunk cache*/
|
||||
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
|
||||
unsigned u; /*counters */
|
||||
size_t chunk_size=0; /*size of a chunk */
|
||||
@ -1525,11 +1528,11 @@ H5D_istore_lock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
|
||||
|
||||
/* Search for the chunk in the cache */
|
||||
if (rdcc->nslots>0) {
|
||||
idx=H5D_HASH(dset,store->chunk.index);
|
||||
idx=H5D_HASH(dset->shared,store->chunk.index);
|
||||
ent = rdcc->slot[idx];
|
||||
|
||||
if (ent) {
|
||||
for (u=0, found=TRUE; u<dset->layout.u.chunk.ndims; u++) {
|
||||
for (u=0, found=TRUE; u<dset->shared->layout.u.chunk.ndims; u++) {
|
||||
if (store->chunk.offset[u]!=ent->offset[u]) {
|
||||
found = FALSE;
|
||||
break;
|
||||
@ -1768,7 +1771,7 @@ H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
|
||||
H5D_t *dset, const H5D_storage_t *store,
|
||||
hbool_t dirty, unsigned idx_hint, uint8_t *chunk, size_t naccessed)
|
||||
{
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
|
||||
H5D_rdcc_ent_t *ent = NULL;
|
||||
int found = -1;
|
||||
unsigned u;
|
||||
@ -1796,17 +1799,17 @@ H5D_istore_unlock(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_id,
|
||||
|
||||
HDmemset (&x, 0, sizeof x);
|
||||
x.dirty = TRUE;
|
||||
for (u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
x.offset[u] = store->chunk.offset[u];
|
||||
assert(dset->layout.u.chunk.size>0);
|
||||
H5_ASSIGN_OVERFLOW(x.chunk_size,dset->layout.u.chunk.size,hsize_t,size_t);
|
||||
assert(dset->shared->layout.u.chunk.size>0);
|
||||
H5_ASSIGN_OVERFLOW(x.chunk_size,dset->shared->layout.u.chunk.size,hsize_t,size_t);
|
||||
x.alloc_size = x.chunk_size;
|
||||
x.chunk = chunk;
|
||||
|
||||
H5D_istore_flush_entry (f, dxpl_cache, dxpl_id, dset, &x, TRUE);
|
||||
} else {
|
||||
if(chunk)
|
||||
H5D_istore_chunk_xfree (chunk,&(dset->dcpl_cache.pline));
|
||||
H5D_istore_chunk_xfree (chunk,&(dset->shared->dcpl_cache.pline));
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
@ -1859,8 +1862,8 @@ H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
|
||||
/* Check args */
|
||||
assert(f);
|
||||
assert(dxpl_cache);
|
||||
assert(dset && H5D_CHUNKED==dset->layout.type);
|
||||
assert(dset->layout.u.chunk.ndims>0 && dset->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
|
||||
assert(dset && H5D_CHUNKED==dset->shared->layout.type);
|
||||
assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
|
||||
assert(store);
|
||||
assert(chunk_len_arr);
|
||||
assert(chunk_offset_arr);
|
||||
@ -1869,19 +1872,19 @@ H5D_istore_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxp
|
||||
assert(buf);
|
||||
|
||||
#ifndef NDEBUG
|
||||
for (u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
|
||||
#endif
|
||||
|
||||
/* Get the address of this chunk on disk */
|
||||
#ifdef QAK
|
||||
HDfprintf(stderr,"%s: store->chunk.offset={",FUNC);
|
||||
for(u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->layout.u.chunk.ndims-1) ? ", " : "}\n"));
|
||||
for(u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
|
||||
#endif /* QAK */
|
||||
chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->layout), store->chunk.offset, &udata);
|
||||
chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->shared->layout), store->chunk.offset, &udata);
|
||||
#ifdef QAK
|
||||
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->layout.u.chunk.size);
|
||||
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size);
|
||||
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
|
||||
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
|
||||
HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
|
||||
@ -1899,14 +1902,14 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
|
||||
* writing to other elements in the same chunk. Do a direct
|
||||
* read-through of only the elements requested.
|
||||
*/
|
||||
if ((dset->layout.u.chunk.size>dset->cache.chunk.nbytes && dset->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
|
||||
if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
|
||||
|| (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & H5F_get_intent(f)))) {
|
||||
#ifdef H5_HAVE_PARALLEL
|
||||
/* Additional sanity check when operating in parallel */
|
||||
if (chunk_addr==HADDR_UNDEF || dset->dcpl_cache.pline.nused>0)
|
||||
if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0)
|
||||
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
|
||||
#endif /* H5_HAVE_PARALLEL */
|
||||
if ((ret_value=H5D_contig_readvv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
|
||||
if ((ret_value=H5D_contig_readvv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->shared->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
|
||||
HGOTO_ERROR (H5E_IO, H5E_READERROR, FAIL, "unable to read raw data to file");
|
||||
} /* end if */
|
||||
else {
|
||||
@ -1923,20 +1926,20 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
|
||||
* the entire I/O operation on the chunk will be skipped. -QAK
|
||||
*/
|
||||
if(!H5F_addr_defined(chunk_addr)) {
|
||||
const H5O_fill_t *fill=&(dset->dcpl_cache.fill); /* Fill value info */
|
||||
H5D_fill_time_t fill_time=dset->dcpl_cache.fill_time; /* Fill time */
|
||||
const H5O_fill_t *fill=&(dset->shared->dcpl_cache.fill); /* Fill value info */
|
||||
H5D_fill_time_t fill_time=dset->shared->dcpl_cache.fill_time; /* Fill time */
|
||||
H5D_fill_value_t fill_status;
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);/*raw data chunk cache*/
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);/*raw data chunk cache*/
|
||||
hbool_t found = FALSE; /*already in cache? */
|
||||
|
||||
/* Check if the chunk is in the cache (but hasn't been written to disk yet) */
|
||||
if (rdcc->nslots>0) {
|
||||
unsigned idx=H5D_HASH(dset,store->chunk.index); /* Cache entry index */
|
||||
unsigned idx=H5D_HASH(dset->shared,store->chunk.index); /* Cache entry index */
|
||||
H5D_rdcc_ent_t *ent = rdcc->slot[idx]; /* Cache entry */
|
||||
|
||||
/* Potential match... */
|
||||
if (ent) {
|
||||
for (u=0, found=TRUE; u<dset->layout.u.chunk.ndims; u++) {
|
||||
for (u=0, found=TRUE; u<dset->shared->layout.u.chunk.ndims; u++) {
|
||||
if (store->chunk.offset[u]!=ent->offset[u]) {
|
||||
found = FALSE;
|
||||
break;
|
||||
@ -2052,8 +2055,8 @@ H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
|
||||
/* Check args */
|
||||
assert(f);
|
||||
assert(dxpl_cache);
|
||||
assert(dset && H5D_CHUNKED==dset->layout.type);
|
||||
assert(dset->layout.u.chunk.ndims>0 && dset->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
|
||||
assert(dset && H5D_CHUNKED==dset->shared->layout.type);
|
||||
assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
|
||||
assert(store);
|
||||
assert(chunk_len_arr);
|
||||
assert(chunk_offset_arr);
|
||||
@ -2062,19 +2065,19 @@ H5D_istore_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
|
||||
assert(buf);
|
||||
|
||||
#ifndef NDEBUG
|
||||
for (u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
assert(store->chunk.offset[u]>=0); /*negative coordinates not supported (yet) */
|
||||
#endif
|
||||
|
||||
/* Get the address of this chunk on disk */
|
||||
#ifdef QAK
|
||||
HDfprintf(stderr,"%s: store->chunk.offset={",FUNC);
|
||||
for(u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->layout.u.chunk.ndims-1) ? ", " : "}\n"));
|
||||
for(u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
HDfprintf(stderr,"%Hd%s",store->chunk.offset[u],(u<(dset->shared->layout.u.chunk.ndims-1) ? ", " : "}\n"));
|
||||
#endif /* QAK */
|
||||
chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->layout), store->chunk.offset, &udata);
|
||||
chunk_addr=H5D_istore_get_addr(f, dxpl_id, &(dset->shared->layout), store->chunk.offset, &udata);
|
||||
#ifdef QAK
|
||||
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->layout.u.chunk.size);
|
||||
HDfprintf(stderr,"%s: chunk_addr=%a, chunk_size=%Zu\n",FUNC,chunk_addr,dset->shared->layout.u.chunk.size);
|
||||
HDfprintf(stderr,"%s: chunk_len_arr[%Zu]=%Zu\n",FUNC,*chunk_curr_seq,chunk_len_arr[*chunk_curr_seq]);
|
||||
HDfprintf(stderr,"%s: chunk_offset_arr[%Zu]=%Hu\n",FUNC,*chunk_curr_seq,chunk_offset_arr[*chunk_curr_seq]);
|
||||
HDfprintf(stderr,"%s: mem_len_arr[%Zu]=%Zu\n",FUNC,*mem_curr_seq,mem_len_arr[*mem_curr_seq]);
|
||||
@ -2092,14 +2095,14 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
|
||||
* writing to other elements in the same chunk. Do a direct
|
||||
* write-through of only the elements requested.
|
||||
*/
|
||||
if ((dset->layout.u.chunk.size>dset->cache.chunk.nbytes && dset->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
|
||||
if ((dset->shared->layout.u.chunk.size>dset->shared->cache.chunk.nbytes && dset->shared->dcpl_cache.pline.nused==0 && chunk_addr!=HADDR_UNDEF)
|
||||
|| (IS_H5FD_MPI(f) && (H5F_ACC_RDWR & H5F_get_intent(f)))) {
|
||||
#ifdef H5_HAVE_PARALLEL
|
||||
/* Additional sanity check when operating in parallel */
|
||||
if (chunk_addr==HADDR_UNDEF || dset->dcpl_cache.pline.nused>0)
|
||||
if (chunk_addr==HADDR_UNDEF || dset->shared->dcpl_cache.pline.nused>0)
|
||||
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to locate raw data chunk");
|
||||
#endif /* H5_HAVE_PARALLEL */
|
||||
if ((ret_value=H5D_contig_writevv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
|
||||
if ((ret_value=H5D_contig_writevv(f, dxpl_id, dset, chunk_addr, (hsize_t)dset->shared->layout.u.chunk.size, chunk_max_nseq, chunk_curr_seq, chunk_len_arr, chunk_offset_arr, mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr, buf))<0)
|
||||
HGOTO_ERROR (H5E_IO, H5E_WRITEERROR, FAIL, "unable to write raw data to file");
|
||||
} /* end if */
|
||||
else {
|
||||
@ -2119,7 +2122,7 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
|
||||
* be to lock the chunk in the dataset I/O routine (setting the relax flag
|
||||
* appropriately) and then unlock it after all the I/O the chunk was finished. -QAK
|
||||
*/
|
||||
if(chunk_max_nseq==1 && chunk_len_arr[0] == dset->layout.u.chunk.size)
|
||||
if(chunk_max_nseq==1 && chunk_len_arr[0] == dset->shared->layout.u.chunk.size)
|
||||
relax = TRUE;
|
||||
else
|
||||
relax = FALSE;
|
||||
@ -2128,13 +2131,13 @@ HDfprintf(stderr,"%s: mem_offset_arr[%Zu]=%Hu\n",FUNC,*mem_curr_seq,mem_offset_a
|
||||
total_bytes=0;
|
||||
for(u=*chunk_curr_seq; u<chunk_max_nseq; u++)
|
||||
total_bytes+=chunk_len_arr[u];
|
||||
if(total_bytes!=dset->layout.u.chunk.size)
|
||||
if(total_bytes!=dset->shared->layout.u.chunk.size)
|
||||
relax=FALSE;
|
||||
if(relax) {
|
||||
total_bytes=0;
|
||||
for(u=*mem_curr_seq; u<mem_max_nseq; u++)
|
||||
total_bytes+=mem_len_arr[u];
|
||||
if(total_bytes!=dset->layout.u.chunk.size)
|
||||
if(total_bytes!=dset->shared->layout.u.chunk.size)
|
||||
relax=FALSE;
|
||||
} /* end if */
|
||||
#endif /* OLD_WAY */
|
||||
@ -2234,7 +2237,7 @@ done:
|
||||
hsize_t
|
||||
H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
{
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk); /*raw data chunk cache */
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
|
||||
H5D_rdcc_ent_t *ent; /*cache entry */
|
||||
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
|
||||
H5D_dxpl_cache_t *dxpl_cache=&_dxpl_cache; /* Data transfer property cache */
|
||||
@ -2255,8 +2258,8 @@ H5D_istore_allocated(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
} /* end for */
|
||||
|
||||
HDmemset(&udata, 0, sizeof udata);
|
||||
udata.mesg = &dset->layout;
|
||||
if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_iter_allocated, dset->layout.u.chunk.addr, &udata)<0)
|
||||
udata.mesg = &dset->shared->layout;
|
||||
if (H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_iter_allocated, dset->shared->layout.u.chunk.addr, &udata)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over chunk B-tree");
|
||||
|
||||
/* Set return value */
|
||||
@ -2482,18 +2485,18 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
|
||||
/* Check args */
|
||||
assert(f);
|
||||
assert(TRUE==H5P_isa_class(dxpl_id,H5P_DATASET_XFER));
|
||||
assert(dset && H5D_CHUNKED==dset->layout.type);
|
||||
assert(dset->layout.u.chunk.ndims>0 && dset->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
|
||||
assert(H5F_addr_defined(dset->layout.u.chunk.addr));
|
||||
assert(dset && H5D_CHUNKED==dset->shared->layout.type);
|
||||
assert(dset->shared->layout.u.chunk.ndims>0 && dset->shared->layout.u.chunk.ndims<=H5O_LAYOUT_NDIMS);
|
||||
assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr));
|
||||
|
||||
/* Get dataset's creation property list */
|
||||
if (NULL == (dc_plist = H5I_object(dset->dcpl_id)))
|
||||
if (NULL == (dc_plist = H5I_object(dset->shared->dcpl_id)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
|
||||
|
||||
/* We only handle simple data spaces so far */
|
||||
if ((space_ndims=H5S_get_simple_extent_dims(dset->space, space_dim, NULL))<0)
|
||||
if ((space_ndims=H5S_get_simple_extent_dims(dset->shared->space, space_dim, NULL))<0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to get simple data space info")
|
||||
space_dim[space_ndims] = dset->layout.u.chunk.dim[space_ndims];
|
||||
space_dim[space_ndims] = dset->shared->layout.u.chunk.dim[space_ndims];
|
||||
|
||||
/* Get necessary properties from dataset creation property list */
|
||||
if(H5P_get(dc_plist, H5D_CRT_FILL_VALUE_NAME, &fill) < 0)
|
||||
@ -2531,9 +2534,9 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
|
||||
* Setup indice to go through all chunks. (Future improvement
|
||||
* should allocate only chunks that have no file space assigned yet.
|
||||
*/
|
||||
for (u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
chunk_offset[u] = 0;
|
||||
chunk_size = dset->layout.u.chunk.size;
|
||||
chunk_size = dset->shared->layout.u.chunk.size;
|
||||
|
||||
/* Check the dataset's fill-value status */
|
||||
if (H5P_is_fill_value_defined(&fill, &fill_status) < 0)
|
||||
@ -2589,8 +2592,8 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
|
||||
while (carry==0) {
|
||||
/* Check if the chunk exists yet on disk */
|
||||
chunk_exists=1;
|
||||
if(H5D_istore_get_addr(f,dxpl_id,&(dset->layout),chunk_offset, NULL)==HADDR_UNDEF) {
|
||||
const H5D_rdcc_t *rdcc = &(dset->cache.chunk); /*raw data chunk cache */
|
||||
if(H5D_istore_get_addr(f,dxpl_id,&(dset->shared->layout),chunk_offset, NULL)==HADDR_UNDEF) {
|
||||
const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
|
||||
H5D_rdcc_ent_t *ent = NULL; /*cache entry */
|
||||
|
||||
/* Didn't find the chunk on disk */
|
||||
@ -2600,7 +2603,7 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
|
||||
for(ent = rdcc->head; ent && !chunk_exists; ent = ent->next) {
|
||||
/* Assume a match */
|
||||
chunk_exists = 1;
|
||||
for(u = 0; u < dset->layout.u.chunk.ndims && chunk_exists; u++) {
|
||||
for(u = 0; u < dset->shared->layout.u.chunk.ndims && chunk_exists; u++) {
|
||||
if(ent->offset[u] != chunk_offset[u])
|
||||
chunk_exists = 0; /* Reset if no match */
|
||||
} /* end for */
|
||||
@ -2609,16 +2612,16 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
|
||||
|
||||
if(!chunk_exists) {
|
||||
/* Initialize the chunk information */
|
||||
udata.mesg = &dset->layout;
|
||||
udata.mesg = &dset->shared->layout;
|
||||
udata.key.filter_mask = filter_mask;
|
||||
udata.addr = HADDR_UNDEF;
|
||||
H5_CHECK_OVERFLOW(chunk_size,hsize_t,size_t);
|
||||
udata.key.nbytes = (size_t)chunk_size;
|
||||
for (u=0; u<dset->layout.u.chunk.ndims; u++)
|
||||
for (u=0; u<dset->shared->layout.u.chunk.ndims; u++)
|
||||
udata.key.offset[u] = chunk_offset[u];
|
||||
|
||||
/* Allocate the chunk with all processes */
|
||||
if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->layout.u.chunk.addr, &udata)<0)
|
||||
if (H5B_insert(f, dxpl_id, H5B_ISTORE, dset->shared->layout.u.chunk.addr, &udata)<0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to allocate chunk");
|
||||
|
||||
/* Check if fill values should be written to blocks */
|
||||
@ -2647,8 +2650,8 @@ H5D_istore_allocate(H5F_t *f, hid_t dxpl_id, const H5D_t *dset,
|
||||
} /* end if */
|
||||
|
||||
/* Increment indices */
|
||||
for (i=dset->layout.u.chunk.ndims-1, carry=1; i>=0 && carry; --i) {
|
||||
chunk_offset[i] += dset->layout.u.chunk.dim[i];
|
||||
for (i=dset->shared->layout.u.chunk.ndims-1, carry=1; i>=0 && carry; --i) {
|
||||
chunk_offset[i] += dset->shared->layout.u.chunk.dim[i];
|
||||
if (chunk_offset[i] >= (hssize_t)(space_dim[i]))
|
||||
chunk_offset[i] = 0;
|
||||
else
|
||||
@ -2782,7 +2785,7 @@ herr_t
|
||||
H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
hid_t dxpl_id, H5D_t *dset)
|
||||
{
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk); /*raw data chunk cache */
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
|
||||
H5D_rdcc_ent_t *ent = NULL, *next = NULL; /*cache entry */
|
||||
unsigned u; /*counters */
|
||||
int found; /*remove this entry */
|
||||
@ -2795,12 +2798,12 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
/* Check args */
|
||||
assert(f);
|
||||
assert(dxpl_cache);
|
||||
assert(dset && H5D_CHUNKED == dset->layout.type);
|
||||
assert(dset->layout.u.chunk.ndims > 0 && dset->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
|
||||
assert(H5F_addr_defined(dset->layout.u.chunk.addr));
|
||||
assert(dset && H5D_CHUNKED == dset->shared->layout.type);
|
||||
assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
|
||||
assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr));
|
||||
|
||||
/* Go get the rank & dimensions */
|
||||
if(H5S_get_simple_extent_dims(dset->space, curr_dims, NULL) < 0)
|
||||
if(H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL) < 0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions");
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
@ -2812,7 +2815,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
for(ent = rdcc->head; ent; ent = next) {
|
||||
next = ent->next;
|
||||
|
||||
for(u = 0; u < dset->layout.u.chunk.ndims - 1; u++) {
|
||||
for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) {
|
||||
if((hsize_t)ent->offset[u] > curr_dims[u]) {
|
||||
found = 1;
|
||||
break;
|
||||
@ -2822,7 +2825,7 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
if(found) {
|
||||
#ifdef H5D_ISTORE_DEBUG
|
||||
HDfputs("cache:remove:[", stderr);
|
||||
for(u = 0; u < dset->layout.u.chunk.ndims - 1; u++)
|
||||
for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++)
|
||||
HDfprintf(stderr, "%s%Hd", u ? ", " : "", ent->offset[u]);
|
||||
HDfputs("]\n", stderr);
|
||||
#endif
|
||||
@ -2842,10 +2845,10 @@ H5D_istore_prune_by_extent(H5F_t *f, const H5D_dxpl_cache_t *dxpl_cache,
|
||||
|
||||
HDmemset(&udata, 0, sizeof udata);
|
||||
udata.stream = stdout;
|
||||
udata.mesg = &dset->layout;
|
||||
udata.mesg = &dset->shared->layout;
|
||||
udata.dims = curr_dims;
|
||||
|
||||
if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_prune_extent, dset->layout.u.chunk.addr, &udata) < 0)
|
||||
if(H5B_iterate(f, dxpl_id, H5B_ISTORE, H5D_istore_prune_extent, dset->shared->layout.u.chunk.addr, &udata) < 0)
|
||||
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, 0, "unable to iterate over B-tree");
|
||||
|
||||
done:
|
||||
@ -3012,12 +3015,12 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
/* Check args */
|
||||
assert(f);
|
||||
assert(dxpl_cache);
|
||||
assert(dset && H5D_CHUNKED == dset->layout.type);
|
||||
assert(dset->layout.u.chunk.ndims > 0 && dset->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
|
||||
assert(H5F_addr_defined(dset->layout.u.chunk.addr));
|
||||
assert(dset && H5D_CHUNKED == dset->shared->layout.type);
|
||||
assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
|
||||
assert(H5F_addr_defined(dset->shared->layout.u.chunk.addr));
|
||||
|
||||
/* Get dataset's creation property list */
|
||||
if (NULL == (dc_plist = H5I_object(dset->dcpl_id)))
|
||||
if (NULL == (dc_plist = H5I_object(dset->shared->dcpl_id)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset creation property list")
|
||||
|
||||
/* Get necessary properties from property list */
|
||||
@ -3033,7 +3036,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
HDmemset(count, 0, sizeof(count));
|
||||
|
||||
/* Go get the rank & dimensions */
|
||||
if((srank = H5S_get_simple_extent_dims(dset->space, curr_dims, NULL)) < 0)
|
||||
if((srank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL)) < 0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions");
|
||||
H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned);
|
||||
|
||||
@ -3042,9 +3045,9 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
size[u] = curr_dims[u];
|
||||
|
||||
/* Round up to the next integer # of chunks, to accomodate partial chunks */
|
||||
chunks[u] = ((curr_dims[u]+dset->layout.u.chunk.dim[u])-1) / dset->layout.u.chunk.dim[u];
|
||||
chunks[u] = ((curr_dims[u]+dset->shared->layout.u.chunk.dim[u])-1) / dset->shared->layout.u.chunk.dim[u];
|
||||
} /* end for */
|
||||
size[u] = dset->layout.u.chunk.dim[u];
|
||||
size[u] = dset->shared->layout.u.chunk.dim[u];
|
||||
|
||||
/* Get the "down" sizes for each dimension */
|
||||
if(H5V_array_down(rank,chunks,down_chunks)<0)
|
||||
@ -3052,7 +3055,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
|
||||
/* Create a data space for a chunk & set the extent */
|
||||
for(u = 0; u < rank; u++)
|
||||
chunk_dims[u] = dset->layout.u.chunk.dim[u];
|
||||
chunk_dims[u] = dset->shared->layout.u.chunk.dim[u];
|
||||
if(NULL == (space_chunk = H5S_create_simple(rank,chunk_dims,NULL)))
|
||||
HGOTO_ERROR(H5E_DATASPACE, H5E_CANTCREATE, FAIL, "can't create simple dataspace");
|
||||
|
||||
@ -3061,18 +3064,18 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
* loop through the chunks copying each chunk from the application to the
|
||||
* chunk cache.
|
||||
*/
|
||||
for(u = 0; u < dset->layout.u.chunk.ndims; u++) {
|
||||
idx_max[u] = (size[u] - 1) / dset->layout.u.chunk.dim[u] + 1;
|
||||
for(u = 0; u < dset->shared->layout.u.chunk.ndims; u++) {
|
||||
idx_max[u] = (size[u] - 1) / dset->shared->layout.u.chunk.dim[u] + 1;
|
||||
idx_cur[u] = 0;
|
||||
} /* end for */
|
||||
|
||||
/* Loop over all chunks */
|
||||
carry=0;
|
||||
while(carry==0) {
|
||||
for(u = 0, naccessed = 1; u < dset->layout.u.chunk.ndims; u++) {
|
||||
for(u = 0, naccessed = 1; u < dset->shared->layout.u.chunk.ndims; u++) {
|
||||
/* The location and size of the chunk being accessed */
|
||||
chunk_offset[u] = idx_cur[u] * (hssize_t)(dset->layout.u.chunk.dim[u]);
|
||||
sub_size[u] = MIN((idx_cur[u] + 1) * dset->layout.u.chunk.dim[u],
|
||||
chunk_offset[u] = idx_cur[u] * (hssize_t)(dset->shared->layout.u.chunk.dim[u]);
|
||||
sub_size[u] = MIN((idx_cur[u] + 1) * dset->shared->layout.u.chunk.dim[u],
|
||||
size[u]) - chunk_offset[u];
|
||||
naccessed *= sub_size[u];
|
||||
} /* end for */
|
||||
@ -3081,8 +3084,8 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
* Figure out what chunks have to be initialized. These are the chunks where the dataspace
|
||||
* extent boundary is within the chunk
|
||||
*/
|
||||
for(u = 0, found = 0; u < dset->layout.u.chunk.ndims - 1; u++) {
|
||||
end_chunk = chunk_offset[u] + dset->layout.u.chunk.dim[u];
|
||||
for(u = 0, found = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++) {
|
||||
end_chunk = chunk_offset[u] + dset->shared->layout.u.chunk.dim[u];
|
||||
if(end_chunk > size[u]) {
|
||||
found = 1;
|
||||
break;
|
||||
@ -3092,7 +3095,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
if(found) {
|
||||
|
||||
/* Calculate the index of this chunk */
|
||||
if(H5V_chunk_index(rank,chunk_offset,dset->layout.u.chunk.dim,down_chunks,&store.chunk.index)<0)
|
||||
if(H5V_chunk_index(rank,chunk_offset,dset->shared->layout.u.chunk.dim,down_chunks,&store.chunk.index)<0)
|
||||
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
|
||||
|
||||
store.chunk.offset=chunk_offset;
|
||||
@ -3104,15 +3107,15 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "unable to select space");
|
||||
|
||||
for(u = 0; u < rank; u++)
|
||||
count[u] = MIN((idx_cur[u] + 1) * dset->layout.u.chunk.dim[u], size[u] - chunk_offset[u]);
|
||||
count[u] = MIN((idx_cur[u] + 1) * dset->shared->layout.u.chunk.dim[u], size[u] - chunk_offset[u]);
|
||||
|
||||
#ifdef H5D_ISTORE_DEBUG
|
||||
HDfputs("cache:initialize:offset:[", stdout);
|
||||
for(u = 0; u < dset->layout.u.chunk.ndims - 1; u++)
|
||||
for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++)
|
||||
HDfprintf(stdout, "%s%Hd", u ? ", " : "", chunk_offset[u]);
|
||||
HDfputs("]", stdout);
|
||||
HDfputs(":count:[", stdout);
|
||||
for(u = 0; u < dset->layout.u.chunk.ndims - 1; u++)
|
||||
for(u = 0; u < dset->shared->layout.u.chunk.ndims - 1; u++)
|
||||
HDfprintf(stdout, "%s%Hd", u ? ", " : "", count[u]);
|
||||
HDfputs("]\n", stdout);
|
||||
#endif
|
||||
@ -3135,7 +3138,7 @@ H5D_istore_initialize_by_extent(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_ca
|
||||
} /*found */
|
||||
|
||||
/* Increment indices */
|
||||
for(i = dset->layout.u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) {
|
||||
for(i = dset->shared->layout.u.chunk.ndims - 1, carry = 1; i >= 0 && carry; --i) {
|
||||
if(++idx_cur[i] >= idx_max[i])
|
||||
idx_cur[i] = 0;
|
||||
else
|
||||
@ -3219,7 +3222,7 @@ done:
|
||||
herr_t
|
||||
H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
{
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk); /*raw data chunk cache */
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /*raw data chunk cache */
|
||||
H5D_rdcc_ent_t *ent, *next; /*cache entry */
|
||||
H5D_rdcc_ent_t *old_ent; /* Old cache entry */
|
||||
H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
|
||||
@ -3238,17 +3241,17 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
|
||||
/* Check args */
|
||||
assert(f);
|
||||
assert(dset && H5D_CHUNKED == dset->layout.type);
|
||||
assert(dset->layout.u.chunk.ndims > 0 && dset->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
|
||||
assert(dset && H5D_CHUNKED == dset->shared->layout.type);
|
||||
assert(dset->shared->layout.u.chunk.ndims > 0 && dset->shared->layout.u.chunk.ndims <= H5O_LAYOUT_NDIMS);
|
||||
|
||||
/* Go get the rank & dimensions */
|
||||
if((srank = H5S_get_simple_extent_dims(dset->space, curr_dims, NULL)) < 0)
|
||||
if((srank = H5S_get_simple_extent_dims(dset->shared->space, curr_dims, NULL)) < 0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get dataset dimensions");
|
||||
H5_ASSIGN_OVERFLOW(rank,srank,int,unsigned);
|
||||
|
||||
/* Round up to the next integer # of chunks, to accomodate partial chunks */
|
||||
for(u = 0; u < rank; u++)
|
||||
chunks[u] = ((curr_dims[u]+dset->layout.u.chunk.dim[u])-1) / dset->layout.u.chunk.dim[u];
|
||||
chunks[u] = ((curr_dims[u]+dset->shared->layout.u.chunk.dim[u])-1) / dset->shared->layout.u.chunk.dim[u];
|
||||
|
||||
/* Get the "down" sizes for each dimension */
|
||||
if(H5V_array_down(rank,chunks,down_chunks)<0)
|
||||
@ -3263,12 +3266,12 @@ H5D_istore_update_cache(H5F_t *f, hid_t dxpl_id, H5D_t *dset)
|
||||
next=ent->next;
|
||||
|
||||
/* Calculate the index of this chunk */
|
||||
if(H5V_chunk_index(rank,ent->offset,dset->layout.u.chunk.dim,down_chunks,&idx)<0)
|
||||
if(H5V_chunk_index(rank,ent->offset,dset->shared->layout.u.chunk.dim,down_chunks,&idx)<0)
|
||||
HGOTO_ERROR (H5E_DATASPACE, H5E_BADRANGE, FAIL, "can't get chunk index")
|
||||
|
||||
/* Compute the index for the chunk entry */
|
||||
old_idx=ent->idx; /* Save for later */
|
||||
ent->idx=H5D_HASH(dset,idx);
|
||||
ent->idx=H5D_HASH(dset->shared,idx);
|
||||
|
||||
if(old_idx!=ent->idx) {
|
||||
/* Check if there is already a chunk at this chunk's new location */
|
||||
@ -3359,7 +3362,7 @@ done:
|
||||
herr_t
|
||||
H5D_istore_stats (H5D_t *dset, hbool_t headers)
|
||||
{
|
||||
H5D_rdcc_t *rdcc = &(dset->cache.chunk);
|
||||
H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk);
|
||||
double miss_rate;
|
||||
char ascii[32];
|
||||
herr_t ret_value=SUCCEED; /* Return value */
|
||||
|
@ -139,13 +139,13 @@ H5D_mpio_spaces_xfer(H5F_t *f, const H5D_t *dset, size_t elmt_size,
|
||||
HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,"couldn't create MPI file type");
|
||||
|
||||
/* Get the base address of the contiguous dataset or the chunk */
|
||||
if(dset->layout.type == H5D_CONTIGUOUS)
|
||||
if(dset->shared->layout.type == H5D_CONTIGUOUS)
|
||||
addr = H5D_contig_get_addr(dset) + mpi_file_offset;
|
||||
else {
|
||||
haddr_t chunk_addr; /* for collective chunk IO */
|
||||
|
||||
assert(dset->layout.type == H5D_CHUNKED);
|
||||
chunk_addr=H5D_istore_get_addr(f,dxpl_id,&(dset->layout),store->chunk.offset,NULL);
|
||||
assert(dset->shared->layout.type == H5D_CHUNKED);
|
||||
chunk_addr=H5D_istore_get_addr(f,dxpl_id,&(dset->shared->layout),store->chunk.offset,NULL);
|
||||
addr = H5F_BASE_ADDR(f) + chunk_addr + mpi_file_offset;
|
||||
}
|
||||
|
||||
|
14
src/H5Dpkg.h
14
src/H5Dpkg.h
@ -74,10 +74,13 @@ typedef struct H5D_rdcdc_t {
|
||||
} H5D_rdcdc_t;
|
||||
|
||||
/*
|
||||
* A dataset is the following struct.
|
||||
* A dataset is made of two layers, an H5D_t struct that is unique to
|
||||
* each instance of an opened datset, and a shared struct that is only
|
||||
* created once for a given dataset. Thus, if a dataset is opened twice,
|
||||
* there will be two IDs and two H5D_t structs, both sharing one H5D_shared_t.
|
||||
*/
|
||||
struct H5D_t {
|
||||
H5G_entry_t ent; /* cached object header stuff */
|
||||
typedef struct H5D_shared_t {
|
||||
size_t fo_count; /* reference count */
|
||||
hid_t type_id; /* ID for dataset's datatype */
|
||||
H5T_t *type; /* datatype of this dataset */
|
||||
H5S_t *space; /* dataspace of this dataset */
|
||||
@ -101,6 +104,11 @@ struct H5D_t {
|
||||
*/
|
||||
H5D_rdcc_t chunk; /* Information about chunked data */
|
||||
}cache;
|
||||
} H5D_shared_t;
|
||||
|
||||
struct H5D_t {
|
||||
H5G_entry_t ent; /* cached object header stuff */
|
||||
H5D_shared_t *shared; /* cached information from file */
|
||||
};
|
||||
|
||||
/* Enumerated type for allocating dataset's storage */
|
||||
|
@ -201,7 +201,8 @@ typedef struct H5D_dcpl_cache_t {
|
||||
|
||||
/* Library-private functions defined in H5D package */
|
||||
H5_DLL herr_t H5D_init(void);
|
||||
H5_DLL hid_t H5D_open(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL H5D_t *H5D_open(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL herr_t H5D_close(H5D_t *dataset);
|
||||
H5_DLL htri_t H5D_isa(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL H5G_entry_t *H5D_entof(H5D_t *dataset);
|
||||
H5_DLL H5T_t *H5D_typeof(const H5D_t *dset);
|
||||
|
@ -107,7 +107,7 @@ H5D_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i
|
||||
assert(mem_offset_arr);
|
||||
assert(buf);
|
||||
|
||||
switch (dset->layout.type) {
|
||||
switch (dset->shared->layout.type) {
|
||||
case H5D_CONTIGUOUS:
|
||||
/* Read directly from file if the dataset is in an external file */
|
||||
if (store && store->efl.nused>0) {
|
||||
@ -124,7 +124,7 @@ H5D_seq_readvv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache, hid_t dxpl_i
|
||||
} else {
|
||||
/* Pass along the vector of sequences to read */
|
||||
if((ret_value=H5D_contig_readvv(f, dxpl_id, dset,
|
||||
dset->layout.u.contig.addr, dset->layout.u.contig.size,
|
||||
dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size,
|
||||
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
|
||||
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
|
||||
buf))<0)
|
||||
@ -215,7 +215,7 @@ H5D_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
|
||||
assert(mem_offset_arr);
|
||||
assert(buf);
|
||||
|
||||
switch (dset->layout.type) {
|
||||
switch (dset->shared->layout.type) {
|
||||
case H5D_CONTIGUOUS:
|
||||
/* Write directly to file if the dataset is in an external file */
|
||||
if (store && store->efl.nused>0) {
|
||||
@ -232,7 +232,7 @@ H5D_seq_writevv(H5F_t *f, const struct H5D_dxpl_cache_t *dxpl_cache,
|
||||
} else {
|
||||
/* Pass along the vector of sequences to write */
|
||||
if ((ret_value=H5D_contig_writevv(f, dxpl_id, dset,
|
||||
dset->layout.u.contig.addr, dset->layout.u.contig.size,
|
||||
dset->shared->layout.u.contig.addr, dset->shared->layout.u.contig.size,
|
||||
dset_max_nseq, dset_curr_seq, dset_len_arr, dset_offset_arr,
|
||||
mem_max_nseq, mem_curr_seq, mem_len_arr, mem_offset_arr,
|
||||
buf))<0)
|
||||
|
@ -63,7 +63,7 @@ H5D_layout_version_test(hid_t did, unsigned *version)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
|
||||
|
||||
if(version)
|
||||
*version=dset->layout.version;
|
||||
*version=dset->shared->layout.version;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
@ -102,8 +102,8 @@ H5D_layout_contig_size_test(hid_t did, hsize_t *size)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
|
||||
|
||||
if(size) {
|
||||
assert(dset->layout.type==H5D_CONTIGUOUS);
|
||||
*size=dset->layout.u.contig.size;
|
||||
assert(dset->shared->layout.type==H5D_CONTIGUOUS);
|
||||
*size=dset->shared->layout.u.contig.size;
|
||||
} /* end if */
|
||||
|
||||
done:
|
||||
|
@ -20,7 +20,6 @@
|
||||
#define _H5Edefin_H
|
||||
|
||||
/* Major error IDs */
|
||||
hid_t H5E_NONE_MAJOR_g = FAIL; /* No error */
|
||||
hid_t H5E_DATASET_g = FAIL; /* Dataset */
|
||||
hid_t H5E_FUNC_g = FAIL; /* Function entry/exit */
|
||||
hid_t H5E_STORAGE_g = FAIL; /* Data storage */
|
||||
@ -51,8 +50,6 @@ hid_t H5E_CACHE_g = FAIL; /* Object cache */
|
||||
|
||||
/* Minor error IDs */
|
||||
|
||||
hid_t H5E_NONE_MINOR_g = FAIL; /* No error */
|
||||
|
||||
/* Generic low-level file I/O errors */
|
||||
hid_t H5E_SEEKERROR_g = FAIL; /* Seek failed */
|
||||
hid_t H5E_READERROR_g = FAIL; /* Read failed */
|
||||
|
@ -22,11 +22,7 @@
|
||||
/*********************/
|
||||
/* Major error codes */
|
||||
/*********************/
|
||||
assert(H5E_NONE_MAJOR_g==(-1));
|
||||
if((msg = H5E_create_msg(cls, H5E_MAJOR, "No Error"))==NULL)
|
||||
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
|
||||
if((H5E_NONE_MAJOR_g = H5I_register(H5I_ERROR_MSG, msg))<0)
|
||||
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
|
||||
|
||||
assert(H5E_DATASET_g==(-1));
|
||||
if((msg = H5E_create_msg(cls, H5E_MAJOR, "Dataset"))==NULL)
|
||||
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
|
||||
@ -166,12 +162,7 @@ if((H5E_CACHE_g = H5I_register(H5I_ERROR_MSG, msg))<0)
|
||||
/*********************/
|
||||
/* Minor error codes */
|
||||
/*********************/
|
||||
/* No error */
|
||||
assert(H5E_NONE_MINOR_g==(-1));
|
||||
if((msg = H5E_create_msg(cls, H5E_MINOR, "Read failed"))==NULL)
|
||||
HGOTO_ERROR(H5E_ERROR, H5E_CANTINIT, FAIL, "error message initialization failed")
|
||||
if((H5E_NONE_MINOR_g = H5I_register(H5I_ERROR_MSG, msg))<0)
|
||||
HGOTO_ERROR(H5E_ERROR, H5E_CANTREGISTER, FAIL, "can't register error message")
|
||||
|
||||
|
||||
/* Generic low-level file I/O errors */
|
||||
assert(H5E_SEEKERROR_g==(-1));
|
||||
|
@ -23,7 +23,6 @@
|
||||
/* Major error codes */
|
||||
/*********************/
|
||||
|
||||
#define H5E_NONE_MAJOR (H5OPEN H5E_NONE_MAJOR_g)
|
||||
#define H5E_DATASET (H5OPEN H5E_DATASET_g)
|
||||
#define H5E_FUNC (H5OPEN H5E_FUNC_g)
|
||||
#define H5E_STORAGE (H5OPEN H5E_STORAGE_g)
|
||||
@ -51,7 +50,6 @@
|
||||
#define H5E_ERROR (H5OPEN H5E_ERROR_g)
|
||||
#define H5E_PLINE (H5OPEN H5E_PLINE_g)
|
||||
#define H5E_CACHE (H5OPEN H5E_CACHE_g)
|
||||
H5_DLLVAR hid_t H5E_NONE_MAJOR_g; /* No error */
|
||||
H5_DLLVAR hid_t H5E_DATASET_g; /* Dataset */
|
||||
H5_DLLVAR hid_t H5E_FUNC_g; /* Function entry/exit */
|
||||
H5_DLLVAR hid_t H5E_STORAGE_g; /* Data storage */
|
||||
@ -83,9 +81,6 @@ H5_DLLVAR hid_t H5E_CACHE_g; /* Object cache */
|
||||
/*********************/
|
||||
/* Minor error codes */
|
||||
/*********************/
|
||||
/* No error */
|
||||
#define H5E_NONE_MINOR (H5OPEN H5E_NONE_MINOR_g)
|
||||
H5_DLLVAR hid_t H5E_NONE_MINOR_g; /* No error */
|
||||
|
||||
/* Generic low-level file I/O errors */
|
||||
#define H5E_SEEKERROR (H5OPEN H5E_SEEKERROR_g)
|
||||
|
@ -20,7 +20,7 @@
|
||||
#define _H5Eterm_H
|
||||
|
||||
/* Reset major error IDs */
|
||||
H5E_NONE_MAJOR_g=
|
||||
|
||||
H5E_DATASET_g=
|
||||
H5E_FUNC_g=
|
||||
H5E_STORAGE_g=
|
||||
@ -51,8 +51,6 @@ H5E_CACHE_g= (-1);
|
||||
|
||||
/* Reset minor error IDs */
|
||||
|
||||
/* No error */
|
||||
H5E_NONE_MINOR_g=
|
||||
|
||||
/* Generic low-level file I/O errors */
|
||||
H5E_SEEKERROR_g=
|
||||
|
58
src/H5F.c
58
src/H5F.c
@ -1564,7 +1564,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id)
|
||||
} /* end if */
|
||||
|
||||
/* Free the memory for the root group */
|
||||
H5FL_FREE(H5G_t,f->shared->root_grp);
|
||||
H5G_free(f->shared->root_grp);
|
||||
f->shared->root_grp=NULL;
|
||||
}
|
||||
if (H5AC_dest(f, dxpl_id)) {
|
||||
@ -3401,6 +3401,7 @@ H5F_mount(H5G_entry_t *loc, const char *name, H5F_t *child,
|
||||
unsigned lt, rt, md; /*binary search indices */
|
||||
int cmp; /*binary search comparison value*/
|
||||
H5G_entry_t *ent = NULL; /*temporary symbol table entry */
|
||||
H5G_entry_t mp_open_ent; /* entry of moint point to be opened */
|
||||
H5RS_str_t *name_r; /* Ref-counted version of name */
|
||||
herr_t ret_value = SUCCEED; /*return value */
|
||||
|
||||
@ -3416,15 +3417,17 @@ H5F_mount(H5G_entry_t *loc, const char *name, H5F_t *child,
|
||||
* that the mount wouldn't introduce a cycle in the mount tree.
|
||||
*/
|
||||
if (child->mtab.parent)
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "file is already mounted")
|
||||
if (NULL==(mount_point=H5G_open(loc, name, dxpl_id)))
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount point not found")
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "file is already mounted")
|
||||
if (H5G_find(loc, name, NULL, &mp_open_ent/*out*/, H5AC_dxpl_id) < 0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "group not found");
|
||||
if (NULL==(mount_point=H5G_open(&mp_open_ent, dxpl_id)))
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount point not found")
|
||||
|
||||
parent = H5G_fileof(mount_point);
|
||||
mp_ent = H5G_entof(mount_point);
|
||||
for (ancestor=parent; ancestor; ancestor=ancestor->mtab.parent) {
|
||||
if (ancestor==child)
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount would introduce a cycle")
|
||||
if (ancestor==child)
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount would introduce a cycle")
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3436,29 +3439,29 @@ H5F_mount(H5G_entry_t *loc, const char *name, H5F_t *child,
|
||||
rt=parent->mtab.nmounts;
|
||||
cmp = -1;
|
||||
while (lt<rt && cmp) {
|
||||
md = (lt+rt)/2;
|
||||
ent = H5G_entof(parent->mtab.child[md].group);
|
||||
cmp = H5F_addr_cmp(mp_ent->header, ent->header);
|
||||
if (cmp<0) {
|
||||
rt = md;
|
||||
} else if (cmp>0) {
|
||||
lt = md+1;
|
||||
}
|
||||
md = (lt+rt)/2;
|
||||
ent = H5G_entof(parent->mtab.child[md].group);
|
||||
cmp = H5F_addr_cmp(mp_ent->header, ent->header);
|
||||
if (cmp<0) {
|
||||
rt = md;
|
||||
} else if (cmp>0) {
|
||||
lt = md+1;
|
||||
}
|
||||
}
|
||||
if (cmp>0)
|
||||
md++;
|
||||
if (!cmp)
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount point is already in use")
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount point is already in use")
|
||||
|
||||
/* Make room in the table */
|
||||
if (parent->mtab.nmounts>=parent->mtab.nalloc) {
|
||||
unsigned n = MAX(16, 2*parent->mtab.nalloc);
|
||||
H5F_mount_t *x = H5MM_realloc(parent->mtab.child,
|
||||
unsigned n = MAX(16, 2*parent->mtab.nalloc);
|
||||
H5F_mount_t *x = H5MM_realloc(parent->mtab.child,
|
||||
n*sizeof(parent->mtab.child[0]));
|
||||
if (!x)
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for mount table")
|
||||
parent->mtab.child = x;
|
||||
parent->mtab.nalloc = n;
|
||||
if (!x)
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for mount table")
|
||||
parent->mtab.child = x;
|
||||
parent->mtab.nalloc = n;
|
||||
}
|
||||
|
||||
/* Insert into table */
|
||||
@ -3475,9 +3478,9 @@ H5F_mount(H5G_entry_t *loc, const char *name, H5F_t *child,
|
||||
name_r=H5RS_wrap(name);
|
||||
assert(name_r);
|
||||
if (H5G_replace_name( H5G_UNKNOWN, loc, name_r, NULL, NULL, NULL, OP_MOUNT )<0)
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "unable to replace name")
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "unable to replace name")
|
||||
if(H5RS_decr(name_r)<0)
|
||||
HGOTO_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "unable to decrement name string")
|
||||
HGOTO_ERROR(H5E_FILE, H5E_CANTDEC, FAIL, "unable to decrement name string")
|
||||
|
||||
done:
|
||||
if (ret_value<0 && mount_point)
|
||||
@ -3522,6 +3525,7 @@ H5F_unmount(H5G_entry_t *loc, const char *name, hid_t dxpl_id)
|
||||
H5F_t *child = NULL; /*mounted file */
|
||||
H5F_t *parent = NULL; /*file where mounted */
|
||||
H5G_entry_t *ent = NULL; /*temporary symbol table entry */
|
||||
H5G_entry_t mnt_open_ent; /* entry used to open mount point*/
|
||||
herr_t ret_value = FAIL; /*return value */
|
||||
unsigned i; /*coutners */
|
||||
unsigned lt, rt, md=0; /*binary search indices */
|
||||
@ -3537,14 +3541,16 @@ H5F_unmount(H5G_entry_t *loc, const char *name, hid_t dxpl_id)
|
||||
* If we get the root group and the file has a parent in the mount tree,
|
||||
* then we must have found the mount point.
|
||||
*/
|
||||
if (NULL==(mounted=H5G_open(loc, name, dxpl_id)))
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount point not found")
|
||||
if (H5G_find(loc, name, NULL, &mnt_open_ent/*out*/, H5AC_dxpl_id) < 0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "group not found");
|
||||
if (NULL==(mounted=H5G_open(&mnt_open_ent, dxpl_id)))
|
||||
HGOTO_ERROR(H5E_FILE, H5E_MOUNT, FAIL, "mount point not found")
|
||||
child = H5G_fileof(mounted);
|
||||
mnt_ent = H5G_entof(mounted);
|
||||
ent = H5G_entof(child->shared->root_grp);
|
||||
|
||||
if (child->mtab.parent &&
|
||||
H5F_addr_eq(mnt_ent->header, ent->header)) {
|
||||
H5F_addr_eq(mnt_ent->header, ent->header)) {
|
||||
/*
|
||||
* We've been given the root group of the child. We do a reverse
|
||||
* lookup in the parent's mount table to find the correct entry.
|
||||
|
26
src/H5FO.c
26
src/H5FO.c
@ -38,7 +38,7 @@
|
||||
typedef struct H5FO_open_obj_t {
|
||||
haddr_t addr; /* Address of object header for object */
|
||||
/* THIS MUST BE FIRST FOR TBBT ROUTINES */
|
||||
hid_t id; /* Current ID for object */
|
||||
void *obj; /* Pointer to the object */
|
||||
hbool_t deleted; /* Flag to indicate that the object was deleted from the file */
|
||||
} H5FO_open_obj_t;
|
||||
|
||||
@ -90,12 +90,12 @@ done:
|
||||
PURPOSE
|
||||
Checks if an object at an address is already open in the file.
|
||||
USAGE
|
||||
hid_t H5FO_opened(f,addr)
|
||||
void * H5FO_opened(f,addr)
|
||||
const H5F_t *f; IN: File to check opened object info set
|
||||
haddr_t addr; IN: Address of object to check
|
||||
|
||||
RETURNS
|
||||
Returns a non-negative ID for the object on success, negative on failure
|
||||
Returns a pointer to the object on success, NULL on failure
|
||||
DESCRIPTION
|
||||
Check is an object at an address (the address of the object's object header)
|
||||
is already open in the file and return the ID for that object if it is open.
|
||||
@ -104,12 +104,12 @@ done:
|
||||
EXAMPLES
|
||||
REVISION LOG
|
||||
--------------------------------------------------------------------------*/
|
||||
hid_t
|
||||
void *
|
||||
H5FO_opened(const H5F_t *f, haddr_t addr)
|
||||
{
|
||||
H5TB_NODE *obj_node; /* TBBT node holding open object */
|
||||
H5FO_open_obj_t *open_obj; /* Information about open object */
|
||||
hid_t ret_value; /* Return value */
|
||||
void *ret_value; /* Return value */
|
||||
|
||||
FUNC_ENTER_NOAPI_NOFUNC(H5FO_opened)
|
||||
|
||||
@ -123,11 +123,11 @@ H5FO_opened(const H5F_t *f, haddr_t addr)
|
||||
if((obj_node=H5TB_dfind(f->shared->open_objs,&addr,NULL))!=NULL) {
|
||||
open_obj=H5TB_NODE_DATA(obj_node);
|
||||
assert(open_obj);
|
||||
ret_value=open_obj->id;
|
||||
ret_value=open_obj->obj;
|
||||
assert(ret_value>0);
|
||||
} /* end if */
|
||||
else
|
||||
ret_value=FAIL;
|
||||
ret_value=NULL;
|
||||
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
} /* end H5FO_opened() */
|
||||
@ -137,12 +137,12 @@ H5FO_opened(const H5F_t *f, haddr_t addr)
|
||||
NAME
|
||||
H5FO_insert
|
||||
PURPOSE
|
||||
Insert a newly opened object/ID pair into the opened object info set
|
||||
Insert a newly opened object/pointer pair into the opened object info set
|
||||
USAGE
|
||||
herr_t H5FO_insert(f,addr,id)
|
||||
herr_t H5FO_insert(f,addr,obj)
|
||||
H5F_t *f; IN/OUT: File's opened object info set
|
||||
haddr_t addr; IN: Address of object to insert
|
||||
hid_t id; IN: ID of object to insert
|
||||
void *obj; IN: Pointer to object to insert
|
||||
int type; IN: Type of object being inserted
|
||||
|
||||
RETURNS
|
||||
@ -155,7 +155,7 @@ H5FO_opened(const H5F_t *f, haddr_t addr)
|
||||
REVISION LOG
|
||||
--------------------------------------------------------------------------*/
|
||||
herr_t
|
||||
H5FO_insert(const H5F_t *f, haddr_t addr, hid_t id)
|
||||
H5FO_insert(const H5F_t *f, haddr_t addr, void *obj)
|
||||
{
|
||||
H5FO_open_obj_t *open_obj; /* Information about open object */
|
||||
herr_t ret_value=SUCCEED; /* Return value */
|
||||
@ -167,7 +167,7 @@ H5FO_insert(const H5F_t *f, haddr_t addr, hid_t id)
|
||||
assert(f->shared);
|
||||
assert(f->shared->open_objs);
|
||||
assert(H5F_addr_defined(addr));
|
||||
assert(id>0);
|
||||
assert(obj);
|
||||
|
||||
/* Allocate new opened object information structure */
|
||||
if((open_obj=H5FL_MALLOC(H5FO_open_obj_t))==NULL)
|
||||
@ -175,7 +175,7 @@ H5FO_insert(const H5F_t *f, haddr_t addr, hid_t id)
|
||||
|
||||
/* Assign information */
|
||||
open_obj->addr=addr;
|
||||
open_obj->id=id;
|
||||
open_obj->obj=obj;
|
||||
open_obj->deleted=0;
|
||||
|
||||
/* Insert into TBBT */
|
||||
|
@ -24,6 +24,7 @@
|
||||
|
||||
/* Private headers needed by this file */
|
||||
#include "H5private.h"
|
||||
#include "H5Fprivate.h"
|
||||
#include "H5TBprivate.h" /* TBBTs */
|
||||
|
||||
/* Typedefs */
|
||||
@ -35,8 +36,8 @@ typedef H5TB_TREE H5FO_t; /* Currently, all open objects are stored in TBB
|
||||
|
||||
/* Private routines */
|
||||
H5_DLL herr_t H5FO_create(const H5F_t *f);
|
||||
H5_DLL hid_t H5FO_opened(const H5F_t *f, haddr_t addr);
|
||||
H5_DLL herr_t H5FO_insert(const H5F_t *f, haddr_t addr, hid_t id);
|
||||
H5_DLL void *H5FO_opened(const H5F_t *f, haddr_t addr);
|
||||
H5_DLL herr_t H5FO_insert(const H5F_t *f, haddr_t addr, void *obj);
|
||||
H5_DLL herr_t H5FO_delete(H5F_t *f, hid_t dxpl_id, haddr_t addr);
|
||||
H5_DLL herr_t H5FO_mark(const H5F_t *f, haddr_t addr, hbool_t deleted);
|
||||
H5_DLL htri_t H5FO_marked(const H5F_t *f, haddr_t addr);
|
||||
|
190
src/H5G.c
190
src/H5G.c
@ -162,6 +162,7 @@ static size_t H5G_comp_alloc_g = 0; /*sizeof component buffer */
|
||||
|
||||
/* Declare a free list to manage the H5G_t struct */
|
||||
H5FL_DEFINE(H5G_t);
|
||||
H5FL_DEFINE(H5G_shared_t);
|
||||
|
||||
/* Declare extern the PQ free list for the wrapped strings */
|
||||
H5FL_BLK_EXTERN(str_buf);
|
||||
@ -180,6 +181,7 @@ static herr_t H5G_linkval(H5G_entry_t *loc, const char *name, size_t size,
|
||||
char *buf/*out*/, hid_t dxpl_id);
|
||||
static herr_t H5G_move(H5G_entry_t *src_loc, const char *src_name,
|
||||
H5G_entry_t *dst_loc, const char *dst_name, hid_t dxpl_it);
|
||||
static H5G_t * H5G_open_oid(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
static herr_t H5G_unlink(H5G_entry_t *loc, const char *name, hid_t dxpl_id);
|
||||
static herr_t H5G_get_num_objs(H5G_entry_t *grp, hsize_t *num_objs, hid_t dxpl_id);
|
||||
static ssize_t H5G_get_objname_by_idx(H5G_entry_t *loc, hsize_t idx, char* name, size_t size, hid_t dxpl_id);
|
||||
@ -279,25 +281,31 @@ done:
|
||||
hid_t
|
||||
H5Gopen(hid_t loc_id, const char *name)
|
||||
{
|
||||
hid_t ret_value = FAIL;
|
||||
H5G_t *grp = NULL;
|
||||
hid_t ret_value = FAIL;
|
||||
H5G_t *grp = NULL;
|
||||
H5G_entry_t *loc = NULL;
|
||||
H5G_entry_t ent;
|
||||
hid_t dxpl_id = H5AC_dxpl_id; /* dxpl to use to open group */
|
||||
|
||||
FUNC_ENTER_API(H5Gopen, FAIL);
|
||||
H5TRACE2("i","is",loc_id,name);
|
||||
|
||||
/* Check args */
|
||||
if (NULL==(loc=H5G_loc(loc_id)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a location");
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a location");
|
||||
if (!name || !*name)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no name");
|
||||
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no name");
|
||||
|
||||
/* Open the parent group, making sure it's a group */
|
||||
if (H5G_find(loc, name, NULL, &ent/*out*/, dxpl_id) < 0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "group not found");
|
||||
|
||||
/* Open the group */
|
||||
if (NULL == (grp = H5G_open(loc, name, H5AC_dxpl_id)))
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open group");
|
||||
if ((grp = H5G_open(&ent, H5AC_dxpl_id)) <0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open group");
|
||||
/* Register an atom for the group */
|
||||
if ((ret_value = H5I_register(H5I_GROUP, grp)) < 0)
|
||||
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register group");
|
||||
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register group");
|
||||
|
||||
done:
|
||||
if(ret_value<0) {
|
||||
@ -381,7 +389,6 @@ H5Giterate(hid_t loc_id, const char *name, int *idx_p,
|
||||
{
|
||||
int idx;
|
||||
H5G_bt_ud2_t udata;
|
||||
H5G_entry_t *loc = NULL;
|
||||
H5G_t *grp = NULL;
|
||||
herr_t ret_value;
|
||||
|
||||
@ -389,27 +396,25 @@ H5Giterate(hid_t loc_id, const char *name, int *idx_p,
|
||||
H5TRACE5("e","is*Isxx",loc_id,name,idx_p,op,op_data);
|
||||
|
||||
/* Check args */
|
||||
if (NULL==(loc=H5G_loc (loc_id)))
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADTYPE, FAIL, "not a location");
|
||||
if (!name || !*name)
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "no name specified");
|
||||
idx = (idx_p == NULL ? 0 : *idx_p);
|
||||
if (!idx_p)
|
||||
idx_p = &idx;
|
||||
if (idx<0)
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "invalid index specified");
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "invalid index specified");
|
||||
if (!op)
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "no operator specified");
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "no operator specified");
|
||||
|
||||
/*
|
||||
* Open the group on which to operate. We also create a group ID which
|
||||
* we can pass to the application-defined operator.
|
||||
*/
|
||||
if (NULL==(grp = H5G_open (loc, name, H5AC_dxpl_id)))
|
||||
HGOTO_ERROR (H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open group");
|
||||
if ((udata.group_id=H5I_register (H5I_GROUP, grp))<0) {
|
||||
H5G_close(grp);
|
||||
HGOTO_ERROR (H5E_SYM, H5E_CANTREGISTER, FAIL, "unable to register group");
|
||||
if ((udata.group_id = H5Gopen (loc_id, name)) <0)
|
||||
HGOTO_ERROR (H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open group");
|
||||
if ((grp=H5I_object(udata.group_id))==NULL) {
|
||||
H5Gclose(udata.group_id);
|
||||
HGOTO_ERROR (H5E_ATOM, H5E_BADATOM, FAIL, "bad group atom");
|
||||
}
|
||||
|
||||
/* Build udata to pass through H5B_iterate() to H5G_node_iterate() */
|
||||
@ -1728,9 +1733,13 @@ H5G_mkroot (H5F_t *f, hid_t dxpl_id, H5G_entry_t *ent)
|
||||
* never be closed.
|
||||
*/
|
||||
if (NULL==(f->shared->root_grp = H5FL_CALLOC (H5G_t)))
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
|
||||
if (NULL==(f->shared->root_grp->shared = H5FL_CALLOC (H5G_shared_t))) {
|
||||
H5FL_FREE(H5G_t, f->shared->root_grp);
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed");
|
||||
}
|
||||
f->shared->root_grp->ent = *ent;
|
||||
f->shared->root_grp->nref = 1;
|
||||
f->shared->root_grp->shared->fo_count = 1;
|
||||
assert (1==f->nopen_objs);
|
||||
f->nopen_objs = 0;
|
||||
|
||||
@ -1769,7 +1778,7 @@ H5G_create(H5G_entry_t *loc, const char *name, size_t size_hint, hid_t dxpl_id)
|
||||
{
|
||||
H5G_t *grp = NULL; /*new group */
|
||||
H5F_t *file = NULL; /* File new group will be in */
|
||||
unsigned stab_init=0; /* Flag to indicate that the symbol stable was created successfully */
|
||||
unsigned stab_init=0; /* Flag to indicate that the symbol table was created successfully */
|
||||
H5G_t *ret_value; /* Return value */
|
||||
|
||||
FUNC_ENTER_NOAPI(H5G_create, NULL);
|
||||
@ -1780,7 +1789,10 @@ H5G_create(H5G_entry_t *loc, const char *name, size_t size_hint, hid_t dxpl_id)
|
||||
|
||||
/* create an open group */
|
||||
if (NULL==(grp = H5FL_CALLOC(H5G_t)))
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
if (NULL==(grp->shared = H5FL_CALLOC(H5G_t)))
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
|
||||
|
||||
/* What file is the group being added to? */
|
||||
if (NULL==(file=H5G_insertion_file(loc, name, dxpl_id)))
|
||||
@ -1788,14 +1800,18 @@ H5G_create(H5G_entry_t *loc, const char *name, size_t size_hint, hid_t dxpl_id)
|
||||
|
||||
/* Create the group entry */
|
||||
if (H5G_stab_create(file, dxpl_id, size_hint, &(grp->ent)/*out*/) < 0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, NULL, "can't create grp");
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, NULL, "can't create grp");
|
||||
stab_init=1; /* Indicate that the symbol table information is valid */
|
||||
|
||||
/* insert child name into parent */
|
||||
if(H5G_insert(loc,name,&(grp->ent), dxpl_id)<0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, NULL, "can't insert group");
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, NULL, "can't insert group");
|
||||
|
||||
grp->nref = 1;
|
||||
/* Add group to list of open objects in file */
|
||||
if(H5FO_insert(grp->ent.file, grp->ent.header, grp->shared)<0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, NULL, "can't insert group into list of open objects")
|
||||
|
||||
grp->shared->fo_count = 1;
|
||||
|
||||
/* Set return value */
|
||||
ret_value=grp;
|
||||
@ -1810,6 +1826,8 @@ done:
|
||||
HDONE_ERROR(H5E_SYM, H5E_CANTDELETE, NULL, "unable to delete object header");
|
||||
} /* end if */
|
||||
if(grp!=NULL)
|
||||
if(grp->shared != NULL)
|
||||
H5FL_FREE(H5G_shared_t, grp->shared);
|
||||
H5FL_FREE(H5G_t,grp);
|
||||
} /* end if */
|
||||
|
||||
@ -1911,25 +1929,47 @@ H5G_link_isa(H5G_entry_t *ent, hid_t UNUSED dxpl_id)
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
H5G_t *
|
||||
H5G_open(H5G_entry_t *loc, const char *name, hid_t dxpl_id)
|
||||
H5G_open(H5G_entry_t *ent, hid_t dxpl_id)
|
||||
{
|
||||
H5G_t *grp = NULL;
|
||||
H5G_t *ret_value = NULL;
|
||||
H5G_entry_t ent; /* group symbol table entry */
|
||||
H5G_t *grp = NULL;
|
||||
H5G_shared_t *shared_fo=NULL;
|
||||
H5G_t *ret_value=NULL;
|
||||
|
||||
FUNC_ENTER_NOAPI(H5G_open, NULL);
|
||||
|
||||
/* Check args */
|
||||
assert(loc);
|
||||
assert(name && *name);
|
||||
assert(ent);
|
||||
|
||||
/* Open the object, making sure it's a group */
|
||||
if (H5G_find(loc, name, NULL, &ent/*out*/, dxpl_id) < 0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "group not found");
|
||||
/* Check if group was already open */
|
||||
if((shared_fo=H5FO_opened(ent->file, ent->header))==NULL) {
|
||||
|
||||
/* Open the group object */
|
||||
if ((grp=H5G_open_oid(&ent, dxpl_id)) ==NULL)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "not found");
|
||||
/* Clear any errors from H5FO_opened() */
|
||||
H5E_clear_stack(NULL);
|
||||
|
||||
/* Open the group object */
|
||||
if ((grp=H5G_open_oid(ent, dxpl_id)) ==NULL)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, NULL, "not found");
|
||||
|
||||
/* Add group to list of open objects in file */
|
||||
if(H5FO_insert(grp->ent.file, grp->ent.header, grp->shared)<0)
|
||||
{
|
||||
H5FL_FREE(H5G_shared_t, grp->shared);
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINSERT, NULL, "can't insert group into list of open objects")
|
||||
}
|
||||
|
||||
grp->shared->fo_count =1;
|
||||
}
|
||||
else {
|
||||
if(NULL == (grp = H5FL_CALLOC(H5G_t)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "can't allocate space for group")
|
||||
|
||||
/* Shallow copy (take ownership) of the group entry object */
|
||||
if(H5G_ent_copy(&(grp->ent), ent, H5G_COPY_SHALLOW)<0)
|
||||
HGOTO_ERROR (H5E_SYM, H5E_CANTCOPY, NULL, "can't copy group entry")
|
||||
|
||||
grp->shared=shared_fo;
|
||||
shared_fo->fo_count++;
|
||||
}
|
||||
|
||||
/* Set return value */
|
||||
ret_value = grp;
|
||||
@ -1962,7 +2002,7 @@ done:
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
H5G_t *
|
||||
static H5G_t *
|
||||
H5G_open_oid(H5G_entry_t *ent, hid_t dxpl_id)
|
||||
{
|
||||
H5G_t *grp = NULL;
|
||||
@ -1977,6 +2017,8 @@ H5G_open_oid(H5G_entry_t *ent, hid_t dxpl_id)
|
||||
/* Open the object, making sure it's a group */
|
||||
if (NULL==(grp = H5FL_CALLOC(H5G_t)))
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
if (NULL==(grp->shared = H5FL_CALLOC(H5G_t)))
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
|
||||
/* Copy over (take ownership) of the group entry object */
|
||||
H5G_ent_copy(&(grp->ent),ent,H5G_COPY_SHALLOW);
|
||||
@ -1988,14 +2030,16 @@ H5G_open_oid(H5G_entry_t *ent, hid_t dxpl_id)
|
||||
H5O_close(&(grp->ent));
|
||||
HGOTO_ERROR (H5E_SYM, H5E_CANTOPENOBJ, NULL, "not a group");
|
||||
}
|
||||
grp->nref = 1;
|
||||
|
||||
/* Set return value */
|
||||
ret_value = grp;
|
||||
|
||||
done:
|
||||
if (!ret_value && grp)
|
||||
if (!ret_value && grp) {
|
||||
if(grp->shared)
|
||||
H5FL_FREE(H5G_shared_t, grp->shared);
|
||||
H5FL_FREE(H5G_t,grp);
|
||||
}
|
||||
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
}
|
||||
@ -2061,19 +2105,63 @@ H5G_close(H5G_t *grp)
|
||||
FUNC_ENTER_NOAPI(H5G_close, FAIL);
|
||||
|
||||
/* Check args */
|
||||
assert(grp);
|
||||
assert(grp->nref > 0);
|
||||
assert(grp && grp->shared);
|
||||
assert(grp->shared->fo_count > 0);
|
||||
|
||||
if (1 == grp->nref) {
|
||||
assert (grp!=H5G_rootof(H5G_fileof(grp)));
|
||||
if (H5O_close(&(grp->ent)) < 0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to close");
|
||||
grp->nref = 0;
|
||||
H5FL_FREE (H5G_t,grp);
|
||||
--grp->shared->fo_count;
|
||||
|
||||
if (0 == grp->shared->fo_count) {
|
||||
assert (grp!=H5G_rootof(H5G_fileof(grp)));
|
||||
|
||||
/* Remove the dataset from the list of opened objects in the file */
|
||||
if(H5FO_delete(grp->ent.file, H5AC_dxpl_id, grp->ent.header)<0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTRELEASE, FAIL, "can't remove group from list of open objects")
|
||||
if (H5O_close(&(grp->ent)) < 0)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "unable to close");
|
||||
H5FL_FREE (H5G_shared_t, grp->shared);
|
||||
} else {
|
||||
--grp->nref;
|
||||
if(H5G_free_ent_name(&(grp->ent))<0)
|
||||
{
|
||||
H5FL_FREE (H5G_t,grp);
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTINIT, FAIL, "can't free group entry name");
|
||||
}
|
||||
}
|
||||
|
||||
H5FL_FREE (H5G_t,grp);
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
}
|
||||
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
* Function: H5G_free
|
||||
*
|
||||
* Purpose: Free memory used by an H5G_t struct (and its H5G_shared_t).
|
||||
* Does not close the group or decrement the reference count.
|
||||
* Used to free memory used by the root group.
|
||||
*
|
||||
* Return: Success: Non-negative
|
||||
* Failure: Negative
|
||||
*
|
||||
* Programmer: James Laird
|
||||
* Tuesday, September 7, 2004
|
||||
*
|
||||
* Modifications:
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
herr_t
|
||||
H5G_free(H5G_t *grp)
|
||||
{
|
||||
herr_t ret_value=SUCCEED; /* Return value */
|
||||
FUNC_ENTER_NOAPI(H5G_free, FAIL);
|
||||
|
||||
assert(grp && grp->shared);
|
||||
|
||||
H5FL_FREE(H5G_shared_t, grp->shared);
|
||||
H5FL_FREE(H5G_t, grp);
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
}
|
||||
@ -3238,8 +3326,8 @@ H5G_free_grp_name(H5G_t *grp)
|
||||
FUNC_ENTER_NOAPI(H5G_free_grp_name, FAIL);
|
||||
|
||||
/* Check args */
|
||||
assert(grp);
|
||||
assert(grp->nref > 0);
|
||||
assert(grp && grp->shared);
|
||||
assert(grp->shared->fo_count > 0);
|
||||
|
||||
/* Get the entry for the group */
|
||||
if (NULL==( ent = H5G_entof(grp)))
|
||||
|
11
src/H5Gpkg.h
11
src/H5Gpkg.h
@ -46,13 +46,20 @@ typedef struct H5G_node_t {
|
||||
H5G_entry_t *entry; /*array of symbol table entries */
|
||||
} H5G_node_t;
|
||||
|
||||
/*
|
||||
* Reference count shared between all instances of an open group
|
||||
*/
|
||||
struct H5G_shared_t {
|
||||
int fo_count; /* open file object count */
|
||||
};
|
||||
|
||||
/*
|
||||
* A group handle passed around through layers of the library within and
|
||||
* above the H5G layer.
|
||||
*/
|
||||
struct H5G_t {
|
||||
int nref; /*open reference count */
|
||||
H5G_entry_t ent; /*info about the group */
|
||||
H5G_shared_t* shared; /*shared file object data */
|
||||
H5G_entry_t ent; /*info about the group */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -114,6 +114,7 @@ typedef struct H5G_entry_t {
|
||||
} H5G_entry_t;
|
||||
|
||||
typedef struct H5G_t H5G_t;
|
||||
typedef struct H5G_shared_t H5G_shared_t;
|
||||
|
||||
/* Type of operation being performed for call to H5G_replace_name() */
|
||||
typedef enum {
|
||||
@ -139,8 +140,8 @@ H5_DLL H5G_entry_t *H5G_loc(hid_t loc_id);
|
||||
H5_DLL herr_t H5G_mkroot(H5F_t *f, hid_t dxpl_id, H5G_entry_t *root_entry);
|
||||
H5_DLL H5G_entry_t *H5G_entof(H5G_t *grp);
|
||||
H5_DLL H5F_t *H5G_fileof(H5G_t *grp);
|
||||
H5_DLL H5G_t *H5G_open(H5G_entry_t *loc, const char *name, hid_t dxpl_id);
|
||||
H5_DLL H5G_t *H5G_open_oid(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL herr_t H5G_free(H5G_t *grp);
|
||||
H5_DLL H5G_t *H5G_open(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL herr_t H5G_close(H5G_t *grp);
|
||||
H5_DLL H5G_obj_t H5G_get_type(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL herr_t H5G_get_objinfo(H5G_entry_t *loc, const char *name,
|
||||
|
@ -1244,7 +1244,7 @@ H5O_link(const H5G_entry_t *ent, int adjust, hid_t dxpl_id)
|
||||
/* Check if the object should be deleted */
|
||||
if(oh->nlink==0) {
|
||||
/* Check if the object is still open by the user */
|
||||
if(H5FO_opened(ent->file,ent->header)>=0) {
|
||||
if(H5FO_opened(ent->file,ent->header)!=NULL) {
|
||||
/* Flag the object to be deleted when it's closed */
|
||||
if(H5FO_mark(ent->file,ent->header,TRUE)<0)
|
||||
HGOTO_ERROR(H5E_OHDR, H5E_CANTDELETE, FAIL, "can't mark object for deletion");
|
||||
|
532
src/H5Odtype.c
532
src/H5Odtype.c
File diff suppressed because it is too large
Load Diff
27
src/H5R.c
27
src/H5R.c
@ -324,8 +324,9 @@ done:
|
||||
static hid_t
|
||||
H5R_dereference(H5F_t *file, hid_t dxpl_id, H5R_type_t ref_type, void *_ref)
|
||||
{
|
||||
H5D_t *dset; /* Pointer to dataset to open */
|
||||
H5T_t *type; /* Pointer to datatype to open */
|
||||
H5G_t *group; /* Pointer to group to open */
|
||||
H5T_t *datatype; /* Pointer to datatype to open */
|
||||
H5G_entry_t ent; /* Symbol table entry */
|
||||
uint8_t *p; /* Pointer to OID to store */
|
||||
int oid_type; /* type of object being dereferenced */
|
||||
@ -389,14 +390,14 @@ H5R_dereference(H5F_t *file, hid_t dxpl_id, H5R_type_t ref_type, void *_ref)
|
||||
if(H5O_link(&ent,0,dxpl_id)<=0)
|
||||
HGOTO_ERROR(H5E_REFERENCE, H5E_LINKCOUNT, FAIL, "dereferencing deleted object");
|
||||
|
||||
/* Open the dataset object */
|
||||
/* Open the object */
|
||||
oid_type=H5G_get_type(&ent,dxpl_id);
|
||||
switch(oid_type) {
|
||||
case H5G_GROUP:
|
||||
if ((group=H5G_open_oid(&ent,dxpl_id)) == NULL)
|
||||
if ((group=H5G_open(&ent,dxpl_id)) == NULL)
|
||||
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "not found");
|
||||
|
||||
/* Create an atom for the dataset */
|
||||
/* Create an atom for the group */
|
||||
if ((ret_value = H5I_register(H5I_GROUP, group)) < 0) {
|
||||
H5G_close(group);
|
||||
HGOTO_ERROR(H5E_SYM, H5E_CANTREGISTER, FAIL, "can't register group");
|
||||
@ -404,20 +405,26 @@ H5R_dereference(H5F_t *file, hid_t dxpl_id, H5R_type_t ref_type, void *_ref)
|
||||
break;
|
||||
|
||||
case H5G_TYPE:
|
||||
if ((datatype=H5T_open_oid(&ent, dxpl_id)) == NULL)
|
||||
if ((type=H5T_open(&ent, dxpl_id)) == NULL)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, FAIL, "not found");
|
||||
|
||||
/* Create an atom for the dataset */
|
||||
if ((ret_value = H5I_register(H5I_DATATYPE, datatype)) < 0) {
|
||||
H5T_close(datatype);
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "can't register group");
|
||||
/* Create an atom for the datatype */
|
||||
if ((ret_value = H5I_register(H5I_DATATYPE, type)) < 0) {
|
||||
H5T_close(type);
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "can't register datatype");
|
||||
}
|
||||
break;
|
||||
|
||||
case H5G_DATASET:
|
||||
/* Open the dataset */
|
||||
if ((ret_value=H5D_open(&ent,dxpl_id)) < 0)
|
||||
if ((dset=H5D_open(&ent,dxpl_id)) <0)
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_NOTFOUND, FAIL, "not found");
|
||||
|
||||
/* Create an atom for the dataset */
|
||||
if ((ret_value = H5I_register(H5I_DATASET, dset)) < 0) {
|
||||
H5D_close(dset);
|
||||
HGOTO_ERROR(H5E_DATASET, H5E_CANTREGISTER, FAIL, "can't register dataset");
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -34,6 +34,7 @@
|
||||
|
||||
/* Declare extern the free list for H5T_t's */
|
||||
H5FL_EXTERN(H5T_t);
|
||||
H5FL_EXTERN(H5T_shared_t);
|
||||
|
||||
|
||||
/*--------------------------------------------------------------------------
|
||||
@ -155,34 +156,37 @@ H5T_array_create(H5T_t *base, int ndims, const hsize_t dim[/* ndims */],
|
||||
/* Build new type */
|
||||
if (NULL==(ret_value = H5FL_CALLOC(H5T_t)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
|
||||
if (NULL==(ret_value->shared=H5FL_CALLOC(H5T_shared_t))) {
|
||||
H5FL_FREE(H5T_t, ret_value);
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
}
|
||||
ret_value->ent.header = HADDR_UNDEF;
|
||||
ret_value->type = H5T_ARRAY;
|
||||
ret_value->shared->type = H5T_ARRAY;
|
||||
|
||||
/* Copy the base type of the array */
|
||||
ret_value->parent = H5T_copy(base, H5T_COPY_ALL);
|
||||
ret_value->shared->parent = H5T_copy(base, H5T_COPY_ALL);
|
||||
|
||||
/* Set the array parameters */
|
||||
ret_value->u.array.ndims = ndims;
|
||||
ret_value->shared->u.array.ndims = ndims;
|
||||
|
||||
/* Copy the array dimensions & compute the # of elements in the array */
|
||||
for(i=0, ret_value->u.array.nelem=1; i<ndims; i++) {
|
||||
H5_ASSIGN_OVERFLOW(ret_value->u.array.dim[i],dim[i],hsize_t,size_t);
|
||||
ret_value->u.array.nelem *= (size_t)dim[i];
|
||||
for(i=0, ret_value->shared->u.array.nelem=1; i<ndims; i++) {
|
||||
H5_ASSIGN_OVERFLOW(ret_value->shared->u.array.dim[i],dim[i],hsize_t,size_t);
|
||||
ret_value->shared->u.array.nelem *= (size_t)dim[i];
|
||||
} /* end for */
|
||||
|
||||
/* Copy the dimension permutations */
|
||||
for(i=0; i<ndims; i++)
|
||||
ret_value->u.array.perm[i] = perm ? perm[i] : i;
|
||||
ret_value->shared->u.array.perm[i] = perm ? perm[i] : i;
|
||||
|
||||
/* Set the array's size (number of elements * element datatype's size) */
|
||||
ret_value->size = ret_value->parent->size * ret_value->u.array.nelem;
|
||||
ret_value->shared->size = ret_value->shared->parent->shared->size * ret_value->shared->u.array.nelem;
|
||||
|
||||
/*
|
||||
* Set the "force conversion" flag if the base datatype indicates
|
||||
*/
|
||||
if(base->force_conv==TRUE)
|
||||
ret_value->force_conv=TRUE;
|
||||
if(base->shared->force_conv==TRUE)
|
||||
ret_value->shared->force_conv=TRUE;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
@ -216,7 +220,7 @@ H5Tget_array_ndims(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL==(dt=H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype object");
|
||||
if(dt->type!=H5T_ARRAY)
|
||||
if(dt->shared->type!=H5T_ARRAY)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an array datatype");
|
||||
|
||||
/* Retrieve the number of dimensions */
|
||||
@ -251,10 +255,10 @@ H5T_get_array_ndims(H5T_t *dt)
|
||||
FUNC_ENTER_NOAPI(H5T_get_array_ndims, FAIL);
|
||||
|
||||
assert(dt);
|
||||
assert(dt->type==H5T_ARRAY);
|
||||
assert(dt->shared->type==H5T_ARRAY);
|
||||
|
||||
/* Retrieve the number of dimensions */
|
||||
ret_value=dt->u.array.ndims;
|
||||
ret_value=dt->shared->u.array.ndims;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
@ -288,7 +292,7 @@ H5Tget_array_dims(hid_t type_id, hsize_t dims[], int perm[])
|
||||
/* Check args */
|
||||
if (NULL==(dt=H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype object");
|
||||
if(dt->type!=H5T_ARRAY)
|
||||
if(dt->shared->type!=H5T_ARRAY)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an array datatype");
|
||||
|
||||
/* Retrieve the sizes of the dimensions */
|
||||
@ -324,20 +328,20 @@ H5T_get_array_dims(H5T_t *dt, hsize_t dims[], int perm[])
|
||||
FUNC_ENTER_NOAPI(H5T_get_array_dims, FAIL);
|
||||
|
||||
assert(dt);
|
||||
assert(dt->type==H5T_ARRAY);
|
||||
assert(dt->shared->type==H5T_ARRAY);
|
||||
|
||||
/* Retrieve the sizes of the dimensions */
|
||||
if(dims)
|
||||
for(i=0; i<dt->u.array.ndims; i++)
|
||||
dims[i]=dt->u.array.dim[i];
|
||||
for(i=0; i<dt->shared->u.array.ndims; i++)
|
||||
dims[i]=dt->shared->u.array.dim[i];
|
||||
|
||||
/* Retrieve the dimension permutations */
|
||||
if(perm)
|
||||
for(i=0; i<dt->u.array.ndims; i++)
|
||||
perm[i]=dt->u.array.perm[i];
|
||||
for(i=0; i<dt->shared->u.array.ndims; i++)
|
||||
perm[i]=dt->shared->u.array.perm[i];
|
||||
|
||||
/* Pass along the array rank as the return value */
|
||||
ret_value=dt->u.array.ndims;
|
||||
ret_value=dt->shared->u.array.ndims;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value);
|
||||
|
@ -28,6 +28,7 @@
|
||||
|
||||
#include "H5private.h" /* Generic Functions */
|
||||
#include "H5Eprivate.h" /* Error handling */
|
||||
#include "H5FOprivate.h" /* File objects */
|
||||
#include "H5Iprivate.h" /* IDs */
|
||||
#include "H5Oprivate.h" /* Object headers */
|
||||
#include "H5Tpkg.h" /* Datatypes */
|
||||
@ -132,9 +133,9 @@ H5T_commit (H5G_entry_t *loc, const char *name, H5T_t *type, hid_t dxpl_id)
|
||||
* normally fails on such types (try H5Tclose(H5T_NATIVE_INT)) but closing
|
||||
* a named type should always succeed.
|
||||
*/
|
||||
if (H5T_STATE_NAMED==type->state || H5T_STATE_OPEN==type->state)
|
||||
if (H5T_STATE_NAMED==type->shared->state || H5T_STATE_OPEN==type->shared->state)
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "data type is already committed");
|
||||
if (H5T_STATE_IMMUTABLE==type->state)
|
||||
if (H5T_STATE_IMMUTABLE==type->shared->state)
|
||||
HGOTO_ERROR (H5E_ARGS, H5E_BADVALUE, FAIL, "data type is immutable");
|
||||
|
||||
/* Find the insertion file */
|
||||
@ -160,7 +161,12 @@ H5T_commit (H5G_entry_t *loc, const char *name, H5T_t *type, hid_t dxpl_id)
|
||||
HGOTO_ERROR (H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to update type header message");
|
||||
if (H5G_insert (loc, name, &(type->ent), dxpl_id)<0)
|
||||
HGOTO_ERROR (H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to name data type");
|
||||
type->state = H5T_STATE_OPEN;
|
||||
type->shared->state = H5T_STATE_OPEN;
|
||||
type->shared->fo_count=1;
|
||||
|
||||
/* Add datatype to the list of open objects in the file */
|
||||
if(H5FO_insert(type->ent.file, type->ent.header, type->shared)<0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINSERT, FAIL, "can't insert datatype into list of open objects")
|
||||
|
||||
/* Mark datatype as being on memory now. Since this datatype may still be used in memory
|
||||
* after committed to disk, change its size back as in memory. */
|
||||
@ -169,7 +175,7 @@ H5T_commit (H5G_entry_t *loc, const char *name, H5T_t *type, hid_t dxpl_id)
|
||||
|
||||
done:
|
||||
if (ret_value<0) {
|
||||
if ((type->state==H5T_STATE_TRANSIENT || type->state==H5T_STATE_RDONLY) && H5F_addr_defined(type->ent.header)) {
|
||||
if ((type->shared->state==H5T_STATE_TRANSIENT || type->shared->state==H5T_STATE_RDONLY) && H5F_addr_defined(type->ent.header)) {
|
||||
if(H5O_close(&(type->ent))<0)
|
||||
HDONE_ERROR(H5E_DATATYPE, H5E_CLOSEERROR, FAIL, "unable to release object header");
|
||||
if(H5O_delete(file, dxpl_id,type->ent.header)<0)
|
||||
@ -241,7 +247,7 @@ H5T_committed(H5T_t *type)
|
||||
|
||||
assert (type);
|
||||
|
||||
FUNC_LEAVE_NOAPI(H5T_STATE_OPEN==type->state || H5T_STATE_NAMED==type->state);
|
||||
FUNC_LEAVE_NOAPI(H5T_STATE_OPEN==type->shared->state || H5T_STATE_NAMED==type->shared->state);
|
||||
} /* end H5T_committed() */
|
||||
|
||||
|
||||
|
@ -100,9 +100,9 @@ H5Tget_member_offset(hid_t type_id, unsigned membno)
|
||||
H5TRACE2("z","iIu",type_id,membno);
|
||||
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)) || H5T_COMPOUND != dt->type)
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)) || H5T_COMPOUND != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a compound datatype")
|
||||
if (membno >= dt->u.compnd.nmembs)
|
||||
if (membno >= dt->shared->u.compnd.nmembs)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, 0, "invalid member number")
|
||||
|
||||
/* Value */
|
||||
@ -142,10 +142,10 @@ H5T_get_member_offset(const H5T_t *dt, unsigned membno)
|
||||
FUNC_ENTER_NOAPI(H5T_get_member_offset, 0)
|
||||
|
||||
assert(dt);
|
||||
assert(membno < dt->u.compnd.nmembs);
|
||||
assert(membno < dt->shared->u.compnd.nmembs);
|
||||
|
||||
/* Value */
|
||||
ret_value = dt->u.compnd.memb[membno].offset;
|
||||
ret_value = dt->shared->u.compnd.memb[membno].offset;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
@ -186,13 +186,13 @@ H5Tget_member_class(hid_t type_id, unsigned membno)
|
||||
H5TRACE2("Tt","iIu",type_id,membno);
|
||||
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)) || H5T_COMPOUND != dt->type)
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)) || H5T_COMPOUND != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_NO_CLASS, "not a compound datatype")
|
||||
if (membno >= dt->u.compnd.nmembs)
|
||||
if (membno >= dt->shared->u.compnd.nmembs)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, H5T_NO_CLASS, "invalid member number")
|
||||
|
||||
/* Value */
|
||||
ret_value = dt->u.compnd.memb[membno].type->type;
|
||||
ret_value = dt->shared->u.compnd.memb[membno].type->shared->type;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -241,9 +241,9 @@ H5Tget_member_type(hid_t type_id, unsigned membno)
|
||||
H5TRACE2("i","iIs",type_id,membno);
|
||||
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)) || H5T_COMPOUND != dt->type)
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)) || H5T_COMPOUND != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a compound datatype")
|
||||
if (membno >= dt->u.compnd.nmembs)
|
||||
if (membno >= dt->shared->u.compnd.nmembs)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid member number")
|
||||
if ((memb_dt=H5T_get_member_type(dt, membno))==NULL)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to retrieve member type")
|
||||
@ -289,10 +289,10 @@ H5T_get_member_type(const H5T_t *dt, unsigned membno)
|
||||
FUNC_ENTER_NOAPI(H5T_get_member_type, NULL)
|
||||
|
||||
assert(dt);
|
||||
assert(membno < dt->u.compnd.nmembs);
|
||||
assert(membno < dt->shared->u.compnd.nmembs);
|
||||
|
||||
/* Copy datatype into an atom */
|
||||
if (NULL == (ret_value = H5T_copy(dt->u.compnd.memb[membno].type, H5T_COPY_REOPEN)))
|
||||
if (NULL == (ret_value = H5T_copy(dt->shared->u.compnd.memb[membno].type, H5T_COPY_REOPEN)))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy member datatype")
|
||||
|
||||
done:
|
||||
@ -337,9 +337,9 @@ H5Tinsert(hid_t parent_id, const char *name, size_t offset, hid_t member_id)
|
||||
/* Check args */
|
||||
if (parent_id==member_id)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "can't insert compound datatype within itself")
|
||||
if (NULL == (parent = H5I_object_verify(parent_id,H5I_DATATYPE)) || H5T_COMPOUND != parent->type)
|
||||
if (NULL == (parent = H5I_object_verify(parent_id,H5I_DATATYPE)) || H5T_COMPOUND != parent->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a compound datatype")
|
||||
if (H5T_STATE_TRANSIENT!=parent->state)
|
||||
if (H5T_STATE_TRANSIENT!=parent->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "parent type read-only")
|
||||
if (!name || !*name)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no member name")
|
||||
@ -420,82 +420,82 @@ H5T_insert(H5T_t *parent, const char *name, size_t offset, const H5T_t *member)
|
||||
FUNC_ENTER_NOAPI(H5T_insert, FAIL)
|
||||
|
||||
/* check args */
|
||||
assert(parent && H5T_COMPOUND == parent->type);
|
||||
assert(H5T_STATE_TRANSIENT==parent->state);
|
||||
assert(parent && H5T_COMPOUND == parent->shared->type);
|
||||
assert(H5T_STATE_TRANSIENT==parent->shared->state);
|
||||
assert(member);
|
||||
assert(name && *name);
|
||||
|
||||
/* Does NAME already exist in PARENT? */
|
||||
for (i=0; i<parent->u.compnd.nmembs; i++) {
|
||||
if (!HDstrcmp(parent->u.compnd.memb[i].name, name))
|
||||
for (i=0; i<parent->shared->u.compnd.nmembs; i++) {
|
||||
if (!HDstrcmp(parent->shared->u.compnd.memb[i].name, name))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINSERT, FAIL, "member name is not unique")
|
||||
}
|
||||
|
||||
/* Does the new member overlap any existing member ? */
|
||||
total_size=member->size;
|
||||
for (i=0; i<parent->u.compnd.nmembs; i++) {
|
||||
if ((offset <= parent->u.compnd.memb[i].offset &&
|
||||
offset + total_size > parent->u.compnd.memb[i].offset) ||
|
||||
(parent->u.compnd.memb[i].offset <= offset &&
|
||||
parent->u.compnd.memb[i].offset +
|
||||
parent->u.compnd.memb[i].size > offset))
|
||||
total_size=member->shared->size;
|
||||
for (i=0; i<parent->shared->u.compnd.nmembs; i++) {
|
||||
if ((offset <= parent->shared->u.compnd.memb[i].offset &&
|
||||
offset + total_size > parent->shared->u.compnd.memb[i].offset) ||
|
||||
(parent->shared->u.compnd.memb[i].offset <= offset &&
|
||||
parent->shared->u.compnd.memb[i].offset +
|
||||
parent->shared->u.compnd.memb[i].size > offset))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINSERT, FAIL, "member overlaps with another member")
|
||||
}
|
||||
|
||||
/* Does the new member overlap the end of the compound type? */
|
||||
if(offset+total_size>parent->size)
|
||||
if(offset+total_size>parent->shared->size)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINSERT, FAIL, "member extends past end of compound type")
|
||||
|
||||
/* Increase member array if necessary */
|
||||
if (parent->u.compnd.nmembs >= parent->u.compnd.nalloc) {
|
||||
size_t na = parent->u.compnd.nalloc + H5T_COMPND_INC;
|
||||
H5T_cmemb_t *x = H5MM_realloc (parent->u.compnd.memb,
|
||||
if (parent->shared->u.compnd.nmembs >= parent->shared->u.compnd.nalloc) {
|
||||
size_t na = parent->shared->u.compnd.nalloc + H5T_COMPND_INC;
|
||||
H5T_cmemb_t *x = H5MM_realloc (parent->shared->u.compnd.memb,
|
||||
na * sizeof(H5T_cmemb_t));
|
||||
|
||||
if (!x)
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
|
||||
parent->u.compnd.nalloc = na;
|
||||
parent->u.compnd.memb = x;
|
||||
parent->shared->u.compnd.nalloc = na;
|
||||
parent->shared->u.compnd.memb = x;
|
||||
}
|
||||
|
||||
/* Add member to end of member array */
|
||||
idx = parent->u.compnd.nmembs;
|
||||
parent->u.compnd.memb[idx].name = H5MM_xstrdup(name);
|
||||
parent->u.compnd.memb[idx].offset = offset;
|
||||
parent->u.compnd.memb[idx].size = total_size;
|
||||
parent->u.compnd.memb[idx].type = H5T_copy (member, H5T_COPY_ALL);
|
||||
idx = parent->shared->u.compnd.nmembs;
|
||||
parent->shared->u.compnd.memb[idx].name = H5MM_xstrdup(name);
|
||||
parent->shared->u.compnd.memb[idx].offset = offset;
|
||||
parent->shared->u.compnd.memb[idx].size = total_size;
|
||||
parent->shared->u.compnd.memb[idx].type = H5T_copy (member, H5T_COPY_ALL);
|
||||
|
||||
parent->u.compnd.sorted = H5T_SORT_NONE;
|
||||
parent->u.compnd.nmembs++;
|
||||
parent->shared->u.compnd.sorted = H5T_SORT_NONE;
|
||||
parent->shared->u.compnd.nmembs++;
|
||||
|
||||
/* Determine if the compound datatype stayed packed */
|
||||
if(parent->u.compnd.packed) {
|
||||
if(parent->shared->u.compnd.packed) {
|
||||
/* Check if the member type is packed */
|
||||
if(H5T_is_packed(parent->u.compnd.memb[idx].type)>0) {
|
||||
if(H5T_is_packed(parent->shared->u.compnd.memb[idx].type)>0) {
|
||||
if(idx==0) {
|
||||
/* If the is the first member, the datatype is not packed
|
||||
* if the first member isn't at offset 0
|
||||
*/
|
||||
if(parent->u.compnd.memb[idx].offset>0)
|
||||
parent->u.compnd.packed=FALSE;
|
||||
if(parent->shared->u.compnd.memb[idx].offset>0)
|
||||
parent->shared->u.compnd.packed=FALSE;
|
||||
} /* end if */
|
||||
else {
|
||||
/* If the is not the first member, the datatype is not
|
||||
* packed if the new member isn't adjoining the previous member
|
||||
*/
|
||||
if(parent->u.compnd.memb[idx].offset!=(parent->u.compnd.memb[idx-1].offset+parent->u.compnd.memb[idx-1].size))
|
||||
parent->u.compnd.packed=FALSE;
|
||||
if(parent->shared->u.compnd.memb[idx].offset!=(parent->shared->u.compnd.memb[idx-1].offset+parent->shared->u.compnd.memb[idx-1].size))
|
||||
parent->shared->u.compnd.packed=FALSE;
|
||||
} /* end else */
|
||||
} /* end if */
|
||||
else
|
||||
parent->u.compnd.packed=FALSE;
|
||||
parent->shared->u.compnd.packed=FALSE;
|
||||
} /* end if */
|
||||
|
||||
/*
|
||||
* Set the "force conversion" flag if the field's datatype indicates
|
||||
*/
|
||||
if(member->force_conv==TRUE)
|
||||
parent->force_conv=TRUE;
|
||||
if(member->shared->force_conv==TRUE)
|
||||
parent->shared->force_conv=TRUE;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
@ -534,38 +534,38 @@ H5T_pack(H5T_t *dt)
|
||||
HGOTO_DONE(SUCCEED);
|
||||
|
||||
/* Check for packing unmodifiable datatype */
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "datatype is read-only")
|
||||
|
||||
if(dt->parent) {
|
||||
if (H5T_pack(dt->parent) < 0)
|
||||
if(dt->shared->parent) {
|
||||
if (H5T_pack(dt->shared->parent) < 0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to pack parent of datatype")
|
||||
|
||||
/* Adjust size of datatype appropriately */
|
||||
if(dt->type==H5T_ARRAY)
|
||||
dt->size = dt->parent->size * dt->u.array.nelem;
|
||||
else if(dt->type!=H5T_VLEN)
|
||||
dt->size = dt->parent->size;
|
||||
if(dt->shared->type==H5T_ARRAY)
|
||||
dt->shared->size = dt->shared->parent->shared->size * dt->shared->u.array.nelem;
|
||||
else if(dt->shared->type!=H5T_VLEN)
|
||||
dt->shared->size = dt->shared->parent->shared->size;
|
||||
} /* end if */
|
||||
else if(dt->type==H5T_COMPOUND) {
|
||||
else if(dt->shared->type==H5T_COMPOUND) {
|
||||
/* Recursively pack the members */
|
||||
for (i=0; i<dt->u.compnd.nmembs; i++)
|
||||
if (H5T_pack(dt->u.compnd.memb[i].type) < 0)
|
||||
for (i=0; i<dt->shared->u.compnd.nmembs; i++)
|
||||
if (H5T_pack(dt->shared->u.compnd.memb[i].type) < 0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to pack part of a compound datatype")
|
||||
|
||||
/* Remove padding between members */
|
||||
if(H5T_sort_value(dt, NULL)<0)
|
||||
HGOTO_ERROR(H5E_INTERNAL, H5E_CANTCOMPARE, FAIL, "value sort failed")
|
||||
for (i=0, offset=0; i<dt->u.compnd.nmembs; i++) {
|
||||
dt->u.compnd.memb[i].offset = offset;
|
||||
offset += dt->u.compnd.memb[i].size;
|
||||
for (i=0, offset=0; i<dt->shared->u.compnd.nmembs; i++) {
|
||||
dt->shared->u.compnd.memb[i].offset = offset;
|
||||
offset += dt->shared->u.compnd.memb[i].size;
|
||||
}
|
||||
|
||||
/* Change total size */
|
||||
dt->size = MAX(1, offset);
|
||||
dt->shared->size = MAX(1, offset);
|
||||
|
||||
/* Mark the type as packed now */
|
||||
dt->u.compnd.packed=TRUE;
|
||||
dt->shared->u.compnd.packed=TRUE;
|
||||
} /* end if */
|
||||
} /* end if */
|
||||
|
||||
@ -599,12 +599,12 @@ H5T_is_packed(const H5T_t *dt)
|
||||
assert(dt);
|
||||
|
||||
/* Go up the chain as far as possible */
|
||||
while(dt->parent)
|
||||
dt=dt->parent;
|
||||
while(dt->shared->parent)
|
||||
dt=dt->shared->parent;
|
||||
|
||||
/* If this is a compound datatype, check if it is packed */
|
||||
if(dt->type==H5T_COMPOUND)
|
||||
ret_value=(htri_t)dt->u.compnd.packed;
|
||||
if(dt->shared->type==H5T_COMPOUND)
|
||||
ret_value=(htri_t)dt->shared->u.compnd.packed;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
|
918
src/H5Tconv.c
918
src/H5Tconv.c
File diff suppressed because it is too large
Load Diff
@ -86,16 +86,16 @@ H5Tget_cset(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_CSET_ERROR, "not a data type")
|
||||
while (dt->parent && !H5T_IS_STRING(dt))
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt))
|
||||
while (dt->shared->parent && !H5T_IS_STRING(dt->shared))
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, H5T_CSET_ERROR, "operation not defined for data type class")
|
||||
|
||||
/* result */
|
||||
if(H5T_IS_FIXED_STRING(dt))
|
||||
ret_value = dt->u.atomic.u.s.cset;
|
||||
if(H5T_IS_FIXED_STRING(dt->shared))
|
||||
ret_value = dt->shared->u.atomic.u.s.cset;
|
||||
else
|
||||
ret_value = dt->u.vlen.cset;
|
||||
ret_value = dt->shared->u.vlen.cset;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -132,20 +132,20 @@ H5Tset_cset(hid_t type_id, H5T_cset_t cset)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "data type is read-only")
|
||||
if (cset < 0 || cset >= H5T_NCSET)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "illegal character set type")
|
||||
while (dt->parent && !H5T_IS_STRING(dt))
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt))
|
||||
while (dt->shared->parent && !H5T_IS_STRING(dt->shared))
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "operation not defined for data type class")
|
||||
|
||||
/* Commit */
|
||||
if(H5T_IS_FIXED_STRING(dt))
|
||||
dt->u.atomic.u.s.cset = cset;
|
||||
if(H5T_IS_FIXED_STRING(dt->shared))
|
||||
dt->shared->u.atomic.u.s.cset = cset;
|
||||
else
|
||||
dt->u.vlen.cset = cset;
|
||||
dt->shared->u.vlen.cset = cset;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
|
@ -33,8 +33,9 @@
|
||||
#include "H5MMprivate.h" /*memory management */
|
||||
#include "H5Tpkg.h" /*data-type functions */
|
||||
|
||||
/* Declare extern the free list for H5T_t's */
|
||||
/* Declare extern the free lists for H5T_t's and H5T_shared_t's */
|
||||
H5FL_EXTERN(H5T_t);
|
||||
H5FL_EXTERN(H5T_shared_t);
|
||||
|
||||
/* Static local functions */
|
||||
static char *H5T_enum_nameof(H5T_t *dt, const void *value, char *name/*out*/,
|
||||
@ -93,7 +94,7 @@ H5Tenum_create(hid_t parent_id)
|
||||
H5TRACE1("i","i",parent_id);
|
||||
|
||||
/* Check args */
|
||||
if (NULL==(parent=H5I_object_verify(parent_id,H5I_DATATYPE)) || H5T_INTEGER!=parent->type)
|
||||
if (NULL==(parent=H5I_object_verify(parent_id,H5I_DATATYPE)) || H5T_INTEGER!=parent->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an integer data type")
|
||||
|
||||
/* Build new type */
|
||||
@ -137,11 +138,15 @@ H5T_enum_create(const H5T_t *parent)
|
||||
|
||||
/* Build new type */
|
||||
if (NULL==(ret_value = H5FL_CALLOC(H5T_t)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
|
||||
ret_value->type = H5T_ENUM;
|
||||
ret_value->parent = H5T_copy(parent, H5T_COPY_ALL);
|
||||
assert(ret_value->parent);
|
||||
ret_value->size = ret_value->parent->size;
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
|
||||
if (NULL==(ret_value->shared=H5FL_CALLOC(H5T_shared_t))) {
|
||||
H5FL_FREE(H5T_t, ret_value);
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
}
|
||||
ret_value->shared->type = H5T_ENUM;
|
||||
ret_value->shared->parent = H5T_copy(parent, H5T_COPY_ALL);
|
||||
assert(ret_value->shared->parent);
|
||||
ret_value->shared->size = ret_value->shared->parent->shared->size;
|
||||
ret_value->ent.header = HADDR_UNDEF;
|
||||
|
||||
done:
|
||||
@ -182,7 +187,7 @@ H5Tenum_insert(hid_t type, const char *name, const void *value)
|
||||
/* Check args */
|
||||
if (NULL==(dt=H5I_object_verify(type,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_ENUM!=dt->type)
|
||||
if (H5T_ENUM!=dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an enumeration data type")
|
||||
if (!name || !*name)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no name specified")
|
||||
@ -232,31 +237,31 @@ H5T_enum_insert(H5T_t *dt, const char *name, const void *value)
|
||||
assert(value);
|
||||
|
||||
/* The name and value had better not already exist */
|
||||
for (i=0; i<dt->u.enumer.nmembs; i++) {
|
||||
if (!HDstrcmp(dt->u.enumer.name[i], name))
|
||||
for (i=0; i<dt->shared->u.enumer.nmembs; i++) {
|
||||
if (!HDstrcmp(dt->shared->u.enumer.name[i], name))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "name redefinition")
|
||||
if (!HDmemcmp(dt->u.enumer.value+i*dt->size, value, dt->size))
|
||||
if (!HDmemcmp(dt->shared->u.enumer.value+i*dt->shared->size, value, dt->shared->size))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "value redefinition")
|
||||
}
|
||||
|
||||
/* Increase table sizes */
|
||||
if (dt->u.enumer.nmembs >= dt->u.enumer.nalloc) {
|
||||
unsigned n = MAX(32, 2*dt->u.enumer.nalloc);
|
||||
if (NULL==(names=H5MM_realloc(dt->u.enumer.name, n*sizeof(char*))))
|
||||
if (dt->shared->u.enumer.nmembs >= dt->shared->u.enumer.nalloc) {
|
||||
unsigned n = MAX(32, 2*dt->shared->u.enumer.nalloc);
|
||||
if (NULL==(names=H5MM_realloc(dt->shared->u.enumer.name, n*sizeof(char*))))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
|
||||
dt->u.enumer.name = names;
|
||||
dt->shared->u.enumer.name = names;
|
||||
|
||||
if (NULL==(values=H5MM_realloc(dt->u.enumer.value, n*dt->size)))
|
||||
if (NULL==(values=H5MM_realloc(dt->shared->u.enumer.value, n*dt->shared->size)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
|
||||
dt->u.enumer.value = values;
|
||||
dt->u.enumer.nalloc = n;
|
||||
dt->shared->u.enumer.value = values;
|
||||
dt->shared->u.enumer.nalloc = n;
|
||||
}
|
||||
|
||||
/* Insert new member at end of member arrays */
|
||||
dt->u.enumer.sorted = H5T_SORT_NONE;
|
||||
i = dt->u.enumer.nmembs++;
|
||||
dt->u.enumer.name[i] = H5MM_xstrdup(name);
|
||||
HDmemcpy(dt->u.enumer.value+i*dt->size, value, dt->size);
|
||||
dt->shared->u.enumer.sorted = H5T_SORT_NONE;
|
||||
i = dt->shared->u.enumer.nmembs++;
|
||||
dt->shared->u.enumer.name[i] = H5MM_xstrdup(name);
|
||||
HDmemcpy(dt->shared->u.enumer.value+i*dt->shared->size, value, dt->shared->size);
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
@ -299,9 +304,9 @@ H5Tget_member_value(hid_t type, unsigned membno, void *value/*out*/)
|
||||
|
||||
if (NULL==(dt=H5I_object_verify(type,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_ENUM!=dt->type)
|
||||
if (H5T_ENUM!=dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for data type class")
|
||||
if (membno>=dt->u.enumer.nmembs)
|
||||
if (membno>=dt->shared->u.enumer.nmembs)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid member number")
|
||||
if (!value)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "null value buffer")
|
||||
@ -341,7 +346,7 @@ H5T_get_member_value(const H5T_t *dt, unsigned membno, void *value/*out*/)
|
||||
assert(dt);
|
||||
assert(value);
|
||||
|
||||
HDmemcpy(value, dt->u.enumer.value + membno*dt->size, dt->size);
|
||||
HDmemcpy(value, dt->shared->u.enumer.value + membno*dt->shared->size, dt->shared->size);
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
@ -383,7 +388,7 @@ H5Tenum_nameof(hid_t type, const void *value, char *name/*out*/, size_t size)
|
||||
/* Check args */
|
||||
if (NULL==(dt=H5I_object_verify(type,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_ENUM!=dt->type)
|
||||
if (H5T_ENUM!=dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an enumeration data type")
|
||||
if (!value)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no value supplied")
|
||||
@ -433,24 +438,24 @@ H5T_enum_nameof(H5T_t *dt, const void *value, char *name/*out*/, size_t size)
|
||||
FUNC_ENTER_NOAPI(H5T_enum_nameof, NULL)
|
||||
|
||||
/* Check args */
|
||||
assert(dt && H5T_ENUM==dt->type);
|
||||
assert(dt && H5T_ENUM==dt->shared->type);
|
||||
assert(value);
|
||||
assert(name || 0==size);
|
||||
if (name && size>0) *name = '\0';
|
||||
|
||||
/* Sanity check */
|
||||
if (dt->u.enumer.nmembs == 0)
|
||||
if (dt->shared->u.enumer.nmembs == 0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, NULL, "datatype has no members")
|
||||
|
||||
/* Do a binary search over the values to find the correct one */
|
||||
if(H5T_sort_value(dt, NULL)<0)
|
||||
HGOTO_ERROR(H5E_INTERNAL, H5E_CANTCOMPARE, NULL, "value sort failed")
|
||||
lt = 0;
|
||||
rt = dt->u.enumer.nmembs;
|
||||
rt = dt->shared->u.enumer.nmembs;
|
||||
|
||||
while (lt<rt) {
|
||||
md = (lt+rt)/2;
|
||||
cmp = HDmemcmp(value, dt->u.enumer.value+md*dt->size, dt->size);
|
||||
cmp = HDmemcmp(value, dt->shared->u.enumer.value+md*dt->shared->size, dt->shared->size);
|
||||
if (cmp<0) {
|
||||
rt = md;
|
||||
} else if (cmp>0) {
|
||||
@ -464,10 +469,10 @@ H5T_enum_nameof(H5T_t *dt, const void *value, char *name/*out*/, size_t size)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, NULL, "value is currently not defined")
|
||||
|
||||
/* Save result name */
|
||||
if (!name && NULL==(name=H5MM_malloc(HDstrlen(dt->u.enumer.name[md])+1)))
|
||||
if (!name && NULL==(name=H5MM_malloc(HDstrlen(dt->shared->u.enumer.name[md])+1)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
|
||||
HDstrncpy(name, dt->u.enumer.name[md], size);
|
||||
if (HDstrlen(dt->u.enumer.name[md])>=size)
|
||||
HDstrncpy(name, dt->shared->u.enumer.name[md], size);
|
||||
if (HDstrlen(dt->shared->u.enumer.name[md])>=size)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_NOSPACE, NULL, "name has been truncated")
|
||||
|
||||
/* Set return value */
|
||||
@ -509,7 +514,7 @@ H5Tenum_valueof(hid_t type, const char *name, void *value/*out*/)
|
||||
/* Check args */
|
||||
if (NULL==(dt=H5I_object_verify(type,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_ENUM!=dt->type)
|
||||
if (H5T_ENUM!=dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an enumeration data type")
|
||||
if (!name || !*name)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no name")
|
||||
@ -553,23 +558,23 @@ H5T_enum_valueof(H5T_t *dt, const char *name, void *value/*out*/)
|
||||
FUNC_ENTER_NOAPI(H5T_enum_valueof, FAIL)
|
||||
|
||||
/* Check args */
|
||||
assert(dt && H5T_ENUM==dt->type);
|
||||
assert(dt && H5T_ENUM==dt->shared->type);
|
||||
assert(name && *name);
|
||||
assert(value);
|
||||
|
||||
/* Sanity check */
|
||||
if (dt->u.enumer.nmembs == 0)
|
||||
if (dt->shared->u.enumer.nmembs == 0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, FAIL, "datatype has no members")
|
||||
|
||||
/* Do a binary search over the names to find the correct one */
|
||||
if(H5T_sort_name(dt, NULL)<0)
|
||||
HGOTO_ERROR(H5E_INTERNAL, H5E_CANTCOMPARE, FAIL, "value sort failed")
|
||||
lt = 0;
|
||||
rt = dt->u.enumer.nmembs;
|
||||
rt = dt->shared->u.enumer.nmembs;
|
||||
|
||||
while (lt<rt) {
|
||||
md = (lt+rt)/2;
|
||||
cmp = HDstrcmp(name, dt->u.enumer.name[md]);
|
||||
cmp = HDstrcmp(name, dt->shared->u.enumer.name[md]);
|
||||
if (cmp<0) {
|
||||
rt = md;
|
||||
} else if (cmp>0) {
|
||||
@ -582,7 +587,7 @@ H5T_enum_valueof(H5T_t *dt, const char *name, void *value/*out*/)
|
||||
if (cmp!=0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, FAIL, "string doesn't exist in the enumeration type")
|
||||
|
||||
HDmemcpy(value, dt->u.enumer.value+md*dt->size, dt->size);
|
||||
HDmemcpy(value, dt->shared->u.enumer.value+md*dt->shared->size, dt->shared->size);
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
|
138
src/H5Tfields.c
138
src/H5Tfields.c
@ -125,10 +125,10 @@ H5T_get_nmembers(const H5T_t *dt)
|
||||
|
||||
assert(dt);
|
||||
|
||||
if (H5T_COMPOUND==dt->type)
|
||||
ret_value = (int)dt->u.compnd.nmembs;
|
||||
else if (H5T_ENUM==dt->type)
|
||||
ret_value = (int)dt->u.enumer.nmembs;
|
||||
if (H5T_COMPOUND==dt->shared->type)
|
||||
ret_value = (int)dt->shared->u.compnd.nmembs;
|
||||
else if (H5T_ENUM==dt->shared->type)
|
||||
ret_value = (int)dt->shared->u.enumer.nmembs;
|
||||
else
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "operation not supported for type class")
|
||||
|
||||
@ -214,17 +214,17 @@ H5T_get_member_name(H5T_t const *dt, unsigned membno)
|
||||
|
||||
assert(dt);
|
||||
|
||||
switch (dt->type) {
|
||||
switch (dt->shared->type) {
|
||||
case H5T_COMPOUND:
|
||||
if (membno>=dt->u.compnd.nmembs)
|
||||
if (membno>=dt->shared->u.compnd.nmembs)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid member number")
|
||||
ret_value = H5MM_xstrdup(dt->u.compnd.memb[membno].name);
|
||||
ret_value = H5MM_xstrdup(dt->shared->u.compnd.memb[membno].name);
|
||||
break;
|
||||
|
||||
case H5T_ENUM:
|
||||
if (membno>=dt->u.enumer.nmembs)
|
||||
if (membno>=dt->shared->u.enumer.nmembs)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid member number")
|
||||
ret_value = H5MM_xstrdup(dt->u.enumer.name[membno]);
|
||||
ret_value = H5MM_xstrdup(dt->shared->u.enumer.name[membno]);
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -270,16 +270,16 @@ H5Tget_member_index(hid_t type_id, const char *name)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
|
||||
/* Locate member by name */
|
||||
switch (dt->type) {
|
||||
switch (dt->shared->type) {
|
||||
case H5T_COMPOUND:
|
||||
for(i=0; i< dt->u.compnd.nmembs; i++) {
|
||||
if(!HDstrcmp(dt->u.compnd.memb[i].name, name))
|
||||
for(i=0; i< dt->shared->u.compnd.nmembs; i++) {
|
||||
if(!HDstrcmp(dt->shared->u.compnd.memb[i].name, name))
|
||||
HGOTO_DONE((int)i)
|
||||
}
|
||||
break;
|
||||
case H5T_ENUM:
|
||||
for(i=0; i< dt->u.enumer.nmembs; i++) {
|
||||
if(!HDstrcmp(dt->u.enumer.name[i], name))
|
||||
for(i=0; i< dt->shared->u.enumer.nmembs; i++) {
|
||||
if(!HDstrcmp(dt->shared->u.enumer.name[i], name))
|
||||
HGOTO_DONE((int)i)
|
||||
}
|
||||
break;
|
||||
@ -323,20 +323,20 @@ H5T_sort_value(H5T_t *dt, int *map)
|
||||
|
||||
/* Check args */
|
||||
assert(dt);
|
||||
assert(H5T_COMPOUND==dt->type || H5T_ENUM==dt->type);
|
||||
assert(H5T_COMPOUND==dt->shared->type || H5T_ENUM==dt->shared->type);
|
||||
|
||||
/* Use a bubble sort because we can short circuit */
|
||||
if (H5T_COMPOUND==dt->type) {
|
||||
if (H5T_SORT_VALUE!=dt->u.compnd.sorted) {
|
||||
dt->u.compnd.sorted = H5T_SORT_VALUE;
|
||||
nmembs = dt->u.compnd.nmembs;
|
||||
if (H5T_COMPOUND==dt->shared->type) {
|
||||
if (H5T_SORT_VALUE!=dt->shared->u.compnd.sorted) {
|
||||
dt->shared->u.compnd.sorted = H5T_SORT_VALUE;
|
||||
nmembs = dt->shared->u.compnd.nmembs;
|
||||
for (i=nmembs-1, swapped=TRUE; i>0 && swapped; --i) {
|
||||
for (j=0, swapped=FALSE; j<i; j++) {
|
||||
if (dt->u.compnd.memb[j].offset >
|
||||
dt->u.compnd.memb[j+1].offset) {
|
||||
H5T_cmemb_t tmp = dt->u.compnd.memb[j];
|
||||
dt->u.compnd.memb[j] = dt->u.compnd.memb[j+1];
|
||||
dt->u.compnd.memb[j+1] = tmp;
|
||||
if (dt->shared->u.compnd.memb[j].offset >
|
||||
dt->shared->u.compnd.memb[j+1].offset) {
|
||||
H5T_cmemb_t tmp = dt->shared->u.compnd.memb[j];
|
||||
dt->shared->u.compnd.memb[j] = dt->shared->u.compnd.memb[j+1];
|
||||
dt->shared->u.compnd.memb[j+1] = tmp;
|
||||
if (map) {
|
||||
int x = map[j];
|
||||
map[j] = map[j+1];
|
||||
@ -349,32 +349,32 @@ H5T_sort_value(H5T_t *dt, int *map)
|
||||
#ifndef NDEBUG
|
||||
/* I never trust a sort :-) -RPM */
|
||||
for (i=0; i<nmembs-1; i++) {
|
||||
assert(dt->u.compnd.memb[i].offset <
|
||||
dt->u.compnd.memb[i+1].offset);
|
||||
assert(dt->shared->u.compnd.memb[i].offset <
|
||||
dt->shared->u.compnd.memb[i+1].offset);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
} else if (H5T_ENUM==dt->type) {
|
||||
if (H5T_SORT_VALUE!=dt->u.enumer.sorted) {
|
||||
dt->u.enumer.sorted = H5T_SORT_VALUE;
|
||||
nmembs = dt->u.enumer.nmembs;
|
||||
size = dt->size;
|
||||
} else if (H5T_ENUM==dt->shared->type) {
|
||||
if (H5T_SORT_VALUE!=dt->shared->u.enumer.sorted) {
|
||||
dt->shared->u.enumer.sorted = H5T_SORT_VALUE;
|
||||
nmembs = dt->shared->u.enumer.nmembs;
|
||||
size = dt->shared->size;
|
||||
assert(size<=sizeof(tbuf));
|
||||
for (i=nmembs-1, swapped=TRUE; i>0 && swapped; --i) {
|
||||
for (j=0, swapped=FALSE; j<i; j++) {
|
||||
if (HDmemcmp(dt->u.enumer.value+j*size,
|
||||
dt->u.enumer.value+(j+1)*size,
|
||||
if (HDmemcmp(dt->shared->u.enumer.value+j*size,
|
||||
dt->shared->u.enumer.value+(j+1)*size,
|
||||
size)>0) {
|
||||
/* Swap names */
|
||||
char *tmp = dt->u.enumer.name[j];
|
||||
dt->u.enumer.name[j] = dt->u.enumer.name[j+1];
|
||||
dt->u.enumer.name[j+1] = tmp;
|
||||
char *tmp = dt->shared->u.enumer.name[j];
|
||||
dt->shared->u.enumer.name[j] = dt->shared->u.enumer.name[j+1];
|
||||
dt->shared->u.enumer.name[j+1] = tmp;
|
||||
|
||||
/* Swap values */
|
||||
HDmemcpy(tbuf, dt->u.enumer.value+j*size, size);
|
||||
HDmemcpy(dt->u.enumer.value+j*size,
|
||||
dt->u.enumer.value+(j+1)*size, size);
|
||||
HDmemcpy(dt->u.enumer.value+(j+1)*size, tbuf, size);
|
||||
HDmemcpy(tbuf, dt->shared->u.enumer.value+j*size, size);
|
||||
HDmemcpy(dt->shared->u.enumer.value+j*size,
|
||||
dt->shared->u.enumer.value+(j+1)*size, size);
|
||||
HDmemcpy(dt->shared->u.enumer.value+(j+1)*size, tbuf, size);
|
||||
|
||||
/* Swap map */
|
||||
if (map) {
|
||||
@ -390,8 +390,8 @@ H5T_sort_value(H5T_t *dt, int *map)
|
||||
#ifndef NDEBUG
|
||||
/* I never trust a sort :-) -RPM */
|
||||
for (i=0; i<nmembs-1; i++) {
|
||||
assert(HDmemcmp(dt->u.enumer.value+i*size,
|
||||
dt->u.enumer.value+(i+1)*size,
|
||||
assert(HDmemcmp(dt->shared->u.enumer.value+i*size,
|
||||
dt->shared->u.enumer.value+(i+1)*size,
|
||||
size)<0);
|
||||
}
|
||||
#endif
|
||||
@ -434,20 +434,20 @@ H5T_sort_name(H5T_t *dt, int *map)
|
||||
|
||||
/* Check args */
|
||||
assert(dt);
|
||||
assert(H5T_COMPOUND==dt->type || H5T_ENUM==dt->type);
|
||||
assert(H5T_COMPOUND==dt->shared->type || H5T_ENUM==dt->shared->type);
|
||||
|
||||
/* Use a bubble sort because we can short circuit */
|
||||
if (H5T_COMPOUND==dt->type) {
|
||||
if (H5T_SORT_NAME!=dt->u.compnd.sorted) {
|
||||
dt->u.compnd.sorted = H5T_SORT_NAME;
|
||||
nmembs = dt->u.compnd.nmembs;
|
||||
if (H5T_COMPOUND==dt->shared->type) {
|
||||
if (H5T_SORT_NAME!=dt->shared->u.compnd.sorted) {
|
||||
dt->shared->u.compnd.sorted = H5T_SORT_NAME;
|
||||
nmembs = dt->shared->u.compnd.nmembs;
|
||||
for (i=nmembs-1, swapped=TRUE; i>0 && swapped; --i) {
|
||||
for (j=0, swapped=FALSE; j<i; j++) {
|
||||
if (HDstrcmp(dt->u.compnd.memb[j].name,
|
||||
dt->u.compnd.memb[j+1].name)>0) {
|
||||
H5T_cmemb_t tmp = dt->u.compnd.memb[j];
|
||||
dt->u.compnd.memb[j] = dt->u.compnd.memb[j+1];
|
||||
dt->u.compnd.memb[j+1] = tmp;
|
||||
if (HDstrcmp(dt->shared->u.compnd.memb[j].name,
|
||||
dt->shared->u.compnd.memb[j+1].name)>0) {
|
||||
H5T_cmemb_t tmp = dt->shared->u.compnd.memb[j];
|
||||
dt->shared->u.compnd.memb[j] = dt->shared->u.compnd.memb[j+1];
|
||||
dt->shared->u.compnd.memb[j+1] = tmp;
|
||||
swapped = TRUE;
|
||||
if (map) {
|
||||
int x = map[j];
|
||||
@ -460,31 +460,31 @@ H5T_sort_name(H5T_t *dt, int *map)
|
||||
#ifndef NDEBUG
|
||||
/* I never trust a sort :-) -RPM */
|
||||
for (i=0; i<nmembs-1; i++) {
|
||||
assert(HDstrcmp(dt->u.compnd.memb[i].name,
|
||||
dt->u.compnd.memb[i+1].name)<0);
|
||||
assert(HDstrcmp(dt->shared->u.compnd.memb[i].name,
|
||||
dt->shared->u.compnd.memb[i+1].name)<0);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
} else if (H5T_ENUM==dt->type) {
|
||||
if (H5T_SORT_NAME!=dt->u.enumer.sorted) {
|
||||
dt->u.enumer.sorted = H5T_SORT_NAME;
|
||||
nmembs = dt->u.enumer.nmembs;
|
||||
size = dt->size;
|
||||
} else if (H5T_ENUM==dt->shared->type) {
|
||||
if (H5T_SORT_NAME!=dt->shared->u.enumer.sorted) {
|
||||
dt->shared->u.enumer.sorted = H5T_SORT_NAME;
|
||||
nmembs = dt->shared->u.enumer.nmembs;
|
||||
size = dt->shared->size;
|
||||
assert(size<=sizeof(tbuf));
|
||||
for (i=nmembs-1, swapped=TRUE; i>0 && swapped; --i) {
|
||||
for (j=0, swapped=FALSE; j<i; j++) {
|
||||
if (HDstrcmp(dt->u.enumer.name[j],
|
||||
dt->u.enumer.name[j+1])>0) {
|
||||
if (HDstrcmp(dt->shared->u.enumer.name[j],
|
||||
dt->shared->u.enumer.name[j+1])>0) {
|
||||
/* Swap names */
|
||||
char *tmp = dt->u.enumer.name[j];
|
||||
dt->u.enumer.name[j] = dt->u.enumer.name[j+1];
|
||||
dt->u.enumer.name[j+1] = tmp;
|
||||
char *tmp = dt->shared->u.enumer.name[j];
|
||||
dt->shared->u.enumer.name[j] = dt->shared->u.enumer.name[j+1];
|
||||
dt->shared->u.enumer.name[j+1] = tmp;
|
||||
|
||||
/* Swap values */
|
||||
HDmemcpy(tbuf, dt->u.enumer.value+j*size, size);
|
||||
HDmemcpy(dt->u.enumer.value+j*size,
|
||||
dt->u.enumer.value+(j+1)*size, size);
|
||||
HDmemcpy(dt->u.enumer.value+(j+1)*size, tbuf, size);
|
||||
HDmemcpy(tbuf, dt->shared->u.enumer.value+j*size, size);
|
||||
HDmemcpy(dt->shared->u.enumer.value+j*size,
|
||||
dt->shared->u.enumer.value+(j+1)*size, size);
|
||||
HDmemcpy(dt->shared->u.enumer.value+(j+1)*size, tbuf, size);
|
||||
|
||||
/* Swap map */
|
||||
if (map) {
|
||||
@ -500,7 +500,7 @@ H5T_sort_name(H5T_t *dt, int *map)
|
||||
#ifndef NDEBUG
|
||||
/* I never trust a sort :-) -RPM */
|
||||
for (i=0; i<nmembs-1; i++)
|
||||
assert(HDstrcmp(dt->u.enumer.name[i], dt->u.enumer.name[i+1])<0);
|
||||
assert(HDstrcmp(dt->shared->u.enumer.name[i], dt->shared->u.enumer.name[i+1])<0);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
@ -118,15 +118,15 @@ H5T_get_sign(H5T_t const *dt)
|
||||
assert(dt);
|
||||
|
||||
/* Defer to parent */
|
||||
while(dt->parent)
|
||||
dt = dt->parent;
|
||||
while(dt->shared->parent)
|
||||
dt = dt->shared->parent;
|
||||
|
||||
/* Check args */
|
||||
if (H5T_INTEGER!=dt->type)
|
||||
if (H5T_INTEGER!=dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5T_SGN_ERROR, "operation not defined for datatype class")
|
||||
|
||||
/* Sign */
|
||||
ret_value = dt->u.atomic.u.i.sign;
|
||||
ret_value = dt->shared->u.atomic.u.i.sign;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_NOAPI(ret_value)
|
||||
@ -162,19 +162,19 @@ H5Tset_sign(hid_t type_id, H5T_sign_t sign)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an integer datatype")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
|
||||
if (sign < 0 || sign >= H5T_NSGN)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "illegal sign type")
|
||||
if (H5T_ENUM==dt->type && dt->u.enumer.nmembs>0)
|
||||
if (H5T_ENUM==dt->shared->type && dt->shared->u.enumer.nmembs>0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not allowed after members are defined")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_INTEGER!=dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_INTEGER!=dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for datatype class")
|
||||
|
||||
/* Commit */
|
||||
dt->u.atomic.u.i.sign = sign;
|
||||
dt->shared->u.atomic.u.i.sign = sign;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
|
@ -91,17 +91,17 @@ H5Tget_fields(hid_t type_id, size_t *spos/*out*/,
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for datatype class")
|
||||
|
||||
/* Get values */
|
||||
if (spos) *spos = dt->u.atomic.u.f.sign;
|
||||
if (epos) *epos = dt->u.atomic.u.f.epos;
|
||||
if (esize) *esize = dt->u.atomic.u.f.esize;
|
||||
if (mpos) *mpos = dt->u.atomic.u.f.mpos;
|
||||
if (msize) *msize = dt->u.atomic.u.f.msize;
|
||||
if (spos) *spos = dt->shared->u.atomic.u.f.sign;
|
||||
if (epos) *epos = dt->shared->u.atomic.u.f.epos;
|
||||
if (esize) *esize = dt->shared->u.atomic.u.f.esize;
|
||||
if (mpos) *mpos = dt->shared->u.atomic.u.f.mpos;
|
||||
if (msize) *msize = dt->shared->u.atomic.u.f.msize;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -143,17 +143,17 @@ H5Tset_fields(hid_t type_id, size_t spos, size_t epos, size_t esize,
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for datatype class")
|
||||
if (epos + esize > dt->u.atomic.prec)
|
||||
if (epos + esize > dt->shared->u.atomic.prec)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "exponent bit field size/location is invalid")
|
||||
if (mpos + msize > dt->u.atomic.prec)
|
||||
if (mpos + msize > dt->shared->u.atomic.prec)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mantissa bit field size/location is invalid")
|
||||
if (spos >= dt->u.atomic.prec)
|
||||
if (spos >= dt->shared->u.atomic.prec)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "sign location is not valid")
|
||||
|
||||
/* Check for overlap */
|
||||
@ -166,11 +166,11 @@ H5Tset_fields(hid_t type_id, size_t spos, size_t epos, size_t esize,
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "exponent and mantissa fields overlap")
|
||||
|
||||
/* Commit */
|
||||
dt->u.atomic.u.f.sign = spos;
|
||||
dt->u.atomic.u.f.epos = epos;
|
||||
dt->u.atomic.u.f.mpos = mpos;
|
||||
dt->u.atomic.u.f.esize = esize;
|
||||
dt->u.atomic.u.f.msize = msize;
|
||||
dt->shared->u.atomic.u.f.sign = spos;
|
||||
dt->shared->u.atomic.u.f.epos = epos;
|
||||
dt->shared->u.atomic.u.f.mpos = mpos;
|
||||
dt->shared->u.atomic.u.f.esize = esize;
|
||||
dt->shared->u.atomic.u.f.msize = msize;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -206,13 +206,13 @@ H5Tget_ebias(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, 0, "operation not defined for datatype class")
|
||||
|
||||
/* bias */
|
||||
H5_ASSIGN_OVERFLOW(ret_value,dt->u.atomic.u.f.ebias,uint64_t,size_t);
|
||||
H5_ASSIGN_OVERFLOW(ret_value,dt->shared->u.atomic.u.f.ebias,uint64_t,size_t);
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -247,15 +247,15 @@ H5Tset_ebias(hid_t type_id, size_t ebias)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for datatype class")
|
||||
|
||||
/* Commit */
|
||||
dt->u.atomic.u.f.ebias = ebias;
|
||||
dt->shared->u.atomic.u.f.ebias = ebias;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -293,13 +293,13 @@ H5Tget_norm(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_NORM_ERROR, "not a datatype")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5T_NORM_ERROR, "operation not defined for datatype class")
|
||||
|
||||
/* norm */
|
||||
ret_value = dt->u.atomic.u.f.norm;
|
||||
ret_value = dt->shared->u.atomic.u.f.norm;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -335,17 +335,17 @@ H5Tset_norm(hid_t type_id, H5T_norm_t norm)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
|
||||
if (norm < 0 || norm > H5T_NORM_NONE)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "illegal normalization")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for datatype class")
|
||||
|
||||
/* Commit */
|
||||
dt->u.atomic.u.f.norm = norm;
|
||||
dt->shared->u.atomic.u.f.norm = norm;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -385,13 +385,13 @@ H5Tget_inpad(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_PAD_ERROR, "not a datatype")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, H5T_PAD_ERROR, "operation not defined for datatype class")
|
||||
|
||||
/* pad */
|
||||
ret_value = dt->u.atomic.u.f.pad;
|
||||
ret_value = dt->shared->u.atomic.u.f.pad;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -429,17 +429,17 @@ H5Tset_inpad(hid_t type_id, H5T_pad_t pad)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
|
||||
if (pad < 0 || pad >= H5T_NPAD)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "illegal internal pad type")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_FLOAT != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for datatype class")
|
||||
|
||||
/* Commit */
|
||||
dt->u.atomic.u.f.pad = pad;
|
||||
dt->shared->u.atomic.u.f.pad = pad;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
|
@ -184,7 +184,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig
|
||||
if((sign = H5T_get_sign(dtype))==H5T_SGN_ERROR)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a valid signess")
|
||||
|
||||
prec = dtype->u.atomic.prec;
|
||||
prec = dtype->shared->u.atomic.prec;
|
||||
|
||||
if((ret_value = H5T_get_native_integer(prec, sign, direction, struct_align, offset, comp_size))==NULL)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot retrieve integer type")
|
||||
@ -201,7 +201,7 @@ H5T_get_native_type(H5T_t *dtype, H5T_direction_t direction, size_t *struct_alig
|
||||
if((ret_value=H5T_copy(dtype, H5T_COPY_TRANSIENT))==NULL)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot retrieve float type")
|
||||
|
||||
if(H5T_IS_VL_STRING(dtype)) {
|
||||
if(H5T_IS_VL_STRING(dtype->shared)) {
|
||||
/* Update size, offset and compound alignment for parent. */
|
||||
if(H5T_cmp_offset(comp_size, offset, sizeof(char *), 1, H5T_POINTER_COMP_ALIGN_g, struct_align)<0)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "cannot compute compound offset")
|
||||
|
@ -103,13 +103,13 @@ H5Tget_offset(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an atomic data type")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt))
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "operation not defined for specified data type")
|
||||
|
||||
/* Offset */
|
||||
ret_value = (int)dt->u.atomic.offset;
|
||||
ret_value = (int)dt->shared->u.atomic.offset;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -167,13 +167,13 @@ H5Tset_offset(hid_t type_id, size_t offset)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an atomic data type")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "data type is read-only")
|
||||
if (H5T_STRING == dt->type && offset != 0)
|
||||
if (H5T_STRING == dt->shared->type && offset != 0)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "offset must be zero for this type")
|
||||
if (H5T_ENUM==dt->type && dt->u.enumer.nmembs>0)
|
||||
if (H5T_ENUM==dt->shared->type && dt->shared->u.enumer.nmembs>0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not allowed after members are defined")
|
||||
if (H5T_COMPOUND==dt->type || H5T_REFERENCE==dt->type || H5T_OPAQUE==dt->type)
|
||||
if (H5T_COMPOUND==dt->shared->type || H5T_REFERENCE==dt->shared->type || H5T_OPAQUE==dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "operation not defined for this datatype")
|
||||
|
||||
/* Do the real work */
|
||||
@ -233,25 +233,25 @@ H5T_set_offset(H5T_t *dt, size_t offset)
|
||||
|
||||
/* Check args */
|
||||
assert(dt);
|
||||
assert(H5T_STRING!=dt->type || 0==offset);
|
||||
assert(H5T_REFERENCE!=dt->type);
|
||||
assert(H5T_OPAQUE!=dt->type);
|
||||
assert(H5T_COMPOUND!=dt->type);
|
||||
assert(!(H5T_ENUM==dt->type && 0==dt->u.enumer.nmembs));
|
||||
assert(H5T_STRING!=dt->shared->type || 0==offset);
|
||||
assert(H5T_REFERENCE!=dt->shared->type);
|
||||
assert(H5T_OPAQUE!=dt->shared->type);
|
||||
assert(H5T_COMPOUND!=dt->shared->type);
|
||||
assert(!(H5T_ENUM==dt->shared->type && 0==dt->shared->u.enumer.nmembs));
|
||||
|
||||
if (dt->parent) {
|
||||
if (H5T_set_offset(dt->parent, offset)<0)
|
||||
if (dt->shared->parent) {
|
||||
if (H5T_set_offset(dt->shared->parent, offset)<0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to set offset for base type")
|
||||
|
||||
/* Adjust size of datatype appropriately */
|
||||
if(dt->type==H5T_ARRAY)
|
||||
dt->size = dt->parent->size * dt->u.array.nelem;
|
||||
else if(dt->type!=H5T_VLEN)
|
||||
dt->size = dt->parent->size;
|
||||
if(dt->shared->type==H5T_ARRAY)
|
||||
dt->shared->size = dt->shared->parent->shared->size * dt->shared->u.array.nelem;
|
||||
else if(dt->shared->type!=H5T_VLEN)
|
||||
dt->shared->size = dt->shared->parent->shared->size;
|
||||
} else {
|
||||
if (offset+dt->u.atomic.prec > 8*dt->size)
|
||||
dt->size = (offset + dt->u.atomic.prec + 7) / 8;
|
||||
dt->u.atomic.offset = offset;
|
||||
if (offset+dt->shared->u.atomic.prec > 8*dt->shared->size)
|
||||
dt->shared->size = (offset + dt->shared->u.atomic.prec + 7) / 8;
|
||||
dt->shared->u.atomic.offset = offset;
|
||||
}
|
||||
|
||||
done:
|
||||
|
@ -81,18 +81,18 @@ H5Tset_tag(hid_t type_id, const char *tag)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "data type is read-only")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_OPAQUE!=dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_OPAQUE!=dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not an opaque data type")
|
||||
if (!tag)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no tag")
|
||||
|
||||
/* Commit */
|
||||
H5MM_xfree(dt->u.opaque.tag);
|
||||
dt->u.opaque.tag = H5MM_strdup(tag);
|
||||
H5MM_xfree(dt->shared->u.opaque.tag);
|
||||
dt->shared->u.opaque.tag = H5MM_strdup(tag);
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -125,13 +125,13 @@ H5Tget_tag(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a data type")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (H5T_OPAQUE != dt->type)
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (H5T_OPAQUE != dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "operation not defined for data type class")
|
||||
|
||||
/* result */
|
||||
if (NULL==(ret_value=H5MM_strdup(dt->u.opaque.tag)))
|
||||
if (NULL==(ret_value=H5MM_strdup(dt->shared->u.opaque.tag)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
|
||||
|
||||
done:
|
||||
|
@ -84,13 +84,13 @@ H5Tget_order(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_ORDER_ERROR, "not a data type")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt))
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, H5T_ORDER_ERROR, "operation not defined for specified data type")
|
||||
|
||||
/* Order */
|
||||
ret_value = dt->u.atomic.order;
|
||||
ret_value = dt->shared->u.atomic.order;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -125,19 +125,19 @@ H5Tset_order(hid_t type_id, H5T_order_t order)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "data type is read-only")
|
||||
if (order < 0 || order > H5T_ORDER_NONE)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "illegal byte order")
|
||||
if (H5T_ENUM==dt->type && dt->u.enumer.nmembs>0)
|
||||
if (H5T_ENUM==dt->shared->type && dt->shared->u.enumer.nmembs>0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not allowed after members are defined")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt))
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, H5T_ORDER_ERROR, "operation not defined for specified data type")
|
||||
|
||||
/* Commit */
|
||||
dt->u.atomic.order = order;
|
||||
dt->shared->u.atomic.order = order;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
|
24
src/H5Tpad.c
24
src/H5Tpad.c
@ -84,16 +84,16 @@ H5Tget_pad(hid_t type_id, H5T_pad_t *lsb/*out*/, H5T_pad_t *msb/*out*/)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt))
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "operation not defined for specified data type")
|
||||
|
||||
/* Get values */
|
||||
if (lsb)
|
||||
*lsb = dt->u.atomic.lsb_pad;
|
||||
*lsb = dt->shared->u.atomic.lsb_pad;
|
||||
if (msb)
|
||||
*msb = dt->u.atomic.msb_pad;
|
||||
*msb = dt->shared->u.atomic.msb_pad;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -128,20 +128,20 @@ H5Tset_pad(hid_t type_id, H5T_pad_t lsb, H5T_pad_t msb)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "data type is read-only")
|
||||
if (lsb < 0 || lsb >= H5T_NPAD || msb < 0 || msb >= H5T_NPAD)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid pad type")
|
||||
if (H5T_ENUM==dt->type && dt->u.enumer.nmembs>0)
|
||||
if (H5T_ENUM==dt->shared->type && dt->shared->u.enumer.nmembs>0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not allowed after members are defined")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt))
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "operation not defined for specified data type")
|
||||
|
||||
/* Commit */
|
||||
dt->u.atomic.lsb_pad = lsb;
|
||||
dt->u.atomic.msb_pad = msb;
|
||||
dt->shared->u.atomic.lsb_pad = lsb;
|
||||
dt->shared->u.atomic.msb_pad = msb;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
|
10
src/H5Tpkg.h
10
src/H5Tpkg.h
@ -195,9 +195,10 @@ typedef enum H5T_state_t {
|
||||
H5T_STATE_OPEN /*named constant, open object header */
|
||||
} H5T_state_t;
|
||||
|
||||
struct H5T_t {
|
||||
/* This struct is shared between all occurances of an open named type */
|
||||
typedef struct H5T_shared_t {
|
||||
hsize_t fo_count; /* number of references to this file object */
|
||||
H5T_state_t state; /*current state of the type */
|
||||
H5G_entry_t ent; /*the type is a named type */
|
||||
H5F_t *sh_file;/*file pointer if this is a shared type */
|
||||
H5T_class_t type; /*which class of type is this? */
|
||||
size_t size; /*total size of an instance of this type */
|
||||
@ -211,6 +212,11 @@ struct H5T_t {
|
||||
H5T_opaque_t opaque; /* an opaque datatype */
|
||||
H5T_array_t array; /* an array datatype */
|
||||
} u;
|
||||
} H5T_shared_t;
|
||||
|
||||
struct H5T_t {
|
||||
H5G_entry_t ent; /* entry information if the type is a named type */
|
||||
H5T_shared_t *shared; /* all other information */
|
||||
};
|
||||
|
||||
/* A compound datatype member */
|
||||
|
@ -91,13 +91,13 @@ H5Tget_precision(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype")
|
||||
while (dt->parent)
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt))
|
||||
while (dt->shared->parent)
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_ATOMIC(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, 0, "operation not defined for specified datatype")
|
||||
|
||||
/* Precision */
|
||||
ret_value = dt->u.atomic.prec;
|
||||
ret_value = dt->shared->u.atomic.prec;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -145,15 +145,15 @@ H5Tset_precision(hid_t type_id, size_t prec)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTSET, FAIL, "datatype is read-only")
|
||||
if (prec == 0)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "precision must be positive")
|
||||
if (H5T_ENUM==dt->type && dt->u.enumer.nmembs>0)
|
||||
if (H5T_ENUM==dt->shared->type && dt->shared->u.enumer.nmembs>0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "operation not allowed after members are defined")
|
||||
if (H5T_STRING==dt->type)
|
||||
if (H5T_STRING==dt->shared->type)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "precision for this type is read-only")
|
||||
if (H5T_COMPOUND==dt->type || H5T_OPAQUE==dt->type)
|
||||
if (H5T_COMPOUND==dt->shared->type || H5T_OPAQUE==dt->shared->type)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "operation not defined for specified datatype")
|
||||
|
||||
/* Do the work */
|
||||
@ -205,25 +205,25 @@ H5T_set_precision(H5T_t *dt, size_t prec)
|
||||
/* Check args */
|
||||
assert(dt);
|
||||
assert(prec>0);
|
||||
assert(H5T_OPAQUE!=dt->type);
|
||||
assert(H5T_COMPOUND!=dt->type);
|
||||
assert(H5T_STRING!=dt->type);
|
||||
assert(!(H5T_ENUM==dt->type && 0==dt->u.enumer.nmembs));
|
||||
assert(H5T_OPAQUE!=dt->shared->type);
|
||||
assert(H5T_COMPOUND!=dt->shared->type);
|
||||
assert(H5T_STRING!=dt->shared->type);
|
||||
assert(!(H5T_ENUM==dt->shared->type && 0==dt->shared->u.enumer.nmembs));
|
||||
|
||||
if (dt->parent) {
|
||||
if (H5T_set_precision(dt->parent, prec)<0)
|
||||
if (dt->shared->parent) {
|
||||
if (H5T_set_precision(dt->shared->parent, prec)<0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTSET, FAIL, "unable to set precision for base type")
|
||||
|
||||
/* Adjust size of datatype appropriately */
|
||||
if(dt->type==H5T_ARRAY)
|
||||
dt->size = dt->parent->size * dt->u.array.nelem;
|
||||
else if(dt->type!=H5T_VLEN)
|
||||
dt->size = dt->parent->size;
|
||||
if(dt->shared->type==H5T_ARRAY)
|
||||
dt->shared->size = dt->shared->parent->shared->size * dt->shared->u.array.nelem;
|
||||
else if(dt->shared->type!=H5T_VLEN)
|
||||
dt->shared->size = dt->shared->parent->shared->size;
|
||||
} else {
|
||||
if (H5T_IS_ATOMIC(dt)) {
|
||||
if (H5T_IS_ATOMIC(dt->shared)) {
|
||||
/* Adjust the offset and size */
|
||||
offset = dt->u.atomic.offset;
|
||||
size = dt->size;
|
||||
offset = dt->shared->u.atomic.offset;
|
||||
size = dt->shared->size;
|
||||
if (prec > 8*size)
|
||||
offset = 0;
|
||||
else if (offset+prec > 8 * size)
|
||||
@ -232,7 +232,7 @@ H5T_set_precision(H5T_t *dt, size_t prec)
|
||||
size = (prec+7) / 8;
|
||||
|
||||
/* Check that things are still kosher */
|
||||
switch (dt->type) {
|
||||
switch (dt->shared->type) {
|
||||
case H5T_INTEGER:
|
||||
case H5T_TIME:
|
||||
case H5T_BITFIELD:
|
||||
@ -245,9 +245,9 @@ H5T_set_precision(H5T_t *dt, size_t prec)
|
||||
* first when decreasing the precision of a floating point
|
||||
* type.
|
||||
*/
|
||||
if (dt->u.atomic.u.f.sign >= prec ||
|
||||
dt->u.atomic.u.f.epos + dt->u.atomic.u.f.esize > prec ||
|
||||
dt->u.atomic.u.f.mpos + dt->u.atomic.u.f.msize > prec)
|
||||
if (dt->shared->u.atomic.u.f.sign >= prec ||
|
||||
dt->shared->u.atomic.u.f.epos + dt->shared->u.atomic.u.f.esize > prec ||
|
||||
dt->shared->u.atomic.u.f.mpos + dt->shared->u.atomic.u.f.msize > prec)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "adjust sign, mantissa, and exponent fields first")
|
||||
break;
|
||||
|
||||
@ -257,9 +257,9 @@ H5T_set_precision(H5T_t *dt, size_t prec)
|
||||
} /* end switch */
|
||||
|
||||
/* Commit */
|
||||
dt->size = size;
|
||||
dt->u.atomic.offset = offset;
|
||||
dt->u.atomic.prec = prec;
|
||||
dt->shared->size = size;
|
||||
dt->shared->u.atomic.offset = offset;
|
||||
dt->shared->u.atomic.prec = prec;
|
||||
} /* end if */
|
||||
else
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for specified datatype")
|
||||
|
@ -67,7 +67,7 @@ typedef struct H5T_conv_cb_t {
|
||||
H5_DLL herr_t H5TN_init_interface(void);
|
||||
H5_DLL herr_t H5T_init(void);
|
||||
H5_DLL htri_t H5T_isa(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL H5T_t *H5T_open_oid(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL H5T_t *H5T_open(H5G_entry_t *ent, hid_t dxpl_id);
|
||||
H5_DLL H5T_t *H5T_copy(const H5T_t *old_dt, H5T_copy_t method);
|
||||
H5_DLL herr_t H5T_lock(H5T_t *dt, hbool_t immutable);
|
||||
H5_DLL herr_t H5T_close(H5T_t *dt);
|
||||
|
@ -87,16 +87,16 @@ H5Tget_strpad(hid_t type_id)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_STR_ERROR, "not a datatype")
|
||||
while (dt->parent && !H5T_IS_STRING(dt))
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt))
|
||||
while (dt->shared->parent && !H5T_IS_STRING(dt->shared))
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, H5T_STR_ERROR, "operation not defined for datatype class")
|
||||
|
||||
/* result */
|
||||
if(H5T_IS_FIXED_STRING(dt))
|
||||
ret_value = dt->u.atomic.u.s.pad;
|
||||
if(H5T_IS_FIXED_STRING(dt->shared))
|
||||
ret_value = dt->shared->u.atomic.u.s.pad;
|
||||
else
|
||||
ret_value = dt->u.vlen.pad;
|
||||
ret_value = dt->shared->u.vlen.pad;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
@ -144,20 +144,20 @@ H5Tset_strpad(hid_t type_id, H5T_str_t strpad)
|
||||
/* Check args */
|
||||
if (NULL == (dt = H5I_object_verify(type_id,H5I_DATATYPE)))
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
|
||||
if (H5T_STATE_TRANSIENT!=dt->state)
|
||||
if (H5T_STATE_TRANSIENT!=dt->shared->state)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
|
||||
if (strpad < 0 || strpad >= H5T_NSTR)
|
||||
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "illegal string pad type")
|
||||
while (dt->parent && !H5T_IS_STRING(dt))
|
||||
dt = dt->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt))
|
||||
while (dt->shared->parent && !H5T_IS_STRING(dt->shared))
|
||||
dt = dt->shared->parent; /*defer to parent*/
|
||||
if (!H5T_IS_STRING(dt->shared))
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_UNSUPPORTED, FAIL, "operation not defined for datatype class")
|
||||
|
||||
/* Commit */
|
||||
if(H5T_IS_FIXED_STRING(dt))
|
||||
dt->u.atomic.u.s.pad = strpad;
|
||||
if(H5T_IS_FIXED_STRING(dt->shared))
|
||||
dt->shared->u.atomic.u.s.pad = strpad;
|
||||
else
|
||||
dt->u.vlen.pad = strpad;
|
||||
dt->shared->u.vlen.pad = strpad;
|
||||
|
||||
done:
|
||||
FUNC_LEAVE_API(ret_value)
|
||||
|
@ -67,8 +67,9 @@ static H5T_vlen_alloc_info_t H5T_vlen_def_vl_alloc_info ={
|
||||
H5D_XFER_VLEN_FREE_INFO_DEF
|
||||
};
|
||||
|
||||
/* Declare extern the free list for H5T_t's */
|
||||
/* Declare extern the free lists for H5T_t's and H5T_shared_t's */
|
||||
H5FL_EXTERN(H5T_t);
|
||||
H5FL_EXTERN(H5T_shared_t);
|
||||
|
||||
|
||||
/*--------------------------------------------------------------------------
|
||||
@ -168,18 +169,22 @@ H5T_vlen_create(const H5T_t *base)
|
||||
/* Build new type */
|
||||
if (NULL==(dt = H5FL_CALLOC(H5T_t)))
|
||||
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
|
||||
if (NULL==(dt->shared = H5FL_CALLOC(H5T_shared_t))) {
|
||||
H5FL_FREE(H5T_t, dt);
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, NULL,"memory allocation failed")
|
||||
}
|
||||
dt->ent.header = HADDR_UNDEF;
|
||||
dt->type = H5T_VLEN;
|
||||
dt->shared->type = H5T_VLEN;
|
||||
|
||||
/*
|
||||
* Force conversions (i.e. memory to memory conversions should duplicate
|
||||
* data, not point to the same VL sequences)
|
||||
*/
|
||||
dt->force_conv = TRUE;
|
||||
dt->parent = H5T_copy(base, H5T_COPY_ALL);
|
||||
dt->shared->force_conv = TRUE;
|
||||
dt->shared->parent = H5T_copy(base, H5T_COPY_ALL);
|
||||
|
||||
/* This is a sequence, not a string */
|
||||
dt->u.vlen.type = H5T_VLEN_SEQUENCE;
|
||||
dt->shared->u.vlen.type = H5T_VLEN_SEQUENCE;
|
||||
|
||||
/* Set up VL information */
|
||||
if (H5T_set_loc(dt, NULL, H5T_LOC_MEMORY)<0)
|
||||
@ -223,7 +228,7 @@ H5T_vlen_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
|
||||
assert(loc>H5T_LOC_BADLOC && loc<H5T_LOC_MAXLOC);
|
||||
|
||||
/* Only change the location if it's different */
|
||||
if(loc!=dt->u.vlen.loc) {
|
||||
if(loc!=dt->shared->u.vlen.loc) {
|
||||
/* Indicate that the location changed */
|
||||
ret_value=TRUE;
|
||||
|
||||
@ -232,62 +237,62 @@ H5T_vlen_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
|
||||
assert(f==NULL);
|
||||
|
||||
/* Mark this type as being stored in memory */
|
||||
dt->u.vlen.loc=H5T_LOC_MEMORY;
|
||||
dt->shared->u.vlen.loc=H5T_LOC_MEMORY;
|
||||
|
||||
if(dt->u.vlen.type==H5T_VLEN_SEQUENCE) {
|
||||
if(dt->shared->u.vlen.type==H5T_VLEN_SEQUENCE) {
|
||||
/* size in memory, disk size is different */
|
||||
dt->size = sizeof(hvl_t);
|
||||
dt->shared->size = sizeof(hvl_t);
|
||||
|
||||
/* Set up the function pointers to access the VL sequence in memory */
|
||||
dt->u.vlen.getlen=H5T_vlen_seq_mem_getlen;
|
||||
dt->u.vlen.getptr=H5T_vlen_seq_mem_getptr;
|
||||
dt->u.vlen.isnull=H5T_vlen_seq_mem_isnull;
|
||||
dt->u.vlen.read=H5T_vlen_seq_mem_read;
|
||||
dt->u.vlen.write=H5T_vlen_seq_mem_write;
|
||||
dt->u.vlen.setnull=H5T_vlen_seq_mem_setnull;
|
||||
} else if(dt->u.vlen.type==H5T_VLEN_STRING) {
|
||||
dt->shared->u.vlen.getlen=H5T_vlen_seq_mem_getlen;
|
||||
dt->shared->u.vlen.getptr=H5T_vlen_seq_mem_getptr;
|
||||
dt->shared->u.vlen.isnull=H5T_vlen_seq_mem_isnull;
|
||||
dt->shared->u.vlen.read=H5T_vlen_seq_mem_read;
|
||||
dt->shared->u.vlen.write=H5T_vlen_seq_mem_write;
|
||||
dt->shared->u.vlen.setnull=H5T_vlen_seq_mem_setnull;
|
||||
} else if(dt->shared->u.vlen.type==H5T_VLEN_STRING) {
|
||||
/* size in memory, disk size is different */
|
||||
dt->size = sizeof(char *);
|
||||
dt->shared->size = sizeof(char *);
|
||||
|
||||
/* Set up the function pointers to access the VL string in memory */
|
||||
dt->u.vlen.getlen=H5T_vlen_str_mem_getlen;
|
||||
dt->u.vlen.getptr=H5T_vlen_str_mem_getptr;
|
||||
dt->u.vlen.isnull=H5T_vlen_str_mem_isnull;
|
||||
dt->u.vlen.read=H5T_vlen_str_mem_read;
|
||||
dt->u.vlen.write=H5T_vlen_str_mem_write;
|
||||
dt->u.vlen.setnull=H5T_vlen_str_mem_setnull;
|
||||
dt->shared->u.vlen.getlen=H5T_vlen_str_mem_getlen;
|
||||
dt->shared->u.vlen.getptr=H5T_vlen_str_mem_getptr;
|
||||
dt->shared->u.vlen.isnull=H5T_vlen_str_mem_isnull;
|
||||
dt->shared->u.vlen.read=H5T_vlen_str_mem_read;
|
||||
dt->shared->u.vlen.write=H5T_vlen_str_mem_write;
|
||||
dt->shared->u.vlen.setnull=H5T_vlen_str_mem_setnull;
|
||||
} else {
|
||||
assert(0 && "Invalid VL type");
|
||||
}
|
||||
|
||||
/* Reset file ID (since this VL is in memory) */
|
||||
dt->u.vlen.f=NULL;
|
||||
dt->shared->u.vlen.f=NULL;
|
||||
break;
|
||||
|
||||
case H5T_LOC_DISK: /* Disk based VL datatype */
|
||||
assert(f);
|
||||
|
||||
/* Mark this type as being stored on disk */
|
||||
dt->u.vlen.loc=H5T_LOC_DISK;
|
||||
dt->shared->u.vlen.loc=H5T_LOC_DISK;
|
||||
|
||||
/*
|
||||
* Size of element on disk is 4 bytes for the length, plus the size
|
||||
* of an address in this file, plus 4 bytes for the size of a heap
|
||||
* ID. Memory size is different
|
||||
*/
|
||||
dt->size = 4 + H5F_SIZEOF_ADDR(f) + 4;
|
||||
dt->shared->size = 4 + H5F_SIZEOF_ADDR(f) + 4;
|
||||
|
||||
/* Set up the function pointers to access the VL information on disk */
|
||||
/* VL sequences and VL strings are stored identically on disk, so use the same functions */
|
||||
dt->u.vlen.getlen=H5T_vlen_disk_getlen;
|
||||
dt->u.vlen.getptr=H5T_vlen_disk_getptr;
|
||||
dt->u.vlen.isnull=H5T_vlen_disk_isnull;
|
||||
dt->u.vlen.read=H5T_vlen_disk_read;
|
||||
dt->u.vlen.write=H5T_vlen_disk_write;
|
||||
dt->u.vlen.setnull=H5T_vlen_disk_setnull;
|
||||
dt->shared->u.vlen.getlen=H5T_vlen_disk_getlen;
|
||||
dt->shared->u.vlen.getptr=H5T_vlen_disk_getptr;
|
||||
dt->shared->u.vlen.isnull=H5T_vlen_disk_isnull;
|
||||
dt->shared->u.vlen.read=H5T_vlen_disk_read;
|
||||
dt->shared->u.vlen.write=H5T_vlen_disk_write;
|
||||
dt->shared->u.vlen.setnull=H5T_vlen_disk_setnull;
|
||||
|
||||
/* Set file ID (since this VL is on disk) */
|
||||
dt->u.vlen.f=f;
|
||||
dt->shared->u.vlen.f=f;
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -1011,16 +1016,16 @@ H5T_vlen_reclaim_recurse(void *elem, const H5T_t *dt, H5MM_free_t free_func, voi
|
||||
assert(dt);
|
||||
|
||||
/* Check the datatype of this element */
|
||||
switch(dt->type) {
|
||||
switch(dt->shared->type) {
|
||||
case H5T_ARRAY:
|
||||
/* Recurse on each element, if the array's base type is array, VL, enum or compound */
|
||||
if(H5T_IS_COMPLEX(dt->parent->type)) {
|
||||
if(H5T_IS_COMPLEX(dt->shared->parent->shared->type)) {
|
||||
void *off; /* offset of field */
|
||||
|
||||
/* Calculate the offset member and recurse on it */
|
||||
for(i=0; i<dt->u.array.nelem; i++) {
|
||||
off=((uint8_t *)elem)+i*(dt->parent->size);
|
||||
if(H5T_vlen_reclaim_recurse(off,dt->parent,free_func,free_info)<0)
|
||||
for(i=0; i<dt->shared->u.array.nelem; i++) {
|
||||
off=((uint8_t *)elem)+i*(dt->shared->parent->shared->size);
|
||||
if(H5T_vlen_reclaim_recurse(off,dt->shared->parent,free_func,free_info)<0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "Unable to free array element")
|
||||
} /* end for */
|
||||
} /* end if */
|
||||
@ -1028,14 +1033,14 @@ H5T_vlen_reclaim_recurse(void *elem, const H5T_t *dt, H5MM_free_t free_func, voi
|
||||
|
||||
case H5T_COMPOUND:
|
||||
/* Check each field and recurse on VL, compound, enum or array ones */
|
||||
for (i=0; i<dt->u.compnd.nmembs; i++) {
|
||||
for (i=0; i<dt->shared->u.compnd.nmembs; i++) {
|
||||
/* Recurse if it's VL, compound, enum or array */
|
||||
if(H5T_IS_COMPLEX(dt->u.compnd.memb[i].type->type)) {
|
||||
if(H5T_IS_COMPLEX(dt->shared->u.compnd.memb[i].type->shared->type)) {
|
||||
void *off; /* offset of field */
|
||||
|
||||
/* Calculate the offset member and recurse on it */
|
||||
off=((uint8_t *)elem)+dt->u.compnd.memb[i].offset;
|
||||
if(H5T_vlen_reclaim_recurse(off,dt->u.compnd.memb[i].type,free_func,free_info)<0)
|
||||
off=((uint8_t *)elem)+dt->shared->u.compnd.memb[i].offset;
|
||||
if(H5T_vlen_reclaim_recurse(off,dt->shared->u.compnd.memb[i].type,free_func,free_info)<0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "Unable to free compound field")
|
||||
} /* end if */
|
||||
} /* end for */
|
||||
@ -1043,19 +1048,19 @@ H5T_vlen_reclaim_recurse(void *elem, const H5T_t *dt, H5MM_free_t free_func, voi
|
||||
|
||||
case H5T_VLEN:
|
||||
/* Recurse on the VL information if it's VL, compound, enum or array, then free VL sequence */
|
||||
if(dt->u.vlen.type==H5T_VLEN_SEQUENCE) {
|
||||
if(dt->shared->u.vlen.type==H5T_VLEN_SEQUENCE) {
|
||||
hvl_t *vl=(hvl_t *)elem; /* Temp. ptr to the vl info */
|
||||
|
||||
/* Check if there is anything actually in this sequence */
|
||||
if(vl->len!=0) {
|
||||
/* Recurse if it's VL, array, enum or compound */
|
||||
if(H5T_IS_COMPLEX(dt->parent->type)) {
|
||||
if(H5T_IS_COMPLEX(dt->shared->parent->shared->type)) {
|
||||
void *off; /* offset of field */
|
||||
|
||||
/* Calculate the offset of each array element and recurse on it */
|
||||
while(vl->len>0) {
|
||||
off=((uint8_t *)vl->p)+(vl->len-1)*dt->parent->size;
|
||||
if(H5T_vlen_reclaim_recurse(off,dt->parent,free_func,free_info)<0)
|
||||
off=((uint8_t *)vl->p)+(vl->len-1)*dt->shared->parent->shared->size;
|
||||
if(H5T_vlen_reclaim_recurse(off,dt->shared->parent,free_func,free_info)<0)
|
||||
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "Unable to free VL element")
|
||||
vl->len--;
|
||||
} /* end while */
|
||||
@ -1067,7 +1072,7 @@ H5T_vlen_reclaim_recurse(void *elem, const H5T_t *dt, H5MM_free_t free_func, voi
|
||||
else
|
||||
H5MM_xfree(vl->p);
|
||||
} /* end if */
|
||||
} else if(dt->u.vlen.type==H5T_VLEN_STRING) {
|
||||
} else if(dt->shared->u.vlen.type==H5T_VLEN_STRING) {
|
||||
/* Free the VL string */
|
||||
if(free_func!=NULL)
|
||||
(*free_func)(*(char **)elem,free_info);
|
||||
|
@ -507,8 +507,9 @@ print_results(int nd, detected_t *d, int na, malign_t *misc_align)
|
||||
#include \"H5FLprivate.h\"\n\
|
||||
#include \"H5Tpkg.h\"\n\
|
||||
\n\
|
||||
/* Declare external the free list for H5T_t's */\n\
|
||||
/* Declare external the free lists for H5T_t's and H5T_shared_t's */\n\
|
||||
H5FL_EXTERN(H5T_t);\n\
|
||||
H5FL_EXTERN(H5T_shared_t);\n\
|
||||
\n\
|
||||
\n");
|
||||
|
||||
@ -545,15 +546,20 @@ H5TN_init_interface(void)\n\
|
||||
printf("\
|
||||
if (NULL==(dt = H5FL_CALLOC (H5T_t)))\n\
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL,\"memory allocation failed\");\n\
|
||||
dt->state = H5T_STATE_IMMUTABLE;\n\
|
||||
if (NULL==(dt->shared = H5FL_CALLOC(H5T_shared_t)))\n\
|
||||
{ \
|
||||
H5FL_FREE(H5T_t, dt);\
|
||||
HGOTO_ERROR (H5E_RESOURCE, H5E_NOSPACE, FAIL, \"memory allocation failed\");\n\
|
||||
} \
|
||||
dt->shared->state = H5T_STATE_IMMUTABLE;\n\
|
||||
dt->ent.header = HADDR_UNDEF;\n\
|
||||
dt->type = H5T_%s;\n\
|
||||
dt->size = %d;\n\
|
||||
dt->u.atomic.order = H5T_ORDER_%s;\n\
|
||||
dt->u.atomic.offset = %d;\n\
|
||||
dt->u.atomic.prec = %d;\n\
|
||||
dt->u.atomic.lsb_pad = H5T_PAD_ZERO;\n\
|
||||
dt->u.atomic.msb_pad = H5T_PAD_ZERO;\n",
|
||||
dt->shared->type = H5T_%s;\n\
|
||||
dt->shared->size = %d;\n\
|
||||
dt->shared->u.atomic.order = H5T_ORDER_%s;\n\
|
||||
dt->shared->u.atomic.offset = %d;\n\
|
||||
dt->shared->u.atomic.prec = %d;\n\
|
||||
dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO;\n\
|
||||
dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO;\n",
|
||||
d[i].msize ? "FLOAT" : "INTEGER",/*class */
|
||||
d[i].size, /*size */
|
||||
d[i].perm[0] ? "BE" : "LE", /*byte order */
|
||||
@ -564,19 +570,19 @@ H5TN_init_interface(void)\n\
|
||||
if (0 == d[i].msize) {
|
||||
/* The part unique to fixed point types */
|
||||
printf("\
|
||||
dt->u.atomic.u.i.sign = H5T_SGN_%s;\n",
|
||||
dt->shared->u.atomic.u.i.sign = H5T_SGN_%s;\n",
|
||||
d[i].sign ? "2" : "NONE");
|
||||
} else {
|
||||
/* The part unique to floating point types */
|
||||
printf("\
|
||||
dt->u.atomic.u.f.sign = %d;\n\
|
||||
dt->u.atomic.u.f.epos = %d;\n\
|
||||
dt->u.atomic.u.f.esize = %d;\n\
|
||||
dt->u.atomic.u.f.ebias = 0x%08lx;\n\
|
||||
dt->u.atomic.u.f.mpos = %d;\n\
|
||||
dt->u.atomic.u.f.msize = %d;\n\
|
||||
dt->u.atomic.u.f.norm = H5T_NORM_%s;\n\
|
||||
dt->u.atomic.u.f.pad = H5T_PAD_ZERO;\n",
|
||||
dt->shared->u.atomic.u.f.sign = %d;\n\
|
||||
dt->shared->u.atomic.u.f.epos = %d;\n\
|
||||
dt->shared->u.atomic.u.f.esize = %d;\n\
|
||||
dt->shared->u.atomic.u.f.ebias = 0x%08lx;\n\
|
||||
dt->shared->u.atomic.u.f.mpos = %d;\n\
|
||||
dt->shared->u.atomic.u.f.msize = %d;\n\
|
||||
dt->shared->u.atomic.u.f.norm = H5T_NORM_%s;\n\
|
||||
dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO;\n",
|
||||
d[i].sign, /*sign location */
|
||||
d[i].epos, /*exponent loc */
|
||||
d[i].esize, /*exponent size */
|
||||
|
100
test/getname.c
100
test/getname.c
@ -65,7 +65,7 @@ int main( void )
|
||||
hid_t group_id, group2_id, group3_id, group4_id, group5_id, group6_id, group7_id;
|
||||
hid_t dataset_id, dataset2_id;
|
||||
hid_t space_id;
|
||||
hid_t type_id;
|
||||
hid_t type_id, type2_id;
|
||||
hsize_t dims[1] = { 5 };
|
||||
|
||||
/*buffer to hold name and its size */
|
||||
@ -1212,6 +1212,31 @@ int main( void )
|
||||
PASSED();
|
||||
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
* Test H5Iget_name with objects that have two names
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
TESTING("H5Iget_name with datasets that have two names");
|
||||
|
||||
/* Open dataset named "d"*/
|
||||
if ((dataset_id = H5Dopen( file_id, "/g17/d"))<0) goto out;
|
||||
|
||||
/* Create link to dataset named "link" */
|
||||
if (H5Glink2(dataset_id,".",H5G_LINK_HARD,file_id,"/g17/link")<0) goto out;
|
||||
if ((dataset2_id = H5Dopen( file_id, "/g17/link"))<0) goto out;
|
||||
|
||||
/* Make sure that the two IDs use two different names */
|
||||
if(H5Iget_name(dataset_id, name, size)<0) goto out;
|
||||
if(check_name(name, "/g17/d")!=0) goto out;
|
||||
|
||||
if(H5Iget_name(dataset2_id, name, size)<0) goto out;
|
||||
if(check_name(name, "/g17/link")!=0) goto out;
|
||||
|
||||
if(H5Dclose(dataset_id)<0) goto out;
|
||||
if(H5Dclose(dataset2_id)<0) goto out;
|
||||
|
||||
PASSED();
|
||||
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
@ -1418,6 +1443,14 @@ int main( void )
|
||||
if ((group_id = H5Gcreate( file_id, "/g18", 0 ))<0) goto out;
|
||||
if ((group2_id = H5Gcreate( file_id, "/g18/g2", 0 ))<0) goto out;
|
||||
|
||||
/* Also create a dataset and a datatype */
|
||||
if ((space_id = H5Screate_simple( 1, dims, NULL ))<0) goto out;
|
||||
if ((type_id = H5Tcopy(H5T_NATIVE_INT))<0) goto out;
|
||||
if ((dataset_id = H5Dcreate( file_id, "g18/d2", type_id, space_id,
|
||||
H5P_DEFAULT ))<0) goto out;
|
||||
|
||||
if (H5Tcommit(file_id, "g18/t2", type_id) <0) goto out;
|
||||
|
||||
/* Create second file and group "/g3/g4/g5" in it */
|
||||
file1_id = H5Fcreate(filename1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
|
||||
if ((group3_id = H5Gcreate( file1_id, "/g3", 0 ))<0) goto out;
|
||||
@ -1427,39 +1460,70 @@ int main( void )
|
||||
/* Mount first file at "g3/g4" in the second file */
|
||||
if (H5Fmount(file1_id, "/g3/g4", file_id, H5P_DEFAULT)<0) goto out;
|
||||
|
||||
/* Get name for the ID of the first file, should be "/g18/g2" still */
|
||||
/* Get name for the group ID in the first file, should be "/g18/g2" still */
|
||||
if (H5Iget_name( group2_id, name, size )<0) goto out;
|
||||
|
||||
/* Verify */
|
||||
if (check_name( name, "/g18/g2" )!=0) goto out;
|
||||
|
||||
/* Open the mounted group */
|
||||
if ((group6_id = H5Gopen( file_id, "/g3/g4/g18/g2" ))<0) goto out;
|
||||
/* Get name for the dataset ID in the first file, should be "/g18/g2/d2" still */
|
||||
if (H5Iget_name( dataset_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/d2" )!=0) goto out;
|
||||
|
||||
/* Get name */
|
||||
/* Get name for the datatype ID in the first file, should be "/g18/g2/t2" still */
|
||||
if (H5Iget_name( type_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/t2" )!=0) goto out;
|
||||
|
||||
/* Open the mounted group, dataset, and datatype through their new names */
|
||||
if ((group6_id = H5Gopen( file1_id, "/g3/g4/g18/g2" ))<0) goto out;
|
||||
if ((dataset2_id = H5Dopen( file1_id, "/g3/g4/g18/d2" ))<0) goto out;
|
||||
if ((type2_id = H5Topen( file1_id, "/g3/g4/g18/t2" ))<0) goto out;
|
||||
|
||||
/* Verify names */
|
||||
if (H5Iget_name( group6_id, name, size )<0) goto out;
|
||||
|
||||
/* Verify */
|
||||
if (check_name( name, "/g3/g4/g18/g2" )!=0) goto out;
|
||||
|
||||
if (H5Iget_name( dataset2_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g3/g4/g18/d2" )!=0) goto out;
|
||||
|
||||
if (H5Iget_name( type2_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g3/g4/g18/t2" )!=0) goto out;
|
||||
|
||||
/* Verify that old IDs still refer to objects by their old names */
|
||||
if (H5Iget_name( group2_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/g2" )!=0) goto out;
|
||||
|
||||
if (H5Iget_name( dataset_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/d2" )!=0) goto out;
|
||||
|
||||
if (H5Iget_name( type_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/t2" )!=0) goto out;
|
||||
|
||||
/* Unmount */
|
||||
if (H5Funmount(file1_id, "/g3/g4")<0) goto out;
|
||||
|
||||
/* Get name for the ID of the first file, should be "/g18/g2" still */
|
||||
/* Get name for the IDs of the first file, should be unchanged */
|
||||
if (H5Iget_name( group2_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/g2" )!=0) goto out;
|
||||
|
||||
/* Verify */
|
||||
if (check_name( name, "/g18/g2" )!=0)
|
||||
goto out;
|
||||
if (H5Iget_name( dataset_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/d2" )!=0) goto out;
|
||||
|
||||
/* Get name for the ID of the secondt file, should be "" */
|
||||
if (H5Iget_name( type_id, name, size )<0) goto out;
|
||||
if (check_name( name, "/g18/t2" )!=0) goto out;
|
||||
|
||||
/* Get name for the IDs of the second file, should be "" */
|
||||
if (H5Iget_name( group6_id, name, size )<0) goto out;
|
||||
if (check_name( name, "" )!=0) goto out;
|
||||
|
||||
/* Verify */
|
||||
if (check_name( name, "" )!=0)
|
||||
goto out;
|
||||
if (H5Iget_name( dataset2_id, name, size )<0) goto out;
|
||||
if (check_name( name, "" )!=0) goto out;
|
||||
|
||||
/* Close */
|
||||
if (H5Iget_name( type2_id, name, size )<0) goto out;
|
||||
if (check_name( name, "" )!=0) goto out;
|
||||
|
||||
H5Tclose( type_id );
|
||||
H5Tclose( type2_id );
|
||||
H5Dclose( dataset_id );
|
||||
H5Dclose( dataset2_id );
|
||||
H5Gclose( group_id );
|
||||
H5Gclose( group2_id );
|
||||
H5Gclose( group3_id );
|
||||
|
149
test/unlink.c
149
test/unlink.c
@ -29,7 +29,9 @@ const char *FILENAME[] = {
|
||||
"lunlink",
|
||||
"filespace",
|
||||
"slashes",
|
||||
"resurrect",
|
||||
"resurrect_set",
|
||||
"resurrect_type",
|
||||
"resurrect_group",
|
||||
"unlink_chunked",
|
||||
NULL
|
||||
};
|
||||
@ -44,6 +46,7 @@ const char *FILENAME[] = {
|
||||
#define DATASET2NAME "dataset2"
|
||||
#define ATTRNAME "attribute"
|
||||
#define TYPENAME "datatype"
|
||||
#define TYPE2NAME "datatype2"
|
||||
#define FILESPACE_NDIMS 3
|
||||
#define FILESPACE_DIM0 20
|
||||
#define FILESPACE_DIM1 20
|
||||
@ -1837,6 +1840,9 @@ test_resurrect_dataset(void)
|
||||
/* Unlink the dataset while it's open (will mark it for deletion when closed) */
|
||||
if(H5Gunlink(f, DATASETNAME)<0) TEST_ERROR;
|
||||
|
||||
/* Check that dataset name is NULL */
|
||||
if(H5Iget_name(d, NULL, 0) != 0) TEST_ERROR;
|
||||
|
||||
/* Re-link the dataset to the group hierarchy (shouldn't get deleted now) */
|
||||
if(H5Glink2(d, ".", H5G_LINK_HARD, f, DATASET2NAME)<0) TEST_ERROR;
|
||||
|
||||
@ -1868,6 +1874,145 @@ error:
|
||||
return 1;
|
||||
} /* end test_resurrect_dataset() */
|
||||
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
* Function: test_resurrect_datatype
|
||||
*
|
||||
* Purpose: Tests deleting a datatype while it's still open and then
|
||||
* "resurrecting" it by creating a link to it again.
|
||||
*
|
||||
* Return: Success: 0
|
||||
* Failure: number of errors
|
||||
*
|
||||
* Programmer: James Laird
|
||||
* Wednesday, July 28, 2004
|
||||
*
|
||||
* Modifications:
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
static int
|
||||
test_resurrect_datatype(void)
|
||||
{
|
||||
hid_t file=-1, type=-1, fapl=-1, loc_id=-1;
|
||||
char filename[1024];
|
||||
|
||||
TESTING("Resurrecting datatype after deletion");
|
||||
|
||||
/* Create file */
|
||||
fapl = h5_fileaccess();
|
||||
h5_fixname(FILENAME[7], fapl, filename, sizeof filename);
|
||||
|
||||
/* Create the file */
|
||||
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl))<0) TEST_ERROR;
|
||||
|
||||
/* Create a named datatype in the file */
|
||||
if((type = H5Tcopy (H5T_NATIVE_INT))<0) TEST_ERROR;
|
||||
if(H5Tcommit (file, TYPENAME, type)<0) TEST_ERROR;
|
||||
|
||||
/* Unlink the datatype while it's open (will mark it for deletion when closed) */
|
||||
if(H5Gunlink(file, TYPENAME)<0) TEST_ERROR;
|
||||
|
||||
/* Check that datatype name is NULL */
|
||||
if(H5Iget_name(type, NULL, 0) != 0) TEST_ERROR;
|
||||
|
||||
/* Re-link the datatype to the group hierarchy (shouldn't get deleted now) */
|
||||
if(H5Glink2(type, ".", H5G_LINK_HARD, file, TYPE2NAME) < 0) TEST_ERROR;
|
||||
|
||||
/* Close things */
|
||||
if(H5Tclose(type)<0) TEST_ERROR;
|
||||
if(H5Fclose(file)<0) TEST_ERROR;
|
||||
|
||||
/* Re-open the file */
|
||||
if((file=H5Fopen(filename, H5F_ACC_RDONLY, fapl))<0) TEST_ERROR;
|
||||
|
||||
/* Attempt to open the datatype under the new name */
|
||||
if((type=H5Topen(file,TYPE2NAME))<0) TEST_ERROR;
|
||||
|
||||
/* Close things */
|
||||
if(H5Tclose(type)<0) TEST_ERROR;
|
||||
if(H5Fclose(file)<0) TEST_ERROR;
|
||||
|
||||
PASSED();
|
||||
return 0;
|
||||
|
||||
error:
|
||||
H5E_BEGIN_TRY {
|
||||
H5Tclose(type);
|
||||
H5Fclose(file);
|
||||
} H5E_END_TRY;
|
||||
return 1;
|
||||
} /* end test_resurrect_datatype() */
|
||||
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
* Function: test_resurrect_group
|
||||
*
|
||||
* Purpose: Tests deleting a group while it's still open and then
|
||||
* "resurrecting" it by creating a link to it again.
|
||||
*
|
||||
* Return: Success: 0
|
||||
* Failure: number of errors
|
||||
*
|
||||
* Programmer: James Laird
|
||||
* Wednesday, July 28, 2004
|
||||
*
|
||||
* Modifications:
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
static int
|
||||
test_resurrect_group(void)
|
||||
{
|
||||
hid_t file=-1, group=-1, fapl=-1;
|
||||
char filename[1024];
|
||||
|
||||
TESTING("Resurrecting group after deletion");
|
||||
|
||||
/* Create file */
|
||||
fapl = h5_fileaccess();
|
||||
h5_fixname(FILENAME[8], fapl, filename, sizeof filename);
|
||||
|
||||
/* Create the file */
|
||||
if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl))<0) TEST_ERROR;
|
||||
|
||||
/* Create a group in the file */
|
||||
if((group = H5Gcreate (file, GROUPNAME, 0))<0) TEST_ERROR;
|
||||
|
||||
/* Unlink the group while it's open (will mark it for deletion when closed) */
|
||||
if(H5Gunlink(file, GROUPNAME)<0) TEST_ERROR;
|
||||
|
||||
/* Check that group's name is NULL */
|
||||
if(H5Iget_name(group, NULL, 0) != 0) TEST_ERROR;
|
||||
|
||||
/* Re-link the group into the group hierarchy (shouldn't get deleted now) */
|
||||
if(H5Glink2(group, ".", H5G_LINK_HARD, file, GROUP2NAME)<0) TEST_ERROR;
|
||||
|
||||
/* Close things */
|
||||
if(H5Gclose(group)<0) TEST_ERROR;
|
||||
if(H5Fclose(file)<0) TEST_ERROR;
|
||||
|
||||
/* Re-open the file */
|
||||
if((file=H5Fopen(filename, H5F_ACC_RDONLY, fapl))<0) TEST_ERROR;
|
||||
|
||||
/* Attempt to open the datatype under the new name */
|
||||
if((group=H5Gopen(file,GROUP2NAME))<0) TEST_ERROR;
|
||||
|
||||
/* Close things */
|
||||
if(H5Gclose(group)<0) TEST_ERROR;
|
||||
if(H5Fclose(file)<0) TEST_ERROR;
|
||||
|
||||
PASSED();
|
||||
return 0;
|
||||
|
||||
error:
|
||||
H5E_BEGIN_TRY {
|
||||
H5Gclose(group);
|
||||
H5Fclose(file);
|
||||
} H5E_END_TRY;
|
||||
return 1;
|
||||
} /* end test_resurrect_group() */
|
||||
|
||||
|
||||
/*-------------------------------------------------------------------------
|
||||
* Function: test_unlink_chunked_dataset
|
||||
@ -2035,6 +2180,8 @@ main(void)
|
||||
|
||||
/* Test "resurrecting" objects */
|
||||
nerrors += test_resurrect_dataset();
|
||||
nerrors += test_resurrect_datatype();
|
||||
nerrors += test_resurrect_group();
|
||||
|
||||
/* Test unlinking chunked datasets */
|
||||
nerrors += test_unlink_chunked_dataset();
|
||||
|
@ -748,8 +748,14 @@ static const char* MapIdToName(hid_t refobj_id,
|
||||
{
|
||||
hid_t id;
|
||||
hid_t fid;
|
||||
H5G_stat_t refstat; /* Stat for the refobj id */
|
||||
H5G_stat_t objstat; /* Stat for objects in the file */
|
||||
int i;
|
||||
|
||||
/* obtain information to identify the referenced object uniquely */
|
||||
if(H5Gget_objinfo(refobj_id, ".", 0, &refstat) <0)
|
||||
return NULL;
|
||||
|
||||
/* obtains the file ID given an object ID. This ID must be closed */
|
||||
if ((fid = H5Iget_file_id(refobj_id))<0)
|
||||
{
|
||||
@ -773,9 +779,11 @@ static const char* MapIdToName(hid_t refobj_id,
|
||||
|
||||
if ((id = H5Dopen(fid,travt->objs[i].name))<0)
|
||||
return NULL;
|
||||
if(H5Gget_objinfo(id, ".", 0, &objstat) <0)
|
||||
return NULL;
|
||||
if (H5Dclose(id)<0)
|
||||
return NULL;
|
||||
if (id==refobj_id)
|
||||
if (refstat.fileno==objstat.fileno && refstat.objno==objstat.objno)
|
||||
{
|
||||
H5Fclose(fid);
|
||||
return travt->objs[i].name;
|
||||
|
BIN
windows/all.zip
BIN
windows/all.zip
Binary file not shown.
Loading…
x
Reference in New Issue
Block a user