2017-12-03 19:06:56 +08:00
|
|
|
/**
|
|
|
|
*
|
|
|
|
* @file
|
|
|
|
* This file is part of netcdf-4, a netCDF-like interface for HDF5, or a
|
|
|
|
* HDF5 backend for netCDF, depending on your point of view.
|
|
|
|
*
|
|
|
|
* This file contains functions internal to the netcdf4 library. None of
|
|
|
|
 * the functions in this file are exposed in the external API. These
|
|
|
|
* functions handle the HDF interface.
|
|
|
|
*
|
|
|
|
* Copyright 2003, University Corporation for Atmospheric
|
|
|
|
* Research. See the COPYRIGHT file for copying and redistribution
|
|
|
|
* conditions.
|
|
|
|
*
|
|
|
|
* @author Ed Hartnett, Dennis Heimbigner, Ward Fisher
|
2017-12-03 22:11:51 +08:00
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
#include "config.h"
|
|
|
|
#include "nc4internal.h"
|
2012-12-13 04:05:06 +08:00
|
|
|
#include "nc4dispatch.h"
|
2010-06-03 21:24:43 +08:00
|
|
|
#include <H5DSpublic.h>
|
|
|
|
#include <math.h>
|
|
|
|
|
2018-01-10 05:58:25 +08:00
|
|
|
#ifdef HAVE_INTTYPES_H
|
|
|
|
#define __STDC_FORMAT_MACROS
|
|
|
|
#include <inttypes.h>
|
|
|
|
#endif
|
|
|
|
|
2015-09-13 06:41:14 +08:00
|
|
|
#ifdef USE_PARALLEL
|
|
|
|
#include "netcdf_par.h"
|
2013-01-15 11:46:46 +08:00
|
|
|
#endif
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
#define NC3_STRICT_ATT_NAME "_nc3_strict" /**< @internal Indicates classic model. */
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
#define NC_HDF5_MAX_NAME 1024 /**< @internal Max size of HDF5 name. */
|
|
|
|
|
|
|
|
#define MAXNAME 1024 /**< Max HDF5 name. */
|
|
|
|
|
|
|
|
/** @internal HDF5 object types. */
|
|
|
|
static unsigned int OTYPES[5] = {H5F_OBJ_FILE, H5F_OBJ_DATASET, H5F_OBJ_GROUP,
|
|
|
|
H5F_OBJ_DATATYPE, H5F_OBJ_ATTR};
|
2016-05-04 11:17:06 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Flag attributes in a linked list as dirty.
|
2016-06-15 00:47:24 +08:00
|
|
|
*
|
2017-12-03 22:11:51 +08:00
|
|
|
* @param attlist List of attributes, may be NULL.
|
2016-06-15 00:47:24 +08:00
|
|
|
*
|
2017-12-03 22:11:51 +08:00
|
|
|
* @return NC_NOERR No error.
|
2016-06-15 00:47:24 +08:00
|
|
|
*/
|
2017-12-03 22:11:51 +08:00
|
|
|
static int
|
2018-03-17 01:46:18 +08:00
|
|
|
flag_atts_dirty(NCindex *attlist) {
|
2016-06-15 00:22:06 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_ATT_INFO_T *att = NULL;
|
2018-03-17 01:46:18 +08:00
|
|
|
int i;
|
2016-06-15 00:22:06 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
if(attlist == NULL) {
|
|
|
|
return NC_NOERR;
|
|
|
|
}
|
2016-06-15 00:22:06 +08:00
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(attlist);i++) {
|
|
|
|
att = (NC_ATT_INFO_T*)ncindexith(attlist,i);
|
|
|
|
if(att == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
att->dirty = NC_TRUE;
|
|
|
|
}
|
2016-06-15 00:22:06 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2016-06-15 00:22:06 +08:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal This function is needed to handle one special case: what
|
|
|
|
* if the user defines a dim, writes metadata, then goes back into
|
|
|
|
* define mode and adds a coordinate var for the already existing
|
|
|
|
* dim. In that case, I need to recreate the dim's dimension scale
|
|
|
|
* dataset, and then I need to go to every var in the file which uses
|
|
|
|
* that dimension, and attach the new dimension scale.
|
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param dimid Dimension ID.
|
|
|
|
* @param dimscaleid HDF5 dimension scale ID.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-11-24 23:36:58 +08:00
|
|
|
int
|
2010-06-03 21:24:43 +08:00
|
|
|
rec_reattach_scales(NC_GRP_INFO_T *grp, int dimid, hid_t dimscaleid)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_GRP_INFO_T *child_grp;
|
|
|
|
int d, i;
|
|
|
|
int retval;
|
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(grp && grp->hdr.name && dimid >= 0 && dimscaleid >= 0);
|
|
|
|
LOG((3, "%s: grp->hdr.name %s", __func__, grp->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* If there are any child groups, attach dimscale there, if needed. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->children);i++) {
|
|
|
|
child_grp = (NC_GRP_INFO_T*)ncindexith(grp->children,i);
|
|
|
|
if(child_grp == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = rec_reattach_scales(child_grp, dimid, dimscaleid)))
|
|
|
|
return retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* Find any vars that use this dimension id. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->vars);i++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,i);
|
|
|
|
if(var == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
for (d = 0; d < var->ndims; d++)
|
|
|
|
if (var->dimids[d] == dimid && !var->dimscale)
|
|
|
|
{
|
|
|
|
LOG((2, "%s: attaching scale for dimid %d to var %s",
|
2018-03-17 01:46:18 +08:00
|
|
|
__func__, var->dimids[d], var->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
if (var->created)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
if (H5DSattach_scale(var->hdf_datasetid, dimscaleid, d) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
var->dimscale_attached[d] = NC_TRUE;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal This function is needed to handle one special case: what
|
|
|
|
* if the user defines a dim, writes metadata, then goes back into
|
|
|
|
* define mode and adds a coordinate var for the already existing
|
|
|
|
* dim. In that case, I need to recreate the dim's dimension scale
|
|
|
|
* dataset, and then I need to go to every var in the file which uses
|
|
|
|
* that dimension, and attach the new dimension scale.
|
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param dimid Dimension ID.
|
|
|
|
* @param dimscaleid HDF5 dimension scale ID.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2013-12-01 13:20:28 +08:00
|
|
|
int
|
2010-06-03 21:24:43 +08:00
|
|
|
rec_detach_scales(NC_GRP_INFO_T *grp, int dimid, hid_t dimscaleid)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_GRP_INFO_T *child_grp;
|
|
|
|
int d, i;
|
|
|
|
int retval;
|
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(grp && grp->hdr.name && dimid >= 0 && dimscaleid >= 0);
|
|
|
|
LOG((3, "%s: grp->hdr.name %s", __func__, grp->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* If there are any child groups, detach dimscale there, if needed. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->children);i++) {
|
|
|
|
child_grp = (NC_GRP_INFO_T*)ncindexith(grp->children,i);
|
|
|
|
if(child_grp == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = rec_detach_scales(child_grp, dimid, dimscaleid)))
|
|
|
|
return retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* Find any vars that use this dimension id. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->vars);i++) {
|
|
|
|
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,i);
|
|
|
|
if(var == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
for (d = 0; d < var->ndims; d++)
|
|
|
|
if (var->dimids[d] == dimid && !var->dimscale)
|
|
|
|
{
|
|
|
|
LOG((2, "%s: detaching scale for dimid %d to var %s",
|
2018-03-17 01:46:18 +08:00
|
|
|
__func__, var->dimids[d], var->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
if (var->created)
|
|
|
|
if (var->dimscale_attached && var->dimscale_attached[d])
|
|
|
|
{
|
|
|
|
if (H5DSdetach_scale(var->hdf_datasetid, dimscaleid, d) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
var->dimscale_attached[d] = NC_FALSE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Open a HDF5 dataset and leave it open.
|
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param varid Variable ID.
|
|
|
|
* @param dataset Pointer that gets the HDF5 dataset ID.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-11-12 06:17:08 +08:00
|
|
|
int
|
2010-06-03 21:24:43 +08:00
|
|
|
nc4_open_var_grp2(NC_GRP_INFO_T *grp, int varid, hid_t *dataset)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_VAR_INFO_T *var;
|
2014-11-12 06:24:38 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Find the requested varid. */
|
2018-03-17 01:46:18 +08:00
|
|
|
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,varid);
|
2016-07-07 22:28:24 +08:00
|
|
|
if (!var) return NC_ENOTVAR;
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(var->hdr.id == varid);
|
2014-11-12 06:24:38 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Open this dataset if necessary. */
|
|
|
|
if (!var->hdf_datasetid)
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((var->hdf_datasetid = H5Dopen2(grp->hdf_grpid, var->hdr.name,
|
2017-12-03 22:11:51 +08:00
|
|
|
H5P_DEFAULT)) < 0)
|
|
|
|
return NC_ENOTVAR;
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
*dataset = var->hdf_datasetid;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Get the default fill value for an atomic type. Memory for
|
|
|
|
* fill_value must already be allocated, or you are DOOMED!
|
|
|
|
*
|
|
|
|
* @param type_info Pointer to type info struct.
|
|
|
|
* @param fill_value Pointer that gets the default fill value.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EINVAL Can't find atomic type.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
int
|
2013-07-11 04:09:31 +08:00
|
|
|
nc4_get_default_fill_value(const NC_TYPE_INFO_T *type_info, void *fill_value)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
switch (type_info->hdr.id)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_CHAR:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(char *)fill_value = NC_FILL_CHAR;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_STRING:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(char **)fill_value = strdup(NC_FILL_STRING);
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_BYTE:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(signed char *)fill_value = NC_FILL_BYTE;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_SHORT:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(short *)fill_value = NC_FILL_SHORT;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_INT:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(int *)fill_value = NC_FILL_INT;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_UBYTE:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(unsigned char *)fill_value = NC_FILL_UBYTE;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_USHORT:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(unsigned short *)fill_value = NC_FILL_USHORT;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_UINT:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(unsigned int *)fill_value = NC_FILL_UINT;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_INT64:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(long long *)fill_value = NC_FILL_INT64;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_UINT64:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(unsigned long long *)fill_value = NC_FILL_UINT64;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_FLOAT:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(float *)fill_value = NC_FILL_FLOAT;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_DOUBLE:
|
2014-11-12 06:24:38 +08:00
|
|
|
*(double *)fill_value = NC_FILL_DOUBLE;
|
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
default:
|
2014-11-12 06:24:38 +08:00
|
|
|
return NC_EINVAL;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
 * @internal What fill value should be used for a variable?
 *
 * Allocates a buffer sized for the var's type, copies the user's
 * fill value into it if one was set (deep-copying VLEN and string
 * contents), otherwise fills it with the type's default fill value.
 *
 * @param h5 Pointer to HDF5 file info struct.
 * @param var Pointer to variable info struct.
 * @param fillp Pointer that gets pointer to fill value; caller owns
 * the returned memory. Set to NULL on failure paths.
 *
 * @returns NC_NOERR No error.
 * @returns NC_ENOMEM Out of memory.
 * @author Ed Hartnett
 */
static int
get_fill_value(NC_HDF5_FILE_INFO_T *h5, NC_VAR_INFO_T *var, void **fillp)
{
   size_t size;
   int retval;

   /* Find out how much space we need for this type's fill value. */
   if (var->type_info->nc_type_class == NC_VLEN)
      size = sizeof(nc_vlen_t);
   else if (var->type_info->nc_type_class == NC_STRING)
      size = sizeof(char *);
   else
   {
      if ((retval = nc4_get_typelen_mem(h5, var->type_info->hdr.id, &size)))
         return retval;
   }
   assert(size);

   /* Allocate the space. Zeroed so pointer slots start NULL. */
   if (!((*fillp) = calloc(1, size)))
      return NC_ENOMEM;

   /* If the user has set a fill_value for this var, use, otherwise
    * find the default fill value. */
   if (var->fill_value)
   {
      LOG((4, "Found a fill value for var %s", var->hdr.name));
      if (var->type_info->nc_type_class == NC_VLEN)
      {
         /* Deep-copy the VLEN: copy the length, then the payload. */
         nc_vlen_t *in_vlen = (nc_vlen_t *)(var->fill_value), *fv_vlen = (nc_vlen_t *)(*fillp);

         fv_vlen->len = in_vlen->len;
         /* NOTE(review): here size == sizeof(nc_vlen_t), not the size
          * of the VLEN's base type — the element buffer sizing looks
          * suspect; confirm against the VLEN base type length. */
         if (!(fv_vlen->p = malloc(size * in_vlen->len)))
         {
            free(*fillp);
            *fillp = NULL;
            return NC_ENOMEM;
         }
         memcpy(fv_vlen->p, in_vlen->p, in_vlen->len * size);
      }
      else if (var->type_info->nc_type_class == NC_STRING)
      {
         /* Deep-copy the string (if non-NULL) into the char* slot
          * inside the calloc'd buffer. */
         if (*(char **)var->fill_value)
            if (!(**(char ***)fillp = strdup(*(char **)var->fill_value)))
            {
               free(*fillp);
               *fillp = NULL;
               return NC_ENOMEM;
            }
      }
      else
         /* Plain atomic/compound data: a flat byte copy suffices. */
         memcpy((*fillp), var->fill_value, size);
   }
   else
   {
      if (nc4_get_default_fill_value(var->type_info, *fillp))
      {
         /* Note: release memory, but don't return error on failure */
         free(*fillp);
         *fillp = NULL;
      }
   }

   return NC_NOERR;
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
 * @internal Given a netcdf type, return appropriate HDF typeid. (All
 * hdf_typeid's returned from this routine must be H5Tclosed by the
 * caller).
 *
 * @param h5 Pointer to HDF5 file info struct.
 * @param xtype NetCDF type ID.
 * @param hdf_typeid Pointer that gets the HDF5 type ID.
 * @param endianness Desired endianness in HDF5 type.
 *
 * @returns NC_NOERR No error.
 * @returns NC_ECHAR Conversions of NC_CHAR forbidden.
 * @returns NC_EVARMETA HDF5 returning error creating datatype.
 * @returns NC_EHDFERR HDF5 returning error.
 * @returns NC_EBADTYE Type not found.
 * @author Ed Hartnett
 */
int
nc4_get_hdf_typeid(NC_HDF5_FILE_INFO_T *h5, nc_type xtype,
                   hid_t *hdf_typeid, int endianness)
{
   NC_TYPE_INFO_T *type;
   hid_t typeid = 0;       /* Temporary id; nonzero means we own it. */
   int retval = NC_NOERR;

   assert(hdf_typeid && h5);

   *hdf_typeid = -1;

   /* Determine an appropriate HDF5 datatype */
   if (xtype == NC_NAT)
      /* NAT = 'Not A Type' (c.f. NaN) */
      return NC_EBADTYPE;
   else if (xtype == NC_CHAR || xtype == NC_STRING)
   {
      /* NC_CHAR & NC_STRING types create a new HDF5 datatype */
      if (xtype == NC_CHAR)
      {
         /* NC_CHAR: single ASCII character, NUL-terminated padding. */
         if ((typeid = H5Tcopy(H5T_C_S1)) < 0)
            return NC_EHDFERR;
         if (H5Tset_strpad(typeid, H5T_STR_NULLTERM) < 0)
            BAIL(NC_EVARMETA);
         if(H5Tset_cset(typeid, H5T_CSET_ASCII) < 0)
            BAIL(NC_EVARMETA);

         /* Take ownership of the newly created HDF5 datatype */
         *hdf_typeid = typeid;
         typeid = 0;
      }
      else
      {
         /* NC_STRING: variable-length UTF-8 string. */
         if ((typeid = H5Tcopy(H5T_C_S1)) < 0)
            return NC_EHDFERR;
         if (H5Tset_size(typeid, H5T_VARIABLE) < 0)
            BAIL(NC_EVARMETA);
         if(H5Tset_cset(typeid, H5T_CSET_UTF8) < 0)
            BAIL(NC_EVARMETA);

         /* Take ownership of the newly created HDF5 datatype */
         *hdf_typeid = typeid;
         typeid = 0;
      }
   }
   else
   {
      /* All other types use an existing HDF5 datatype. The typeid
       * selected below is NOT owned (it is a predefined HDF5 id or
       * belongs to the type struct), so it is copied before return. */
      switch (xtype)
      {
      case NC_BYTE: /* signed 1 byte integer */
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_I8LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_I8BE;
         else
            typeid = H5T_NATIVE_SCHAR;
         break;

      case NC_SHORT: /* signed 2 byte integer */
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_I16LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_I16BE;
         else
            typeid = H5T_NATIVE_SHORT;
         break;

      case NC_INT:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_I32LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_I32BE;
         else
            typeid = H5T_NATIVE_INT;
         break;

      case NC_UBYTE:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_U8LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_U8BE;
         else
            typeid = H5T_NATIVE_UCHAR;
         break;

      case NC_USHORT:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_U16LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_U16BE;
         else
            typeid = H5T_NATIVE_USHORT;
         break;

      case NC_UINT:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_U32LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_U32BE;
         else
            typeid = H5T_NATIVE_UINT;
         break;

      case NC_INT64:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_I64LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_I64BE;
         else
            typeid = H5T_NATIVE_LLONG;
         break;

      case NC_UINT64:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_STD_U64LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_STD_U64BE;
         else
            typeid = H5T_NATIVE_ULLONG;
         break;

      case NC_FLOAT:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_IEEE_F32LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_IEEE_F32BE;
         else
            typeid = H5T_NATIVE_FLOAT;
         break;

      case NC_DOUBLE:
         if (endianness == NC_ENDIAN_LITTLE)
            typeid = H5T_IEEE_F64LE;
         else if (endianness == NC_ENDIAN_BIG)
            typeid = H5T_IEEE_F64BE;
         else
            typeid = H5T_NATIVE_DOUBLE;
         break;

      default:
         /* Maybe this is a user defined type? */
         if (nc4_find_type(h5, xtype, &type))
            return NC_EBADTYPE;
         if (!type)
            return NC_EBADTYPE;
         typeid = type->hdf_typeid;
         break;
      }
      assert(typeid);

      /* Copy the HDF5 datatype, so the function operates uniformly */
      if ((*hdf_typeid = H5Tcopy(typeid)) < 0)
         return NC_EHDFERR;
      /* Clear typeid so the exit path does not close a borrowed id. */
      typeid = 0;
   }
   assert(*hdf_typeid != -1);

exit:
   /* BAIL() jumps here; close any datatype we still own. */
   if (typeid > 0 && H5Tclose(typeid) < 0)
      BAIL2(NC_EHDFERR);
   return retval;
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Do some common check for nc4_put_vara and
|
|
|
|
* nc4_get_vara. These checks have to be done when both reading and
|
2017-12-05 03:21:14 +08:00
|
|
|
* writing data.
|
|
|
|
*
|
|
|
|
* @param mem_nc_type Pointer to type of data in memory.
|
|
|
|
* @param var Pointer to var info struct.
|
|
|
|
* @param h5 Pointer to HDF5 file info struct.
|
2017-12-03 22:11:51 +08:00
|
|
|
*
|
2017-12-05 03:21:14 +08:00
|
|
|
* @return ::NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
2017-12-03 22:11:51 +08:00
|
|
|
*/
|
2014-11-12 06:17:08 +08:00
|
|
|
static int
|
2010-06-03 21:24:43 +08:00
|
|
|
check_for_vara(nc_type *mem_nc_type, NC_VAR_INFO_T *var, NC_HDF5_FILE_INFO_T *h5)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int retval;
|
|
|
|
|
|
|
|
/* If mem_nc_type is NC_NAT, it means we want to use the file type
|
|
|
|
* as the mem type as well. */
|
|
|
|
assert(mem_nc_type);
|
|
|
|
if (*mem_nc_type == NC_NAT)
|
2018-03-17 01:46:18 +08:00
|
|
|
*mem_nc_type = var->type_info->hdr.id;
|
2017-12-03 22:11:51 +08:00
|
|
|
assert(*mem_nc_type);
|
|
|
|
|
|
|
|
/* No NC_CHAR conversions, you pervert! */
|
2018-03-17 01:46:18 +08:00
|
|
|
if (var->type_info->hdr.id != *mem_nc_type &&
|
|
|
|
(var->type_info->hdr.id == NC_CHAR || *mem_nc_type == NC_CHAR))
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_ECHAR;
|
|
|
|
|
|
|
|
/* If we're in define mode, we can't read or write data. */
|
|
|
|
if (h5->flags & NC_INDEF)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
if (h5->cmode & NC_CLASSIC_MODEL)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EINDEFINE;
|
2010-06-03 21:24:43 +08:00
|
|
|
if ((retval = nc4_enddef_netcdf4_file(h5)))
|
2017-12-03 22:11:51 +08:00
|
|
|
return retval;
|
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef LOGGING
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
2018-06-09 01:18:08 +08:00
|
|
|
* @intarnal Print some debug info about dimensions to the log.
|
2017-12-03 22:11:51 +08:00
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static void
|
2014-11-12 06:17:08 +08:00
|
|
|
log_dim_info(NC_VAR_INFO_T *var, hsize_t *fdims, hsize_t *fmaxdims,
|
2010-06-03 21:24:43 +08:00
|
|
|
hsize_t *start, hsize_t *count)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int d2;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Print some debugging info... */
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((4, "%s: var name %s ndims %d", __func__, var->hdr.name, var->ndims));
|
2017-12-03 22:11:51 +08:00
|
|
|
LOG((4, "File space, and requested:"));
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
{
|
2014-11-12 06:17:08 +08:00
|
|
|
LOG((4, "fdims[%d]=%Ld fmaxdims[%d]=%Ld", d2, fdims[d2], d2,
|
2010-06-03 21:24:43 +08:00
|
|
|
fmaxdims[d2]));
|
|
|
|
LOG((4, "start[%d]=%Ld count[%d]=%Ld", d2, start[d2], d2, count[d2]));
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
#endif /* LOGGING */
|
|
|
|
|
2015-08-16 06:26:35 +08:00
|
|
|
#ifdef USE_PARALLEL4
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Set the parallel access for a var (collective
|
|
|
|
* vs. independent).
|
|
|
|
*
|
|
|
|
* @param h5 Pointer to HDF5 file info struct.
|
|
|
|
* @param var Pointer to var info struct.
|
|
|
|
* @param xfer_plistid H5FD_MPIO_COLLECTIVE or H5FD_MPIO_INDEPENDENT.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
|
|
|
set_par_access(NC_HDF5_FILE_INFO_T *h5, NC_VAR_INFO_T *var, hid_t xfer_plistid)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
/* If netcdf is built with parallel I/O, then parallel access can
|
|
|
|
* be used, and, if this file was opened or created for parallel
|
|
|
|
* access, we need to set the transfer mode. */
|
|
|
|
if (h5->parallel)
|
|
|
|
{
|
2014-11-12 06:24:38 +08:00
|
|
|
H5FD_mpio_xfer_t hdf5_xfer_mode;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Decide on collective or independent. */
|
|
|
|
hdf5_xfer_mode = (var->parallel_access != NC_INDEPENDENT) ?
|
2017-12-03 22:11:51 +08:00
|
|
|
H5FD_MPIO_COLLECTIVE : H5FD_MPIO_INDEPENDENT;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* Set the mode in the transfer property list. */
|
|
|
|
if (H5Pset_dxpl_mpio(xfer_plistid, hdf5_xfer_mode) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EPARINIT;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2014-11-12 06:17:08 +08:00
|
|
|
LOG((4, "%s: %d H5FD_MPIO_COLLECTIVE: %d H5FD_MPIO_INDEPENDENT: %d",
|
2013-12-01 13:20:28 +08:00
|
|
|
__func__, (int)hdf5_xfer_mode, H5FD_MPIO_COLLECTIVE, H5FD_MPIO_INDEPENDENT));
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-12-03 19:06:56 +08:00
|
|
|
/**
|
|
|
|
* @internal Write an array of data to a variable.
|
|
|
|
*
|
|
|
|
* @param nc Pointer to the file NC struct.
|
|
|
|
* @param ncid File ID.
|
|
|
|
* @param varid Variable ID.
|
2018-04-24 06:38:08 +08:00
|
|
|
* @param startp Array of start indices.
|
2017-12-03 19:06:56 +08:00
|
|
|
* @param countp Array of counts.
|
|
|
|
* @param mem_nc_type The type of the data in memory.
|
2018-06-09 05:50:39 +08:00
|
|
|
* @param is_long True only if NC_LONG is the memory type.
|
2017-12-03 19:06:56 +08:00
|
|
|
* @param data The data to be written.
|
|
|
|
*
|
2017-12-03 22:11:51 +08:00
|
|
|
* @returns ::NC_NOERR No error.
|
|
|
|
* @returns ::NC_EBADID Bad ncid.
|
|
|
|
* @returns ::NC_ENOTVAR Var not found.
|
|
|
|
* @returns ::NC_EHDFERR HDF5 function returned error.
|
|
|
|
* @returns ::NC_EINVALCOORDS Incorrect start.
|
|
|
|
* @returns ::NC_EEDGE Incorrect start/count.
|
|
|
|
* @returns ::NC_ENOMEM Out of memory.
|
|
|
|
* @returns ::NC_EMPI MPI library error (parallel only)
|
|
|
|
* @returns ::NC_ECANTEXTEND Can't extend dimension for write.
|
|
|
|
* @returns ::NC_ERANGE Data conversion error.
|
2017-12-03 19:06:56 +08:00
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-11-12 06:17:08 +08:00
|
|
|
int
nc4_put_vara(NC *nc, int ncid, int varid, const size_t *startp,
             const size_t *countp, nc_type mem_nc_type, int is_long, void *data)
{
   NC_GRP_INFO_T *grp;
   NC_HDF5_FILE_INFO_T *h5;
   NC_VAR_INFO_T *var;
   NC_DIM_INFO_T *dim;
   hid_t file_spaceid = 0, mem_spaceid = 0, xfer_plistid = 0;
   /* Per-dimension target extents, kept as unsigned long long so they
    * can be exchanged with MPI_UNSIGNED_LONG_LONG in the parallel case. */
   long long unsigned xtend_size[NC_MAX_VAR_DIMS];
   hsize_t fdims[NC_MAX_VAR_DIMS], fmaxdims[NC_MAX_VAR_DIMS];
   hsize_t start[NC_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
   char *name_to_use;
   int need_to_extend = 0;
#ifdef USE_PARALLEL4
   int extend_possible = 0;
#endif
   int retval = NC_NOERR, range_error = 0, i, d2;
   /* bufr is either the caller's buffer (no conversion) or a scratch
    * buffer holding the data converted to the file type. */
   void *bufr = NULL;
#ifndef HDF5_CONVERT
   int need_to_convert = 0;
   size_t len = 1;
#endif
#ifdef HDF5_CONVERT
   hid_t mem_typeid = 0;
#endif

   /* Find our metadata for this file, group, and var. */
   assert(nc);
   if ((retval = nc4_find_g_var_nc(nc, ncid, varid, &grp, &var)))
      return retval;
   h5 = NC4_DATA(nc);
   assert(grp && h5 && var && var->hdr.name);

   LOG((3, "%s: var->hdr.name %s mem_nc_type %d is_long %d",
        __func__, var->hdr.name, mem_nc_type, is_long));

   /* Check some stuff about the type and the file. If the file must
    * be switched from define mode, it happens here. */
   if ((retval = check_for_vara(&mem_nc_type, var, h5)))
      return retval;

   /* Convert from size_t and ptrdiff_t to hssize_t, and hsize_t. */
   for (i = 0; i < var->ndims; i++)
   {
      start[i] = startp[i];
      count[i] = countp[i];
   }

   /* Open this dataset if necessary, also checking for a weird case:
    * a non-coordinate (and non-scalar) variable that has the same
    * name as a dimension. Such variables are stored in HDF5 under a
    * prefixed name (NON_COORD_PREPEND). */
   if (var->hdf5_name && strlen(var->hdf5_name) >= strlen(NON_COORD_PREPEND) &&
       strncmp(var->hdf5_name, NON_COORD_PREPEND, strlen(NON_COORD_PREPEND)) == 0 &&
       var->ndims)
      name_to_use = var->hdf5_name;
   else
      name_to_use = var->hdr.name;
   if (!var->hdf_datasetid)
      if ((var->hdf_datasetid = H5Dopen2(grp->hdf_grpid, name_to_use, H5P_DEFAULT)) < 0)
         return NC_ENOTVAR;

   /* Get file space of data. */
   if ((file_spaceid = H5Dget_space(var->hdf_datasetid)) < 0)
      BAIL(NC_EHDFERR);

   /* Check to ensure the user selection is
    * valid. H5Sget_simple_extent_dims gets the sizes of all the dims
    * and put them in fdims. */
   if (H5Sget_simple_extent_dims(file_spaceid, fdims, fmaxdims) < 0)
      BAIL(NC_EHDFERR);

#ifdef LOGGING
   log_dim_info(var, fdims, fmaxdims, start, count);
#endif

   /* Check dimension bounds. Remember that unlimited dimensions can
    * put data beyond their current length, so only fixed-size dims
    * are bounds-checked here; unlimited dims are handled in the
    * extend logic below. */
   for (d2 = 0; d2 < var->ndims; d2++)
   {
      dim = var->dim[d2];
      assert(dim && dim->hdr.id == var->dimids[d2]);
      if (!dim->unlimited)
      {
#ifdef RELAX_COORD_BOUND
         /* Relaxed rule: a start exactly at the dim length is legal
          * for a zero-count (degenerate) access. */
         if (start[d2] > (hssize_t)fdims[d2] ||
             (start[d2] == (hssize_t)fdims[d2] && count[d2] > 0))
#else
         if (start[d2] >= (hssize_t)fdims[d2])
#endif
            BAIL_QUIET(NC_EINVALCOORDS);
         if (start[d2] + count[d2] > fdims[d2])
            BAIL_QUIET(NC_EEDGE);
      }
   }

   /* Now you would think that no one would be crazy enough to write
      a scalar dataspace with one of the array function calls, but you
      would be wrong. So let's check to see if the dataset is
      scalar. If it is, we won't try to set up a hyperslab. */
   if (H5Sget_simple_extent_type(file_spaceid) == H5S_SCALAR)
   {
      if ((mem_spaceid = H5Screate(H5S_SCALAR)) < 0)
         BAIL(NC_EHDFERR);
   }
   else
   {
      if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, start, NULL,
                              count, NULL) < 0)
         BAIL(NC_EHDFERR);

      /* Create a space for the memory, just big enough to hold the slab
         we want. */
      if ((mem_spaceid = H5Screate_simple(var->ndims, count, NULL)) < 0)
         BAIL(NC_EHDFERR);
   }

#ifndef HDF5_CONVERT
   /* Are we going to convert any data? (No converting of compound or
    * opaque types.) Conversion is also needed when the caller passed
    * C longs (is_long) for an NC_INT variable. */
   if ((mem_nc_type != var->type_info->hdr.id || (var->type_info->hdr.id == NC_INT && is_long)) &&
       mem_nc_type != NC_COMPOUND && mem_nc_type != NC_OPAQUE)
   {
      size_t file_type_size;

      /* We must convert - allocate a buffer. */
      need_to_convert++;
      if (var->ndims)
         for (d2=0; d2<var->ndims; d2++)
            len *= countp[d2];
      LOG((4, "converting data for var %s type=%d len=%d", var->hdr.name,
           var->type_info->hdr.id, len));

      /* Later on, we will need to know the size of this type in the
       * file. */
      assert(var->type_info->size);
      file_type_size = var->type_info->size;

      /* If we're reading, we need bufr to have enough memory to store
       * the data in the file. If we're writing, we need bufr to be
       * big enough to hold all the data in the file's type. */
      if(len > 0)
         if (!(bufr = malloc(len * file_type_size)))
            BAIL(NC_ENOMEM);
   }
   else
#endif /* ifndef HDF5_CONVERT */
      /* No conversion needed: write straight from the caller's buffer. */
      bufr = data;

#ifdef HDF5_CONVERT
   /* Get the HDF type of the data in memory. */
   if ((retval = nc4_get_hdf_typeid(h5, mem_nc_type, &mem_typeid,
                                    var->type_info->endianness)))
      BAIL(retval);
#endif

   /* Create the data transfer property list. */
   if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0)
      BAIL(NC_EHDFERR);

   /* Apply the callback function which will detect range
    * errors. Which one to call depends on the length of the
    * destination buffer type. */
#ifdef HDF5_CONVERT
   if (H5Pset_type_conv_cb(xfer_plistid, except_func, &range_error) < 0)
      BAIL(NC_EHDFERR);
#endif

#ifdef USE_PARALLEL4
   /* Set up parallel I/O, if needed. */
   if ((retval = set_par_access(h5, var, xfer_plistid)))
      BAIL(retval);
#endif

   /* Read/write this hyperslab into memory. */
   /* Does the dataset have to be extended? If it's already
      extended to the required size, it will do no harm to reextend
      it to that size. */
   if (var->ndims)
   {
      for (d2 = 0; d2 < var->ndims; d2++)
      {
         dim = var->dim[d2];
         assert(dim && dim->hdr.id == var->dimids[d2]);
         if (dim->unlimited)
         {
#ifdef USE_PARALLEL4
            extend_possible = 1;
#endif
            if (start[d2] + count[d2] > fdims[d2])
            {
               xtend_size[d2] = (long long unsigned)(start[d2] + count[d2]);
               need_to_extend++;
            }
            else
               xtend_size[d2] = (long long unsigned)fdims[d2];

            /* Track the largest extent written so far in the
             * in-memory dim metadata. */
            if (start[d2] + count[d2] > dim->len)
            {
               dim->len = start[d2] + count[d2];
               dim->extended = NC_TRUE;
            }
         }
         else
         {
            xtend_size[d2] = (long long unsigned)dim->len;
         }
      }

#ifdef USE_PARALLEL4
      /* Check if anyone wants to extend */
      if (extend_possible && h5->parallel && NC_COLLECTIVE == var->parallel_access)
      {
         /* Form consensus opinion among all processes about whether to perform
          * collective I/O
          */
         if(MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &need_to_extend, 1, MPI_INT, MPI_BOR, h5->comm))
            BAIL(NC_EMPI);
      }
#endif /* USE_PARALLEL4 */

      /* If we need to extend it, we also need a new file_spaceid
         to reflect the new size of the space. */
      if (need_to_extend)
      {
         LOG((4, "extending dataset"));
#ifdef USE_PARALLEL4
         if (h5->parallel)
         {
            /* Extending is a collective HDF5 operation; independent
             * access cannot do it. */
            if(NC_COLLECTIVE != var->parallel_access)
               BAIL(NC_ECANTEXTEND);

            /* Reach consensus about dimension sizes to extend to */
            if(MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, xtend_size, var->ndims, MPI_UNSIGNED_LONG_LONG, MPI_MAX, h5->comm))
               BAIL(NC_EMPI);
         }
#endif /* USE_PARALLEL4 */
         /* Convert xtend_size back to hsize_t for use with H5Dset_extent */
         for (d2 = 0; d2 < var->ndims; d2++)
            fdims[d2] = (hsize_t)xtend_size[d2];

         if (H5Dset_extent(var->hdf_datasetid, fdims) < 0)
            BAIL(NC_EHDFERR);
         /* The old file space describes the pre-extend extents, so it
          * must be closed and re-fetched, and the hyperslab
          * re-selected against the new space. */
         if (file_spaceid > 0 && H5Sclose(file_spaceid) < 0)
            BAIL2(NC_EHDFERR);
         if ((file_spaceid = H5Dget_space(var->hdf_datasetid)) < 0)
            BAIL(NC_EHDFERR);
         if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
                                 start, NULL, count, NULL) < 0)
            BAIL(NC_EHDFERR);
      }
   }

#ifndef HDF5_CONVERT
   /* Do we need to convert the data? */
   if (need_to_convert)
   {
      if ((retval = nc4_convert_type(data, bufr, mem_nc_type, var->type_info->hdr.id,
                                     len, &range_error, var->fill_value,
                                     (h5->cmode & NC_CLASSIC_MODEL), is_long, 0)))
         BAIL(retval);
   }
#endif

   /* Write the data. At last! */
   LOG((4, "about to H5Dwrite datasetid 0x%x mem_spaceid 0x%x "
        "file_spaceid 0x%x", var->hdf_datasetid, mem_spaceid, file_spaceid));
   if (H5Dwrite(var->hdf_datasetid, var->type_info->hdf_typeid,
                mem_spaceid, file_spaceid, xfer_plistid, bufr) < 0)
      BAIL(NC_EHDFERR);

   /* Remember that we have written to this var so that Fill Value
    * can't be set for it. */
   if (!var->written_to)
      var->written_to = NC_TRUE;

   /* For strict netcdf-3 rules, ignore erange errors between UBYTE
    * and BYTE types. */
   if ((h5->cmode & NC_CLASSIC_MODEL) &&
       (var->type_info->hdr.id == NC_UBYTE || var->type_info->hdr.id == NC_BYTE) &&
       (mem_nc_type == NC_UBYTE || mem_nc_type == NC_BYTE) &&
       range_error)
      range_error = 0;

   /* Cleanup: close every HDF5 id we opened, then free the conversion
    * buffer (only allocated when need_to_convert is set). */
exit:
#ifdef HDF5_CONVERT
   if (mem_typeid > 0 && H5Tclose(mem_typeid) < 0)
      BAIL2(NC_EHDFERR);
#endif
   if (file_spaceid > 0 && H5Sclose(file_spaceid) < 0)
      BAIL2(NC_EHDFERR);
   if (mem_spaceid > 0 && H5Sclose(mem_spaceid) < 0)
      BAIL2(NC_EHDFERR);
   if (xfer_plistid && (H5Pclose(xfer_plistid) < 0))
      BAIL2(NC_EPARINIT);
#ifndef HDF5_CONVERT
   if (need_to_convert && bufr) free(bufr);
#endif

   /* If there was an error return it, otherwise return any potential
      range error value. If none, return NC_NOERR as usual.*/
   if (retval)
      return retval;
   if (range_error)
      return NC_ERANGE;
   return NC_NOERR;
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Read an array of data from a variable.
|
|
|
|
*
|
|
|
|
* @param nc Pointer to the file NC struct.
|
|
|
|
* @param ncid File ID.
|
|
|
|
* @param varid Variable ID.
|
2018-04-24 06:38:08 +08:00
|
|
|
* @param startp Array of start indices.
|
2017-12-03 22:11:51 +08:00
|
|
|
* @param countp Array of counts.
|
|
|
|
* @param mem_nc_type The type of the data in memory. (Convert to this
|
|
|
|
* type from file type.)
|
2018-06-09 05:50:39 +08:00
|
|
|
* @param is_long True only if NC_LONG is the memory type.
|
2017-12-03 22:11:51 +08:00
|
|
|
 * @param data The buffer into which the read data will be placed.
|
|
|
|
*
|
|
|
|
* @returns ::NC_NOERR No error.
|
|
|
|
* @returns ::NC_EBADID Bad ncid.
|
|
|
|
* @returns ::NC_ENOTVAR Var not found.
|
|
|
|
* @returns ::NC_EHDFERR HDF5 function returned error.
|
|
|
|
* @returns ::NC_EINVALCOORDS Incorrect start.
|
|
|
|
* @returns ::NC_EEDGE Incorrect start/count.
|
|
|
|
* @returns ::NC_ENOMEM Out of memory.
|
|
|
|
* @returns ::NC_EMPI MPI library error (parallel only)
|
|
|
|
* @returns ::NC_ECANTEXTEND Can't extend dimension for write.
|
|
|
|
* @returns ::NC_ERANGE Data conversion error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-11-12 06:17:08 +08:00
|
|
|
int
|
|
|
|
nc4_get_vara(NC *nc, int ncid, int varid, const size_t *startp,
|
2018-06-09 05:50:39 +08:00
|
|
|
const size_t *countp, nc_type mem_nc_type, int is_long, void *data)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_GRP_INFO_T *grp;
|
|
|
|
NC_HDF5_FILE_INFO_T *h5;
|
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_DIM_INFO_T *dim;
|
|
|
|
hid_t file_spaceid = 0, mem_spaceid = 0;
|
|
|
|
hid_t xfer_plistid = 0;
|
|
|
|
size_t file_type_size;
|
|
|
|
hsize_t *xtend_size = NULL, count[NC_MAX_VAR_DIMS];
|
|
|
|
hsize_t fdims[NC_MAX_VAR_DIMS], fmaxdims[NC_MAX_VAR_DIMS];
|
|
|
|
hsize_t start[NC_MAX_VAR_DIMS];
|
|
|
|
char *name_to_use;
|
|
|
|
void *fillvalue = NULL;
|
|
|
|
int no_read = 0, provide_fill = 0;
|
|
|
|
int fill_value_size[NC_MAX_VAR_DIMS];
|
|
|
|
int scalar = 0, retval = NC_NOERR, range_error = 0, i, d2;
|
|
|
|
void *bufr = NULL;
|
2018-06-09 05:50:39 +08:00
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
hid_t mem_typeid = 0;
|
|
|
|
#endif
|
|
|
|
#ifndef HDF5_CONVERT
|
2017-12-03 22:11:51 +08:00
|
|
|
int need_to_convert = 0;
|
|
|
|
size_t len = 1;
|
2018-06-09 05:50:39 +08:00
|
|
|
#endif
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Find our metadata for this file, group, and var. */
|
|
|
|
assert(nc);
|
|
|
|
if ((retval = nc4_find_g_var_nc(nc, ncid, varid, &grp, &var)))
|
|
|
|
return retval;
|
|
|
|
h5 = NC4_DATA(nc);
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(grp && h5 && var && var->hdr.name);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2018-06-09 05:50:39 +08:00
|
|
|
LOG((3, "%s: var->hdr.name %s mem_nc_type %d is_long %d",
|
|
|
|
__func__, var->hdr.name, mem_nc_type, is_long));
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Check some stuff about the type and the file. */
|
|
|
|
if ((retval = check_for_vara(&mem_nc_type, var, h5)))
|
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Convert from size_t and ptrdiff_t to hssize_t, and hsize_t. */
|
|
|
|
for (i = 0; i < var->ndims; i++)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
start[i] = startp[i];
|
|
|
|
count[i] = countp[i];
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Open this dataset if necessary, also checking for a weird case:
|
|
|
|
* a non-coordinate (and non-scalar) variable that has the same
|
|
|
|
* name as a dimension. */
|
|
|
|
if (var->hdf5_name && strlen(var->hdf5_name) >= strlen(NON_COORD_PREPEND) &&
|
|
|
|
strncmp(var->hdf5_name, NON_COORD_PREPEND, strlen(NON_COORD_PREPEND)) == 0 &&
|
|
|
|
var->ndims)
|
|
|
|
name_to_use = var->hdf5_name;
|
|
|
|
else
|
2018-03-17 01:46:18 +08:00
|
|
|
name_to_use = var->hdr.name;
|
2017-12-03 22:11:51 +08:00
|
|
|
if (!var->hdf_datasetid)
|
|
|
|
if ((var->hdf_datasetid = H5Dopen2(grp->hdf_grpid, name_to_use, H5P_DEFAULT)) < 0)
|
|
|
|
return NC_ENOTVAR;
|
|
|
|
|
|
|
|
/* Get file space of data. */
|
|
|
|
if ((file_spaceid = H5Dget_space(var->hdf_datasetid)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Check to ensure the user selection is
|
|
|
|
* valid. H5Sget_simple_extent_dims gets the sizes of all the dims
|
|
|
|
* and put them in fdims. */
|
|
|
|
if (H5Sget_simple_extent_dims(file_spaceid, fdims, fmaxdims) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
#ifdef LOGGING
|
2017-12-03 22:11:51 +08:00
|
|
|
log_dim_info(var, fdims, fmaxdims, start, count);
|
2014-11-12 06:24:38 +08:00
|
|
|
#endif
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Check dimension bounds. Remember that unlimited dimensions can
|
|
|
|
* put data beyond their current length. */
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++) {
|
|
|
|
dim = var->dim[d2];
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(dim && dim->hdr.id == var->dimids[d2]);
|
2017-12-03 22:11:51 +08:00
|
|
|
if (dim->unlimited)
|
2016-03-05 00:14:15 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
size_t ulen;
|
2016-03-05 00:14:15 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* We can't go beyond the largest current extent of
|
|
|
|
the unlimited dim. */
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((retval = NC4_inq_dim(ncid, dim->hdr.id, NULL, &ulen)))
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(retval);
|
2016-03-05 00:14:15 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Check for out of bound requests. */
|
2016-11-13 13:58:09 +08:00
|
|
|
#ifdef RELAX_COORD_BOUND
|
2017-12-03 22:11:51 +08:00
|
|
|
if (start[d2] > (hssize_t)ulen ||
|
|
|
|
(start[d2] == (hssize_t)ulen && count[d2] > 0))
|
2016-10-10 11:21:32 +08:00
|
|
|
#else
|
2017-12-03 22:11:51 +08:00
|
|
|
if (start[d2] >= (hssize_t)ulen && ulen > 0)
|
2016-10-10 11:21:32 +08:00
|
|
|
#endif
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL_QUIET(NC_EINVALCOORDS);
|
|
|
|
if (start[d2] + count[d2] > ulen)
|
|
|
|
BAIL_QUIET(NC_EEDGE);
|
|
|
|
|
|
|
|
/* Things get a little tricky here. If we're getting
|
|
|
|
a GET request beyond the end of this var's
|
|
|
|
current length in an unlimited dimension, we'll
|
|
|
|
later need to return the fill value for the
|
|
|
|
variable. */
|
|
|
|
if (start[d2] >= (hssize_t)fdims[d2])
|
|
|
|
fill_value_size[d2] = count[d2];
|
|
|
|
else if (start[d2] + count[d2] > fdims[d2])
|
|
|
|
fill_value_size[d2] = count[d2] - (fdims[d2] - start[d2]);
|
|
|
|
else
|
|
|
|
fill_value_size[d2] = 0;
|
|
|
|
count[d2] -= fill_value_size[d2];
|
|
|
|
if (fill_value_size[d2])
|
|
|
|
provide_fill++;
|
2016-03-05 00:14:15 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
else
|
2016-03-05 00:14:15 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Check for out of bound requests. */
|
2016-11-13 13:58:09 +08:00
|
|
|
#ifdef RELAX_COORD_BOUND
|
2017-12-03 22:11:51 +08:00
|
|
|
if (start[d2] > (hssize_t)fdims[d2] ||
|
|
|
|
(start[d2] == (hssize_t)fdims[d2] && count[d2] > 0))
|
2016-10-10 11:21:32 +08:00
|
|
|
#else
|
2017-12-03 22:11:51 +08:00
|
|
|
if (start[d2] >= (hssize_t)fdims[d2])
|
2016-10-10 11:21:32 +08:00
|
|
|
#endif
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL_QUIET(NC_EINVALCOORDS);
|
|
|
|
if (start[d2] + count[d2] > fdims[d2])
|
|
|
|
BAIL_QUIET(NC_EEDGE);
|
2016-03-05 00:14:15 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Set the fill value boundary */
|
|
|
|
fill_value_size[d2] = count[d2];
|
2016-03-05 00:14:15 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* A little quirk: if any of the count values are zero, don't
|
|
|
|
* read. */
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
if (count[d2] == 0)
|
|
|
|
no_read++;
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Later on, we will need to know the size of this type in the
|
|
|
|
* file. */
|
|
|
|
assert(var->type_info->size);
|
|
|
|
file_type_size = var->type_info->size;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
if (!no_read)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Now you would think that no one would be crazy enough to write
|
|
|
|
a scalar dataspace with one of the array function calls, but you
|
|
|
|
would be wrong. So let's check to see if the dataset is
|
|
|
|
scalar. If it is, we won't try to set up a hyperslab. */
|
|
|
|
if (H5Sget_simple_extent_type(file_spaceid) == H5S_SCALAR)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if ((mem_spaceid = H5Screate(H5S_SCALAR)) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2017-12-03 22:11:51 +08:00
|
|
|
scalar++;
|
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
else
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
|
|
|
|
start, NULL, count, NULL) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Create a space for the memory, just big enough to hold the slab
|
|
|
|
we want. */
|
|
|
|
if ((mem_spaceid = H5Screate_simple(var->ndims, count, NULL)) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2012-04-20 23:42:55 +08:00
|
|
|
|
|
|
|
/* Fix bug when reading HDF5 files with variable of type
|
|
|
|
* fixed-length string. We need to make it look like a
|
|
|
|
* variable-length string, because that's all netCDF-4 data
|
|
|
|
* model supports, lacking anonymous dimensions. So
|
|
|
|
* variable-length strings are in allocated memory that user has
|
|
|
|
* to free, which we allocate here. */
|
2014-11-12 06:17:08 +08:00
|
|
|
if(var->type_info->nc_type_class == NC_STRING &&
|
2014-11-12 06:24:38 +08:00
|
|
|
H5Tget_size(var->type_info->hdf_typeid) > 1 &&
|
|
|
|
!H5Tis_variable_str(var->type_info->hdf_typeid)) {
|
2017-12-03 22:11:51 +08:00
|
|
|
hsize_t fstring_len;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((fstring_len = H5Tget_size(var->type_info->hdf_typeid)) == 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if (!(*(char **)data = malloc(1 + fstring_len)))
|
|
|
|
BAIL(NC_ENOMEM);
|
|
|
|
bufr = *(char **)data;
|
2012-04-20 23:42:55 +08:00
|
|
|
}
|
2018-06-09 05:50:39 +08:00
|
|
|
#ifndef HDF5_CONVERT
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Are we going to convert any data? (No converting of compound or
|
|
|
|
* opaque types.) */
|
2018-06-06 04:40:49 +08:00
|
|
|
if (mem_nc_type != var->type_info->hdr.id &&
|
2010-06-03 21:24:43 +08:00
|
|
|
mem_nc_type != NC_COMPOUND && mem_nc_type != NC_OPAQUE)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
/* We must convert - allocate a buffer. */
|
|
|
|
need_to_convert++;
|
|
|
|
if (var->ndims)
|
2010-10-06 23:50:42 +08:00
|
|
|
for (d2 = 0; d2 < var->ndims; d2++)
|
2017-12-03 22:11:51 +08:00
|
|
|
len *= countp[d2];
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((4, "converting data for var %s type=%d len=%d", var->hdr.name,
|
|
|
|
var->type_info->hdr.id, len));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* If we're reading, we need bufr to have enough memory to store
|
|
|
|
* the data in the file. If we're writing, we need bufr to be
|
|
|
|
* big enough to hold all the data in the file's type. */
|
|
|
|
if(len > 0)
|
2015-05-21 01:11:19 +08:00
|
|
|
if (!(bufr = malloc(len * file_type_size)))
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_ENOMEM);
|
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
else
|
2018-06-09 05:50:39 +08:00
|
|
|
#endif /* ifndef HDF5_CONVERT */
|
2017-12-03 22:11:51 +08:00
|
|
|
if(!bufr)
|
|
|
|
bufr = data;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2018-06-09 05:50:39 +08:00
|
|
|
/* Get the HDF type of the data in memory. */
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
if ((retval = nc4_get_hdf_typeid(h5, mem_nc_type, &mem_typeid,
|
|
|
|
var->type_info->endianness)))
|
|
|
|
BAIL(retval);
|
|
|
|
#endif
|
|
|
|
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Create the data transfer property list. */
|
|
|
|
if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2018-06-09 05:50:39 +08:00
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
/* Apply the callback function which will detect range
|
|
|
|
* errors. Which one to call depends on the length of the
|
|
|
|
* destination buffer type. */
|
|
|
|
if (H5Pset_type_conv_cb(xfer_plistid, except_func, &range_error) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
#endif
|
|
|
|
|
2015-08-16 06:26:35 +08:00
|
|
|
#ifdef USE_PARALLEL4
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Set up parallel I/O, if needed. */
|
|
|
|
if ((retval = set_par_access(h5, var, xfer_plistid)))
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(retval);
|
2010-06-03 21:24:43 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Read this hyperslab into memory. */
|
|
|
|
LOG((5, "About to H5Dread some data..."));
|
2014-11-12 06:17:08 +08:00
|
|
|
if (H5Dread(var->hdf_datasetid, var->type_info->native_hdf_typeid,
|
2010-06-03 21:24:43 +08:00
|
|
|
mem_spaceid, file_spaceid, xfer_plistid, bufr) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2018-06-09 05:50:39 +08:00
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
/* Eventually the block below will go away. Right now it's
|
|
|
|
needed to support conversions between int/float, and range
|
|
|
|
checking converted data in the netcdf way. These features are
|
|
|
|
being added to HDF5 at the HDF5 World Hall of Coding right
|
|
|
|
now, by a staff of thousands of programming gnomes. */
|
2010-06-03 21:24:43 +08:00
|
|
|
if (need_to_convert)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((retval = nc4_convert_type(bufr, data, var->type_info->hdr.id, mem_nc_type,
|
2017-12-03 22:11:51 +08:00
|
|
|
len, &range_error, var->fill_value,
|
2018-06-09 05:50:39 +08:00
|
|
|
(h5->cmode & NC_CLASSIC_MODEL), 0, is_long)))
|
2010-06-03 21:24:43 +08:00
|
|
|
BAIL(retval);
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* For strict netcdf-3 rules, ignore erange errors between UBYTE
|
|
|
|
* and BYTE types. */
|
|
|
|
if ((h5->cmode & NC_CLASSIC_MODEL) &&
|
2018-03-17 01:46:18 +08:00
|
|
|
(var->type_info->hdr.id == NC_UBYTE || var->type_info->hdr.id == NC_BYTE) &&
|
2017-12-03 22:11:51 +08:00
|
|
|
(mem_nc_type == NC_UBYTE || mem_nc_type == NC_BYTE) &&
|
|
|
|
range_error)
|
2010-06-03 21:24:43 +08:00
|
|
|
range_error = 0;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2018-06-09 05:50:39 +08:00
|
|
|
#endif
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* For strict netcdf-3 rules, ignore erange errors between UBYTE
|
|
|
|
* and BYTE types. */
|
|
|
|
if ((h5->cmode & NC_CLASSIC_MODEL) &&
|
2018-03-17 01:46:18 +08:00
|
|
|
(var->type_info->hdr.id == NC_UBYTE || var->type_info->hdr.id == NC_BYTE) &&
|
2010-06-03 21:24:43 +08:00
|
|
|
(mem_nc_type == NC_UBYTE || mem_nc_type == NC_BYTE) &&
|
|
|
|
range_error)
|
2017-12-03 22:11:51 +08:00
|
|
|
range_error = 0;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
} /* endif ! no_read */
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
else {
|
2016-06-10 04:16:20 +08:00
|
|
|
#ifdef USE_PARALLEL4 /* Start block contributed by HDF group. */
|
2017-12-03 22:11:51 +08:00
|
|
|
/* For collective IO read, some processes may not have any element for reading.
|
|
|
|
Collective requires all processes to participate, so we use H5Sselect_none
|
|
|
|
for these processes. */
|
|
|
|
if(var->parallel_access == NC_COLLECTIVE) {
|
|
|
|
|
|
|
|
/* Create the data transfer property list. */
|
|
|
|
if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
if ((retval = set_par_access(h5, var, xfer_plistid)))
|
|
|
|
BAIL(retval);
|
|
|
|
|
|
|
|
if (H5Sselect_none(file_spaceid)<0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Since no element will be selected, we just get the memory space the same as the file space.
|
|
|
|
*/
|
|
|
|
if((mem_spaceid = H5Dget_space(var->hdf_datasetid))<0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if (H5Sselect_none(mem_spaceid)<0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Read this hyperslab into memory. */
|
|
|
|
LOG((5, "About to H5Dread some data..."));
|
|
|
|
if (H5Dread(var->hdf_datasetid, var->type_info->native_hdf_typeid,
|
|
|
|
mem_spaceid, file_spaceid, xfer_plistid, bufr) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
2016-06-10 04:16:20 +08:00
|
|
|
#endif /* End ifdef USE_PARALLEL4 */
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
/* Now we need to fake up any further data that was asked for,
|
|
|
|
using the fill values instead. First skip past the data we
|
|
|
|
just read, if any. */
|
|
|
|
if (!scalar && provide_fill)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
void *filldata;
|
2012-02-02 07:52:10 +08:00
|
|
|
size_t real_data_size = 0;
|
|
|
|
size_t fill_len;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* Skip past the real data we've already read. */
|
|
|
|
if (!no_read)
|
2017-12-03 22:11:51 +08:00
|
|
|
for (real_data_size = file_type_size, d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
real_data_size *= (count[d2] - start[d2]);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* Get the fill value from the HDF5 variable. Memory will be
|
|
|
|
* allocated. */
|
|
|
|
if (get_fill_value(h5, var, &fillvalue) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* How many fill values do we need? */
|
2014-11-12 06:17:08 +08:00
|
|
|
for (fill_len = 1, d2 = 0; d2 < var->ndims; d2++)
|
2017-12-03 22:11:51 +08:00
|
|
|
fill_len *= (fill_value_size[d2] ? fill_value_size[d2] : 1);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* Copy the fill value into the rest of the data buffer. */
|
|
|
|
filldata = (char *)data + real_data_size;
|
|
|
|
for (i = 0; i < fill_len; i++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2016-02-19 05:45:57 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
if (var->type_info->nc_type_class == NC_STRING)
|
|
|
|
{
|
|
|
|
if (*(char **)fillvalue)
|
Corrected "BAIL" macros to avoid infinite loop when logging is disabled and an
error occurs after an "exit:" label.
Corrected a dozen Coverity errors (mainly allocation issues, along with a few
other things):
711711, 711802, 711803, 711905, 970825, 996123, 996124, 1025787,
1047274, 1130013, 1130014, 1139538
Refactored internal fill-value code to correctly handle string types, and
especially to allow NULL pointers and null strings (ie. "") to be
distinguished. The code now avoids partially aliasing the two together
(which only happened on the 'write' side of things and wasn't reflected on
the 'read' side, adding to the previous confusion).
Probably still weak on handling fill-values of variable-length and compound
datatypes.
Refactored the recursive metadata reads a bit more, to process HDF5 named
datatypes and datasets immediately, avoiding chewing up memory for those
types of objects, etc.
Finished uncommenting and updating the nc_test4/tst_fills2.c code (as I'm
proceeding alphabetically through the nc_test4 code files).
2013-12-29 15:12:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
if (!(*(char **)filldata = strdup(*(char **)fillvalue)))
|
|
|
|
BAIL(NC_ENOMEM);
|
Corrected "BAIL" macros to avoid infinite loop when logging is disabled and an
error occurs after an "exit:" label.
Corrected a dozen Coverity errors (mainly allocation issues, along with a few
other things):
711711, 711802, 711803, 711905, 970825, 996123, 996124, 1025787,
1047274, 1130013, 1130014, 1139538
Refactored internal fill-value code to correctly handle string types, and
especially to allow NULL pointers and null strings (ie. "") to be
distinguished. The code now avoids partially aliasing the two together
(which only happened on the 'write' side of things and wasn't reflected on
the 'read' side, adding to the previous confusion).
Probably still weak on handling fill-values of variable-length and compound
datatypes.
Refactored the recursive metadata reads a bit more, to process HDF5 named
datatypes and datasets immediately, avoiding chewing up memory for those
types of objects, etc.
Finished uncommenting and updating the nc_test4/tst_fills2.c code (as I'm
proceeding alphabetically through the nc_test4 code files).
2013-12-29 15:12:43 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
else
|
|
|
|
*(char **)filldata = NULL;
|
|
|
|
}
|
|
|
|
else if(var->type_info->nc_type_class == NC_VLEN) {
|
2016-02-19 06:42:03 +08:00
|
|
|
if(fillvalue) {
|
2017-12-03 22:11:51 +08:00
|
|
|
memcpy(filldata,fillvalue,file_type_size);
|
2016-02-19 06:42:03 +08:00
|
|
|
} else {
|
2017-12-03 22:11:51 +08:00
|
|
|
*(char **)filldata = NULL;
|
2016-02-19 06:42:03 +08:00
|
|
|
}
|
2018-06-09 05:50:39 +08:00
|
|
|
} else
|
|
|
|
memcpy(filldata, fillvalue, file_type_size);
|
2017-12-03 22:11:51 +08:00
|
|
|
filldata = (char *)filldata + file_type_size;
|
|
|
|
}
|
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
exit:
|
2018-06-09 05:50:39 +08:00
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
if (mem_typeid > 0 && H5Tclose(mem_typeid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
#endif
|
2017-12-03 22:11:51 +08:00
|
|
|
if (file_spaceid > 0)
|
|
|
|
{
|
2010-10-06 23:50:42 +08:00
|
|
|
if (H5Sclose(file_spaceid) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
if (mem_spaceid > 0)
|
|
|
|
{
|
2010-10-06 23:50:42 +08:00
|
|
|
if (H5Sclose(mem_spaceid) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
if (xfer_plistid > 0)
|
|
|
|
{
|
2010-10-06 23:50:42 +08:00
|
|
|
if (H5Pclose(xfer_plistid) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
}
|
2018-06-09 05:50:39 +08:00
|
|
|
#ifndef HDF5_CONVERT
|
2017-12-03 22:11:51 +08:00
|
|
|
if (need_to_convert && bufr != NULL)
|
|
|
|
free(bufr);
|
2018-06-09 05:50:39 +08:00
|
|
|
#endif
|
2017-12-03 22:11:51 +08:00
|
|
|
if (xtend_size)
|
|
|
|
free(xtend_size);
|
|
|
|
if (fillvalue)
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
if (var->type_info->nc_type_class == NC_VLEN)
|
2017-12-03 22:11:51 +08:00
|
|
|
nc_free_vlen((nc_vlen_t *)fillvalue);
|
2014-02-12 07:12:08 +08:00
|
|
|
else if (var->type_info->nc_type_class == NC_STRING && *(char **)fillvalue)
|
2017-12-03 22:11:51 +08:00
|
|
|
free(*(char **)fillvalue);
|
Refactored read_scale(), memio_new(), var_create_dataset() and makespecial()
to clean up resources properly on failure.
Refactored doubly-linked list code for objects in the libsrc4 directory,
cleaning up the add/del routines, breaking out the common next/prev
pointers into a struct and extracting the add/del operations on them,
changed the list of dims to add new dims in the same order as the other
types, made all add routines able to optionally return a pointer to the
newly created object.
Removed some dead code (pg_var(), nc4_pg_var1(), nc4_pg_varm(), misc. small
routines, etc)
Fixed fill value handling for string types in nc4_get_vara().
Changed many malloc()+strcpy() pairs into calls to strdup().
Cleaned up misc. other minor Coverity issues.
2013-12-08 17:29:26 +08:00
|
|
|
free(fillvalue);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If there was an error return it, otherwise return any potential
|
|
|
|
range error value. If none, return NC_NOERR as usual.*/
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
if (range_error)
|
|
|
|
return NC_ERANGE;
|
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
2018-06-09 01:18:08 +08:00
|
|
|
* @internal Write an attribute.
|
2017-12-03 22:11:51 +08:00
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param varid Variable ID or NC_GLOBAL.
|
|
|
|
* @param att Pointer to attribute info struct.
|
|
|
|
*
|
|
|
|
* @returns ::NC_NOERR No error.
|
|
|
|
* @returns ::NC_ENOTVAR Variable not found.
|
|
|
|
* @returns ::NC_EPERM Read-only file.
|
|
|
|
* @returns ::NC_EHDFERR HDF5 returned error.
|
|
|
|
* @returns ::NC_EATTMETA HDF5 returned error with attribute calls.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-12-01 13:37:19 +08:00
|
|
|
static int
|
2014-11-24 23:36:58 +08:00
|
|
|
put_att_grpa(NC_GRP_INFO_T *grp, int varid, NC_ATT_INFO_T *att)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
hid_t datasetid = 0, locid;
|
|
|
|
hid_t attid = 0, spaceid = 0, file_typeid = 0;
|
|
|
|
hsize_t dims[1]; /* netcdf attributes always 1-D. */
|
|
|
|
htri_t attr_exists;
|
|
|
|
int retval = NC_NOERR;
|
|
|
|
void *data;
|
|
|
|
int phoney_data = 99;
|
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(att->hdr.name);
|
|
|
|
LOG((3, "%s: varid %d att->hdr.id %d att->hdr.name %s att->nc_typeid %d att->len %d",
|
|
|
|
__func__, varid, att->hdr.id, att->hdr.name,
|
2017-12-03 22:11:51 +08:00
|
|
|
att->nc_typeid, att->len));
|
|
|
|
|
|
|
|
/* If the file is read-only, return an error. */
|
|
|
|
if (grp->nc4_info->no_write)
|
|
|
|
BAIL(NC_EPERM);
|
|
|
|
|
|
|
|
/* Get the hid to attach the attribute to, or read it from. */
|
|
|
|
if (varid == NC_GLOBAL)
|
|
|
|
locid = grp->hdf_grpid;
|
|
|
|
else
|
|
|
|
{
|
2014-11-24 23:36:58 +08:00
|
|
|
if ((retval = nc4_open_var_grp2(grp, varid, &datasetid)))
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(retval);
|
2014-11-24 23:36:58 +08:00
|
|
|
locid = datasetid;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-24 23:36:58 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Delete the att if it exists already. */
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((attr_exists = H5Aexists(locid, att->hdr.name)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if (attr_exists)
|
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
if (H5Adelete(locid, att->hdr.name) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the length ready, and find the HDF type we'll be
|
|
|
|
* writing. */
|
|
|
|
dims[0] = att->len;
|
|
|
|
if ((retval = nc4_get_hdf_typeid(grp->nc4_info, att->nc_typeid,
|
|
|
|
&file_typeid, 0)))
|
|
|
|
BAIL(retval);
|
|
|
|
|
|
|
|
/* Even if the length is zero, HDF5 won't let me write with a
|
|
|
|
* NULL pointer. So if the length of the att is zero, point to
|
|
|
|
* some phoney data (which won't be written anyway.)*/
|
|
|
|
if (!dims[0])
|
|
|
|
data = &phoney_data;
|
|
|
|
else if (att->data)
|
|
|
|
data = att->data;
|
|
|
|
else if (att->stdata)
|
|
|
|
data = att->stdata;
|
|
|
|
else
|
|
|
|
data = att->vldata;
|
|
|
|
|
|
|
|
/* NC_CHAR types require some extra work. The space ID is set to
|
|
|
|
* scalar, and the type is told how long the string is. If it's
|
|
|
|
* really zero length, set the size to 1. (The fact that it's
|
|
|
|
* really zero will be marked by the NULL dataspace, but HDF5
|
|
|
|
* doesn't allow me to set the size of the type to zero.)*/
|
|
|
|
if (att->nc_typeid == NC_CHAR)
|
|
|
|
{
|
2014-11-24 23:36:58 +08:00
|
|
|
size_t string_size = dims[0];
|
|
|
|
if (!string_size)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
string_size = 1;
|
|
|
|
if ((spaceid = H5Screate(H5S_NULL)) < 0)
|
2014-11-24 23:36:58 +08:00
|
|
|
BAIL(NC_EATTMETA);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-24 23:36:58 +08:00
|
|
|
else
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if ((spaceid = H5Screate(H5S_SCALAR)) < 0)
|
2014-11-24 23:36:58 +08:00
|
|
|
BAIL(NC_EATTMETA);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-24 23:36:58 +08:00
|
|
|
if (H5Tset_size(file_typeid, string_size) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EATTMETA);
|
2014-11-24 23:36:58 +08:00
|
|
|
if (H5Tset_strpad(file_typeid, H5T_STR_NULLTERM) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EATTMETA);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-11-24 23:36:58 +08:00
|
|
|
if (!att->len)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if ((spaceid = H5Screate(H5S_NULL)) < 0)
|
2014-11-24 23:36:58 +08:00
|
|
|
BAIL(NC_EATTMETA);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-24 23:36:58 +08:00
|
|
|
else
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if ((spaceid = H5Screate_simple(1, dims, NULL)) < 0)
|
2014-11-24 23:36:58 +08:00
|
|
|
BAIL(NC_EATTMETA);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
}
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((attid = H5Acreate(locid, att->hdr.name, file_typeid, spaceid,
|
2017-12-03 22:11:51 +08:00
|
|
|
H5P_DEFAULT)) < 0)
|
|
|
|
BAIL(NC_EATTMETA);
|
|
|
|
|
|
|
|
/* Write the values, (even if length is zero). */
|
|
|
|
if (H5Awrite(attid, file_typeid, data) < 0)
|
|
|
|
BAIL(NC_EATTMETA);
|
|
|
|
|
|
|
|
exit:
|
|
|
|
if (file_typeid && H5Tclose(file_typeid))
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (attid > 0 && H5Aclose(attid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (spaceid > 0 && H5Sclose(spaceid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
return retval;
|
2014-11-24 23:36:58 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
2018-06-09 01:18:08 +08:00
|
|
|
* @internal Write all the dirty atts in an attlist.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param attlist Pointer to the list if attributes.
|
|
|
|
* @param varid Variable ID.
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-11-24 23:36:58 +08:00
|
|
|
static int
|
2018-03-17 01:46:18 +08:00
|
|
|
write_attlist(NCindex* attlist, int varid, NC_GRP_INFO_T *grp)
|
2014-11-24 23:36:58 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_ATT_INFO_T *att;
|
|
|
|
int retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
int i;
|
2014-11-24 23:36:58 +08:00
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(attlist);i++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
att = (NC_ATT_INFO_T*)ncindexith(attlist,i);
|
|
|
|
if(att == NULL) continue;
|
2014-11-24 23:36:58 +08:00
|
|
|
if (att->dirty)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((4, "%s: writing att %s to varid %d", __func__, att->hdr.name, varid));
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = put_att_grpa(grp, varid, att)))
|
2014-11-24 23:36:58 +08:00
|
|
|
return retval;
|
2017-12-03 22:11:51 +08:00
|
|
|
att->dirty = NC_FALSE;
|
|
|
|
att->created = NC_TRUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NC_NOERR;
|
2014-11-24 23:36:58 +08:00
|
|
|
}
|
|
|
|
|
2014-12-01 13:37:19 +08:00
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal This function is a bit of a hack. Turns out that HDF5
|
|
|
|
* dimension scales cannot themselves have scales attached. This
|
|
|
|
* leaves multidimensional coordinate variables hosed. So this
|
|
|
|
* function writes a special attribute for such a variable, which has
|
|
|
|
* the ids of all the dimensions for that coordinate variable. This
|
|
|
|
* sucks, really. But that's the way the cookie crumbles. Better luck
|
|
|
|
* next time. This function also contains a new way of dealing with
|
|
|
|
* HDF5 error handling, abandoning the BAIL macros for a more organic
|
|
|
|
* and natural approach, made with whole grains, and locally-grown
|
2018-06-09 01:18:08 +08:00
|
|
|
* vegetables.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param var Pointer to var info struct.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
2014-11-12 06:17:08 +08:00
|
|
|
write_coord_dimids(NC_VAR_INFO_T *var)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
hsize_t coords_len[1];
|
|
|
|
hid_t c_spaceid = -1, c_attid = -1;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* Write our attribute. */
|
|
|
|
coords_len[0] = var->ndims;
|
|
|
|
if ((c_spaceid = H5Screate_simple(1, coords_len, coords_len)) < 0) ret++;
|
|
|
|
if (!ret && (c_attid = H5Acreate(var->hdf_datasetid, COORDINATES, H5T_NATIVE_INT,
|
|
|
|
c_spaceid, H5P_DEFAULT)) < 0) ret++;
|
|
|
|
if (!ret && H5Awrite(c_attid, H5T_NATIVE_INT, var->dimids) < 0) ret++;
|
|
|
|
|
|
|
|
/* Close up shop. */
|
|
|
|
if (c_spaceid > 0 && H5Sclose(c_spaceid) < 0) ret++;
|
|
|
|
if (c_attid > 0 && H5Aclose(c_attid) < 0) ret++;
|
|
|
|
return ret ? NC_EHDFERR : 0;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
2018-06-09 01:18:08 +08:00
|
|
|
* @internal Write a special attribute for the netCDF-4 dimension ID.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param datasetid HDF5 datasset ID.
|
|
|
|
* @param dimid NetCDF dimension ID.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
|
|
|
write_netcdf4_dimid(hid_t datasetid, int dimid)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
hid_t dimid_spaceid, dimid_attid;
|
|
|
|
htri_t attr_exists;
|
|
|
|
|
|
|
|
/* Create the space. */
|
|
|
|
if ((dimid_spaceid = H5Screate(H5S_SCALAR)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
|
|
|
|
/* Does the attribute already exist? If so, don't try to create it. */
|
|
|
|
if ((attr_exists = H5Aexists(datasetid, NC_DIMID_ATT_NAME)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
if (attr_exists)
|
|
|
|
dimid_attid = H5Aopen_by_name(datasetid, ".", NC_DIMID_ATT_NAME,
|
|
|
|
H5P_DEFAULT, H5P_DEFAULT);
|
|
|
|
else
|
|
|
|
/* Create the attribute if needed. */
|
|
|
|
dimid_attid = H5Acreate(datasetid, NC_DIMID_ATT_NAME,
|
|
|
|
H5T_NATIVE_INT, dimid_spaceid, H5P_DEFAULT);
|
|
|
|
if (dimid_attid < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
|
|
|
|
|
|
|
|
/* Write it. */
|
|
|
|
LOG((4, "%s: writing secret dimid %d", __func__, dimid));
|
|
|
|
if (H5Awrite(dimid_attid, H5T_NATIVE_INT, &dimid) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
|
|
|
|
/* Close stuff*/
|
|
|
|
if (H5Sclose(dimid_spaceid) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
if (H5Aclose(dimid_attid) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
|
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
2018-06-09 01:18:08 +08:00
|
|
|
* @internal This function creates the HDF5 dataset for a variable.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param var Pointer to variable info struct.
|
|
|
|
* @param write_dimid True to write dimid.
|
|
|
|
*
|
|
|
|
* @return ::NC_NOERR
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
2014-11-24 23:36:58 +08:00
|
|
|
var_create_dataset(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var, nc_bool_t write_dimid)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
hid_t plistid = 0, access_plistid = 0, typeid = 0, spaceid = 0;
|
|
|
|
hsize_t chunksize[H5S_MAX_RANK], dimsize[H5S_MAX_RANK], maxdimsize[H5S_MAX_RANK];
|
|
|
|
int d;
|
|
|
|
void *fillp = NULL;
|
|
|
|
NC_DIM_INFO_T *dim = NULL;
|
|
|
|
char *name_to_use;
|
|
|
|
int retval = NC_NOERR;
|
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((3, "%s:: name %s", __func__, var->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* Scalar or not, we need a creation property list. */
|
|
|
|
if ((plistid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if ((access_plistid = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* RJ: this suppose to be FALSE that is defined in H5 private.h as 0 */
|
|
|
|
if (H5Pset_obj_track_times(plistid,0)<0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Find the HDF5 type of the dataset. */
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((retval = nc4_get_hdf_typeid(grp->nc4_info, var->type_info->hdr.id, &typeid,
|
2017-12-03 22:11:51 +08:00
|
|
|
var->type_info->endianness)))
|
|
|
|
BAIL(retval);
|
|
|
|
|
|
|
|
/* Figure out what fill value to set, if any. */
|
|
|
|
if (var->no_fill)
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
/* Required to truly turn HDF5 fill values off */
|
|
|
|
if (H5Pset_fill_time(plistid, H5D_FILL_TIME_NEVER) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2012-09-07 03:44:03 +08:00
|
|
|
if ((retval = get_fill_value(grp->nc4_info, var, &fillp)))
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(retval);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* If there is a fill value, set it. */
|
|
|
|
if (fillp)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if (var->type_info->nc_type_class == NC_STRING)
|
|
|
|
{
|
|
|
|
if (H5Pset_fill_value(plistid, typeid, fillp) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* The fill value set in HDF5 must always be presented as
|
|
|
|
* a native type, even if the endianness for this dataset
|
|
|
|
* is non-native. HDF5 will translate the fill value to
|
|
|
|
* the target endiannesss. */
|
|
|
|
hid_t fill_typeid = 0;
|
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((retval = nc4_get_hdf_typeid(grp->nc4_info, var->type_info->hdr.id, &fill_typeid,
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_ENDIAN_NATIVE)))
|
|
|
|
BAIL(retval);
|
|
|
|
if (H5Pset_fill_value(plistid, fill_typeid, fillp) < 0)
|
2014-11-12 06:24:38 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
if (H5Tclose(fill_typeid) < 0)
|
2014-02-12 07:12:08 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2014-02-12 07:12:08 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
if (H5Tclose(fill_typeid) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the user wants to shuffle the data, set that up now. */
|
|
|
|
if (var->shuffle) {
|
|
|
|
if (H5Pset_shuffle(plistid) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the user wants to deflate the data, set that up now. */
|
|
|
|
if (var->deflate) {
|
|
|
|
if (H5Pset_deflate(plistid, var->deflate_level) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
} else if(var->filterid) {
|
|
|
|
/* Handle szip case here */
|
|
|
|
if(var->filterid == H5Z_FILTER_SZIP) {
|
|
|
|
int options_mask;
|
|
|
|
int bits_per_pixel;
|
|
|
|
if(var->nparams != 2)
|
2017-08-28 03:35:20 +08:00
|
|
|
BAIL(NC_EFILTER);
|
2017-12-03 22:11:51 +08:00
|
|
|
options_mask = (int)var->params[0];
|
|
|
|
bits_per_pixel = (int)var->params[1];
|
|
|
|
if(H5Pset_szip(plistid, options_mask, bits_per_pixel) < 0)
|
|
|
|
BAIL(NC_EFILTER);
|
|
|
|
} else {
|
2018-01-17 02:00:09 +08:00
|
|
|
herr_t code = H5Pset_filter(plistid, var->filterid, H5Z_FLAG_MANDATORY, var->nparams, var->params);
|
|
|
|
if(code < 0) {
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EFILTER);
|
2018-01-17 02:00:09 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the user wants to fletcher error correcton, set that up now. */
|
|
|
|
if (var->fletcher32)
|
|
|
|
if (H5Pset_fletcher32(plistid) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* If ndims non-zero, get info for all dimensions. We look up the
|
|
|
|
dimids and get the len of each dimension. We need this to create
|
|
|
|
the space for the dataset. In netCDF a dimension length of zero
|
|
|
|
means an unlimited dimension. */
|
|
|
|
if (var->ndims)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
int unlimdim = 0;
|
|
|
|
|
|
|
|
/* Check to see if any unlimited dimensions are used in this var. */
|
2015-04-03 02:30:51 +08:00
|
|
|
for (d = 0; d < var->ndims; d++) {
|
2017-12-03 22:11:51 +08:00
|
|
|
dim = var->dim[d];
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(dim && dim->hdr.id == var->dimids[d]);
|
2017-12-03 22:11:51 +08:00
|
|
|
if (dim->unlimited)
|
|
|
|
unlimdim++;
|
2015-04-03 02:30:51 +08:00
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* If there are no unlimited dims, and no filters, and the user
|
|
|
|
* has not specified chunksizes, use contiguous variable for
|
|
|
|
* better performance. */
|
2018-02-08 21:20:58 +08:00
|
|
|
if (!var->shuffle && !var->deflate && !var->fletcher32 &&
|
|
|
|
(var->chunksizes == NULL || !var->chunksizes[0]) && !unlimdim)
|
|
|
|
var->contiguous = NC_TRUE;
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2014-12-28 09:10:33 +08:00
|
|
|
/* Gather current & maximum dimension sizes, along with chunk sizes */
|
2013-03-26 02:06:19 +08:00
|
|
|
for (d = 0; d < var->ndims; d++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
dim = var->dim[d];
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(dim && dim->hdr.id == var->dimids[d]);
|
2017-12-03 22:11:51 +08:00
|
|
|
dimsize[d] = dim->unlimited ? NC_HDF5_UNLIMITED_DIMSIZE : dim->len;
|
|
|
|
maxdimsize[d] = dim->unlimited ? H5S_UNLIMITED : (hsize_t)dim->len;
|
|
|
|
if (!var->contiguous) {
|
2016-03-05 00:14:15 +08:00
|
|
|
if (var->chunksizes[d])
|
2017-12-03 22:11:51 +08:00
|
|
|
chunksize[d] = var->chunksizes[d];
|
2016-03-05 00:14:15 +08:00
|
|
|
else
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
size_t type_size;
|
|
|
|
if (var->type_info->nc_type_class == NC_STRING)
|
2016-03-05 00:14:15 +08:00
|
|
|
type_size = sizeof(char *);
|
2017-12-03 22:11:51 +08:00
|
|
|
else
|
2016-03-05 00:14:15 +08:00
|
|
|
type_size = var->type_info->size;
|
2014-11-12 06:24:38 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Unlimited dim always gets chunksize of 1. */
|
|
|
|
if (dim->unlimited)
|
2016-03-05 00:14:15 +08:00
|
|
|
chunksize[d] = 1;
|
2017-12-03 22:11:51 +08:00
|
|
|
else
|
2016-03-05 00:14:15 +08:00
|
|
|
chunksize[d] = pow((double)DEFAULT_CHUNK_SIZE/type_size,
|
|
|
|
1/(double)(var->ndims - unlimdim));
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* If the chunksize is greater than the dim
|
|
|
|
* length, make it the dim length. */
|
|
|
|
if (!dim->unlimited && chunksize[d] > dim->len)
|
2016-03-05 00:14:15 +08:00
|
|
|
chunksize[d] = dim->len;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Remember the computed chunksize */
|
|
|
|
var->chunksizes[d] = chunksize[d];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
if (var->contiguous)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if (H5Pset_layout(plistid, H5D_CONTIGUOUS) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
else
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if (H5Pset_chunk(plistid, var->ndims, chunksize) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Create the dataspace. */
|
|
|
|
if ((spaceid = H5Screate_simple(var->ndims, dimsize, maxdimsize)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
if ((spaceid = H5Screate(H5S_SCALAR)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Turn on creation order tracking. */
|
|
|
|
if (H5Pset_attr_creation_order(plistid, H5P_CRT_ORDER_TRACKED|
|
|
|
|
H5P_CRT_ORDER_INDEXED) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2014-11-12 06:24:38 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Set per-var chunk cache, for chunked datasets. */
|
|
|
|
if (!var->contiguous && var->chunk_cache_size)
|
|
|
|
if (H5Pset_chunk_cache(access_plistid, var->chunk_cache_nelems,
|
|
|
|
var->chunk_cache_size, var->chunk_cache_preemption) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* At long last, create the dataset. */
|
2018-03-17 01:46:18 +08:00
|
|
|
name_to_use = var->hdf5_name ? var->hdf5_name : var->hdr.name;
|
2017-12-03 22:11:51 +08:00
|
|
|
LOG((4, "%s: about to H5Dcreate2 dataset %s of type 0x%x", __func__,
|
|
|
|
name_to_use, typeid));
|
|
|
|
if ((var->hdf_datasetid = H5Dcreate2(grp->hdf_grpid, name_to_use, typeid,
|
|
|
|
spaceid, H5P_DEFAULT, plistid, access_plistid)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
var->created = NC_TRUE;
|
|
|
|
var->is_new_var = NC_FALSE;
|
|
|
|
|
|
|
|
/* If this is a dimscale, mark it as such in the HDF5 file. Also
|
|
|
|
* find the dimension info and store the dataset id of the dimscale
|
|
|
|
* dataset. */
|
|
|
|
if (var->dimscale)
|
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
if (H5DSset_scale(var->hdf_datasetid, var->hdr.name) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* If this is a multidimensional coordinate variable, write a
|
|
|
|
* coordinates attribute. */
|
|
|
|
if (var->ndims > 1)
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = write_coord_dimids(var)))
|
|
|
|
BAIL(retval);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* If desired, write the netCDF dimid. */
|
|
|
|
if (write_dimid)
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = write_netcdf4_dimid(var->hdf_datasetid, var->dimids[0])))
|
|
|
|
BAIL(retval);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Write attributes for this var. */
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((retval = write_attlist(var->att, var->hdr.id, grp)))
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(retval);
|
|
|
|
var->attr_dirty = NC_FALSE;
|
|
|
|
|
|
|
|
exit:
|
|
|
|
if (typeid > 0 && H5Tclose(typeid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (plistid > 0 && H5Pclose(plistid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (access_plistid > 0 && H5Pclose(access_plistid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (spaceid > 0 && H5Sclose(spaceid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (fillp)
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
if (var->type_info->nc_type_class == NC_VLEN)
|
2017-12-03 22:11:51 +08:00
|
|
|
nc_free_vlen((nc_vlen_t *)fillp);
|
2014-02-12 07:12:08 +08:00
|
|
|
else if (var->type_info->nc_type_class == NC_STRING && *(char **)fillp)
|
2017-12-03 22:11:51 +08:00
|
|
|
free(*(char **)fillp);
|
Corrected "BAIL" macros to avoid infinite loop when logging is disabled and an
error occurs after an "exit:" label.
Corrected a dozen Coverity errors (mainly allocation issues, along with a few
other things):
711711, 711802, 711803, 711905, 970825, 996123, 996124, 1025787,
1047274, 1130013, 1130014, 1139538
Refactored internal fill-value code to correctly handle string types, and
especially to allow NULL pointers and null strings (ie. "") to be
distinguished. The code now avoids partially aliasing the two together
(which only happened on the 'write' side of things and wasn't reflected on
the 'read' side, adding to the previous confusion).
Probably still weak on handling fill-values of variable-length and compound
datatypes.
Refactored the recursive metadata reads a bit more, to process HDF5 named
datatypes and datasets immediately, avoiding chewing up memory for those
types of objects, etc.
Finished uncommenting and updating the nc_test4/tst_fills2.c code (as I'm
proceeding alphabetically through the nc_test4 code files).
2013-12-29 15:12:43 +08:00
|
|
|
free(fillp);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Adjust the chunk cache of a var for better
|
2018-06-09 01:18:08 +08:00
|
|
|
* performance.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param var Pointer to var info struct.
|
|
|
|
*
|
|
|
|
* @return NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
int
|
|
|
|
nc4_adjust_var_cache(NC_GRP_INFO_T *grp, NC_VAR_INFO_T * var)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
size_t chunk_size_bytes = 1;
|
|
|
|
int d;
|
|
|
|
int retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Nothing to be done. */
|
|
|
|
if (var->contiguous)
|
|
|
|
return NC_NOERR;
|
2015-08-16 06:26:35 +08:00
|
|
|
#ifdef USE_PARALLEL4
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2014-11-12 06:24:38 +08:00
|
|
|
#endif
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* How many bytes in the chunk? */
|
|
|
|
for (d = 0; d < var->ndims; d++)
|
|
|
|
chunk_size_bytes *= var->chunksizes[d];
|
|
|
|
if (var->type_info->size)
|
|
|
|
chunk_size_bytes *= var->type_info->size;
|
|
|
|
else
|
|
|
|
chunk_size_bytes *= sizeof(char *);
|
|
|
|
|
|
|
|
/* If the chunk cache is too small, and the user has not changed
|
|
|
|
* the default value of the chunk cache size, then increase the
|
|
|
|
* size of the cache. */
|
|
|
|
if (var->chunk_cache_size == CHUNK_CACHE_SIZE)
|
|
|
|
if (chunk_size_bytes > var->chunk_cache_size)
|
2010-07-06 22:56:28 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
var->chunk_cache_size = chunk_size_bytes * DEFAULT_CHUNKS_IN_CACHE;
|
|
|
|
if (var->chunk_cache_size > MAX_DEFAULT_CACHE_SIZE)
|
|
|
|
var->chunk_cache_size = MAX_DEFAULT_CACHE_SIZE;
|
|
|
|
if ((retval = nc4_reopen_dataset(grp, var)))
|
|
|
|
return retval;
|
2010-07-06 22:56:28 +08:00
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Create a HDF5 defined type from a NC_TYPE_INFO_T struct,
|
2018-06-09 01:18:08 +08:00
|
|
|
* and commit it to the file.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param type Pointer to type info struct.
|
|
|
|
*
|
|
|
|
* @return NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
|
|
|
commit_type(NC_GRP_INFO_T *grp, NC_TYPE_INFO_T *type)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
assert(grp && type);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Did we already record this type? */
|
|
|
|
if (type->committed)
|
|
|
|
return NC_NOERR;
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Is this a compound type? */
|
|
|
|
if (type->nc_type_class == NC_COMPOUND)
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
NC_FIELD_INFO_T *field;
|
|
|
|
hid_t hdf_base_typeid, hdf_typeid;
|
2018-03-17 01:46:18 +08:00
|
|
|
int i;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2010-06-03 21:24:43 +08:00
|
|
|
if ((type->hdf_typeid = H5Tcreate(H5T_COMPOUND, type->size)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((4, "creating compound type %s hdf_typeid 0x%x", type->hdr.name,
|
2010-06-03 21:24:43 +08:00
|
|
|
type->hdf_typeid));
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<nclistlength(type->u.c.field);i++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
if((field = (NC_FIELD_INFO_T*)nclistget(type->u.c.field,i)) == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = nc4_get_hdf_typeid(grp->nc4_info, field->nc_typeid,
|
|
|
|
&hdf_base_typeid, type->endianness)))
|
2010-06-03 21:24:43 +08:00
|
|
|
return retval;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* If this is an array, create a special array type. */
|
|
|
|
if (field->ndims)
|
|
|
|
{
|
|
|
|
int d;
|
|
|
|
hsize_t dims[NC_MAX_VAR_DIMS];
|
|
|
|
|
|
|
|
for (d = 0; d < field->ndims; d++)
|
|
|
|
dims[d] = field->dim_size[d];
|
|
|
|
if ((hdf_typeid = H5Tarray_create(hdf_base_typeid, field->ndims,
|
|
|
|
dims, NULL)) < 0)
|
2014-02-12 07:12:08 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
if (H5Tclose(hdf_base_typeid) < 0)
|
2014-02-12 07:12:08 +08:00
|
|
|
return NC_EHDFERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
2014-11-12 06:24:38 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
if (H5Tclose(hdf_base_typeid) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
else
|
2014-11-12 06:24:38 +08:00
|
|
|
hdf_typeid = hdf_base_typeid;
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((4, "inserting field %s offset %d hdf_typeid 0x%x", field->hdr.name,
|
2017-12-03 22:11:51 +08:00
|
|
|
field->offset, hdf_typeid));
|
2018-03-17 01:46:18 +08:00
|
|
|
if (H5Tinsert(type->hdf_typeid, field->hdr.name, field->offset,
|
2017-12-03 22:11:51 +08:00
|
|
|
hdf_typeid) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
return NC_EHDFERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
if (H5Tclose(hdf_typeid) < 0)
|
2010-06-03 21:24:43 +08:00
|
|
|
return NC_EHDFERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (type->nc_type_class == NC_VLEN)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Find the HDF typeid of the base type of this vlen. */
|
2014-11-12 06:17:08 +08:00
|
|
|
if ((retval = nc4_get_hdf_typeid(grp->nc4_info, type->u.v.base_nc_typeid,
|
2014-11-12 06:24:38 +08:00
|
|
|
&type->u.v.base_hdf_typeid, type->endianness)))
|
2017-12-03 22:11:51 +08:00
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* Create a vlen type. */
|
2014-02-12 07:12:08 +08:00
|
|
|
if ((type->hdf_typeid = H5Tvlen_create(type->u.v.base_hdf_typeid)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
else if (type->nc_type_class == NC_OPAQUE)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Create the opaque type. */
|
|
|
|
if ((type->hdf_typeid = H5Tcreate(H5T_OPAQUE, type->size)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
else if (type->nc_type_class == NC_ENUM)
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
NC_ENUM_MEMBER_INFO_T *enum_m;
|
2018-03-17 01:46:18 +08:00
|
|
|
int i;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
if (nclistlength(type->u.e.enum_member) == 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EINVAL;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/* Find the HDF typeid of the base type of this enum. */
|
2014-11-12 06:17:08 +08:00
|
|
|
if ((retval = nc4_get_hdf_typeid(grp->nc4_info, type->u.e.base_nc_typeid,
|
2014-11-12 06:24:38 +08:00
|
|
|
&type->u.e.base_hdf_typeid, type->endianness)))
|
2017-12-03 22:11:51 +08:00
|
|
|
return retval;
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Create an enum type. */
|
2014-11-12 06:17:08 +08:00
|
|
|
if ((type->hdf_typeid = H5Tenum_create(type->u.e.base_hdf_typeid)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
2014-11-12 06:17:08 +08:00
|
|
|
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Add all the members to the HDF5 type. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<nclistlength(type->u.e.enum_member);i++) {
|
|
|
|
enum_m = (NC_ENUM_MEMBER_INFO_T*)nclistget(type->u.e.enum_member,i);
|
2017-12-03 22:11:51 +08:00
|
|
|
if (H5Tenum_insert(type->hdf_typeid, enum_m->name, enum_m->value) < 0)
|
|
|
|
return NC_EHDFERR;
|
2018-03-17 01:46:18 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
LOG((0, "Unknown class: %d", type->nc_type_class));
|
2010-06-03 21:24:43 +08:00
|
|
|
return NC_EBADTYPE;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Commit the type. */
|
2018-03-17 01:46:18 +08:00
|
|
|
if (H5Tcommit(grp->hdf_grpid, type->hdr.name, type->hdf_typeid) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
|
|
|
type->committed = NC_TRUE;
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((4, "just committed type %s, HDF typeid: 0x%x", type->hdr.name,
|
2017-12-03 22:11:51 +08:00
|
|
|
type->hdf_typeid));
|
|
|
|
|
|
|
|
/* Later we will always use the native typeid. In this case, it is
|
|
|
|
* a copy of the same type pointed to by hdf_typeid, but it's
|
|
|
|
* easier to maintain a copy. */
|
|
|
|
if ((type->native_hdf_typeid = H5Tget_native_type(type->hdf_typeid,
|
|
|
|
H5T_DIR_DEFAULT)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
|
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Write an attribute, with value 1, to indicate that strict
|
2018-06-09 01:18:08 +08:00
|
|
|
* NC3 rules apply to this file.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param hdf_grpid HDF5 group ID.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
|
|
|
write_nc3_strict_att(hid_t hdf_grpid)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
hid_t attid = 0, spaceid = 0;
|
|
|
|
int one = 1;
|
|
|
|
int retval = NC_NOERR;
|
|
|
|
htri_t attr_exists;
|
|
|
|
|
|
|
|
/* If the attribute already exists, call that a success and return
|
|
|
|
* NC_NOERR. */
|
|
|
|
if ((attr_exists = H5Aexists(hdf_grpid, NC3_STRICT_ATT_NAME)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
if (attr_exists)
|
|
|
|
return NC_NOERR;
|
|
|
|
|
|
|
|
/* Create the attribute to mark this as a file that needs to obey
|
|
|
|
* strict netcdf-3 rules. */
|
|
|
|
if ((spaceid = H5Screate(H5S_SCALAR)) < 0)
|
|
|
|
BAIL(NC_EFILEMETA);
|
|
|
|
if ((attid = H5Acreate(hdf_grpid, NC3_STRICT_ATT_NAME,
|
|
|
|
H5T_NATIVE_INT, spaceid, H5P_DEFAULT)) < 0)
|
|
|
|
BAIL(NC_EFILEMETA);
|
|
|
|
if (H5Awrite(attid, H5T_NATIVE_INT, &one) < 0)
|
|
|
|
BAIL(NC_EFILEMETA);
|
|
|
|
|
|
|
|
exit:
|
|
|
|
if (spaceid > 0 && (H5Sclose(spaceid) < 0))
|
|
|
|
BAIL2(NC_EFILEMETA);
|
|
|
|
if (attid > 0 && (H5Aclose(attid) < 0))
|
|
|
|
BAIL2(NC_EFILEMETA);
|
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Create a HDF5 group.
|
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
*
|
|
|
|
* @return NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
|
|
|
create_group(NC_GRP_INFO_T *grp)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
hid_t gcpl_id = 0;
|
|
|
|
int retval = NC_NOERR;;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
assert(grp);
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* If this is not the root group, create it in the HDF5 file. */
|
|
|
|
if (grp->parent)
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Create group, with link_creation_order set in the group
|
|
|
|
* creation property list. */
|
|
|
|
if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
2014-02-11 06:29:14 +08:00
|
|
|
|
|
|
|
/* RJ: this suppose to be FALSE that is defined in H5 private.h as 0 */
|
|
|
|
if (H5Pset_obj_track_times(gcpl_id,0)<0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2014-02-11 06:29:14 +08:00
|
|
|
|
2010-06-03 21:24:43 +08:00
|
|
|
if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED|H5P_CRT_ORDER_INDEXED) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED|H5P_CRT_ORDER_INDEXED) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2018-03-17 01:46:18 +08:00
|
|
|
if ((grp->hdf_grpid = H5Gcreate2(grp->parent->hdf_grpid, grp->hdr.name,
|
2010-06-03 21:24:43 +08:00
|
|
|
H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
2010-06-03 21:24:43 +08:00
|
|
|
if (H5Pclose(gcpl_id) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Since this is the root group, we have to open it. */
|
2012-09-07 03:44:03 +08:00
|
|
|
if ((grp->hdf_grpid = H5Gopen2(grp->nc4_info->hdfid, "/", H5P_DEFAULT)) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EFILEMETA);
|
|
|
|
}
|
|
|
|
return NC_NOERR;
|
|
|
|
|
|
|
|
exit:
|
|
|
|
if (gcpl_id > 0 && H5Pclose(gcpl_id) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (grp->hdf_grpid > 0 && H5Gclose(grp->hdf_grpid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal After all the datasets of the file have been read, it's
|
|
|
|
* time to sort the wheat from the chaff. Which of the datasets are
|
|
|
|
* netCDF dimensions, and which are coordinate variables, and which
|
2018-06-09 01:18:08 +08:00
|
|
|
* are non-coordinate variables.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
2018-06-09 01:18:08 +08:00
|
|
|
* @param grp Pointer to group info struct.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @return ::NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
|
|
|
attach_dimscales(NC_GRP_INFO_T *grp)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_DIM_INFO_T *dim1;
|
|
|
|
int d, i;
|
|
|
|
int retval = NC_NOERR;
|
|
|
|
|
|
|
|
/* Attach dimension scales. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->vars);i++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,i);
|
2016-07-07 22:28:24 +08:00
|
|
|
if (!var) continue;
|
2010-06-03 21:24:43 +08:00
|
|
|
/* Scales themselves do not attach. But I really wish they
|
|
|
|
* would. */
|
2014-11-12 06:17:08 +08:00
|
|
|
if (var->dimscale)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
/* If this is a multidimensional coordinate variable, it will
|
|
|
|
* have a special coords attribute (read earlier) with a list
|
|
|
|
* of the dimensions for this variable. */
|
|
|
|
}
|
2014-11-12 06:17:08 +08:00
|
|
|
else /* not a dimscale... */
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
/* Find the scale for each dimension, if any, and attach it. */
|
2017-12-03 22:11:51 +08:00
|
|
|
for (d = 0; d < var->ndims; d++)
|
|
|
|
{
|
|
|
|
/* Is there a dimscale for this dimension? */
|
|
|
|
if (var->dimscale_attached)
|
2014-11-12 06:24:38 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
if (!var->dimscale_attached[d])
|
|
|
|
{
|
|
|
|
hid_t dim_datasetid; /* Dataset ID for dimension */
|
|
|
|
dim1 = var->dim[d];
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(dim1 && dim1->hdr.id == var->dimids[d]);
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
LOG((2, "%s: attaching scale for dimid %d to var %s",
|
2018-03-17 01:46:18 +08:00
|
|
|
__func__, var->dimids[d], var->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* Find dataset ID for dimension */
|
|
|
|
if (dim1->coord_var)
|
|
|
|
dim_datasetid = dim1->coord_var->hdf_datasetid;
|
|
|
|
else
|
|
|
|
dim_datasetid = dim1->hdf_dimscaleid;
|
2018-03-17 01:46:18 +08:00
|
|
|
if(!(dim_datasetid > 0))
|
2017-12-03 22:11:51 +08:00
|
|
|
assert(dim_datasetid > 0);
|
|
|
|
if (H5DSattach_scale(var->hdf_datasetid, dim_datasetid, d) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
var->dimscale_attached[d] = NC_TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If we didn't find a dimscale to attach, that's a problem! */
|
|
|
|
if (!var->dimscale_attached[d])
|
|
|
|
{
|
|
|
|
LOG((0, "no dimscale found!"));
|
|
|
|
return NC_EDIMSCALE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
exit:
|
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Does a variable exist?
|
|
|
|
*
|
|
|
|
* @param grpid HDF5 group ID.
|
|
|
|
* @param name Name of variable.
|
|
|
|
* @param exists Pointer that gets 1 of the variable exists, 0 otherwise.
|
|
|
|
*
|
|
|
|
* @return ::NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
|
2014-11-24 23:36:58 +08:00
|
|
|
var_exists(hid_t grpid, char *name, nc_bool_t *exists)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
htri_t link_exists;
|
2013-12-01 13:20:28 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Reset the boolean */
|
|
|
|
*exists = NC_FALSE;
|
2013-12-01 13:20:28 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Check if the object name exists in the group */
|
|
|
|
if ((link_exists = H5Lexists(grpid, name, H5P_DEFAULT)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
if (link_exists)
|
|
|
|
{
|
2013-12-01 13:20:28 +08:00
|
|
|
H5G_stat_t statbuf;
|
|
|
|
|
|
|
|
/* Get info about the object */
|
|
|
|
if (H5Gget_objinfo(grpid, name, 1, &statbuf) < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_EHDFERR;
|
2013-12-01 13:20:28 +08:00
|
|
|
|
|
|
|
if (H5G_DATASET == statbuf.type)
|
2017-12-03 22:11:51 +08:00
|
|
|
*exists = NC_TRUE;
|
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2018-01-31 05:39:47 +08:00
|
|
|
/**
|
|
|
|
* @internal Convert a coordinate variable HDF5 dataset into one that
|
|
|
|
* is not a coordinate variable. This happens during renaming of vars
|
|
|
|
* and dims. This function removes the HDF5 NAME and CLASS attributes
|
|
|
|
* associated with dimension scales, and also the NC_DIMID_ATT_NAME
|
|
|
|
* attribute which may be present, and, if it does, holds the dimid of
|
|
|
|
* the coordinate variable.
|
|
|
|
*
|
|
|
|
* @param hdf_datasetid The HDF5 dataset ID of the coordinate variable dataset.
|
|
|
|
*
|
|
|
|
* @return ::NC_NOERR No error.
|
|
|
|
* @return ::NC_EHDFERR HDF5 error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
remove_coord_atts(hid_t hdf_datasetid)
|
|
|
|
{
|
|
|
|
htri_t attr_exists;
|
|
|
|
|
|
|
|
/* If the variable dataset has an optional NC_DIMID_ATT_NAME
|
|
|
|
* attribute, delete it. */
|
|
|
|
if ((attr_exists = H5Aexists(hdf_datasetid, NC_DIMID_ATT_NAME)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
if (attr_exists)
|
|
|
|
{
|
|
|
|
if (H5Adelete(hdf_datasetid, NC_DIMID_ATT_NAME) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* (We could do a better job here and verify that the attributes are
|
|
|
|
* really dimension scale 'CLASS' & 'NAME' attributes, but that would be
|
|
|
|
* poking about in the HDF5 DimScale internal data) */
|
|
|
|
if ((attr_exists = H5Aexists(hdf_datasetid, HDF5_DIMSCALE_CLASS_ATT_NAME)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
if (attr_exists)
|
|
|
|
{
|
|
|
|
if (H5Adelete(hdf_datasetid, HDF5_DIMSCALE_CLASS_ATT_NAME) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
if ((attr_exists = H5Aexists(hdf_datasetid, HDF5_DIMSCALE_NAME_ATT_NAME)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
if (attr_exists)
|
|
|
|
{
|
|
|
|
if (H5Adelete(hdf_datasetid, HDF5_DIMSCALE_NAME_ATT_NAME) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
return NC_NOERR;
|
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal This function writes a variable. The principle difficulty
|
|
|
|
* comes from the possibility that this is a coordinate variable, and
|
|
|
|
* was already written to the file as a dimension-only dimscale. If
|
2018-06-09 01:18:08 +08:00
|
|
|
* this occurs, then it must be deleted and recreated.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param var Pointer to variable info struct.
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param write_dimid
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
2018-01-31 05:39:47 +08:00
|
|
|
* @author Ed Hartnett, Quincey Koziol
|
2017-12-05 03:21:14 +08:00
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
write_var(NC_VAR_INFO_T *var, NC_GRP_INFO_T *grp, nc_bool_t write_dimid)
{
   nc_bool_t replace_existing_var = NC_FALSE;
   int retval;

   LOG((4, "%s: writing var %s", __func__, var->hdr.name));

   /* If the variable has already been created & the fill value changed,
    * indicate that the existing variable should be replaced. */
   if (var->created && var->fill_val_changed)
   {
      replace_existing_var = NC_TRUE;
      var->fill_val_changed = NC_FALSE;
      /* If the variable is going to be replaced,
         we need to flag any other attributes associated
         with the variable as 'dirty', or else
         *only* the fill value attribute will be copied over
         and the rest will be lost. See:

         * https://github.com/Unidata/netcdf-c/issues/239 */

      flag_atts_dirty(var->att);
   }

   /* Is this a coordinate var that has already been created in
    * the HDF5 file as a dimscale dataset? Check for dims with the
    * same name in this group. If there is one, check to see if
    * this object exists in the HDF group. */
   if (var->became_coord_var)
   {
      NC_DIM_INFO_T *d1;
      int i;

      for(i=0;i<ncindexsize(grp->dim);i++) {
         if((d1 = (NC_DIM_INFO_T*)ncindexith(grp->dim,i)) == NULL) continue;
         if (!strcmp(d1->hdr.name, var->hdr.name))
         {
            nc_bool_t exists;

            if ((retval = var_exists(grp->hdf_grpid, var->hdr.name, &exists)))
               return retval;
            if (exists)
            {
               /* Indicate that the variable already exists, and should be replaced */
               replace_existing_var = NC_TRUE;
               /* Mark all atts dirty so they are rewritten with the new dataset. */
               flag_atts_dirty(var->att);
               break;
            }
         }
      }
   }

   /* Check dims if the variable will be replaced, so that the dimensions
    * will be de-attached and re-attached correctly. */
   /* (Note: There's a temptation to merge this loop over the dimensions with
    * the prior loop over dimensions, but that blurs the line over the
    * purpose of them, so they are currently separate. If performance
    * becomes an issue here, it would be possible to merge them. -QAK)
    */
   if (replace_existing_var)
   {
      NC_DIM_INFO_T *d1;
      int i;

      for(i=0;i<ncindexsize(grp->dim);i++) {
         if((d1 = (NC_DIM_INFO_T*)ncindexith(grp->dim,i)) == NULL) continue;
         if (!strcmp(d1->hdr.name, var->hdr.name))
         {
            nc_bool_t exists;

            if ((retval = var_exists(grp->hdf_grpid, var->hdr.name, &exists)))
               return retval;
            if (exists)
            {
               hid_t dim_datasetid;  /* Dataset ID for dimension */

               /* Find dataset ID for dimension: the coord var's dataset
                * if one exists, else the hidden dimscale dataset. */
               if (d1->coord_var)
                  dim_datasetid = d1->coord_var->hdf_datasetid;
               else
                  dim_datasetid = d1->hdf_dimscaleid;
               assert(dim_datasetid > 0);

               /* If we're replacing an existing dimscale dataset, go to
                * every var in the file and detach this dimension scale,
                * because we have to delete it. */
               if ((retval = rec_detach_scales(grp->nc4_info->root_grp,
                                               var->dimids[0], dim_datasetid)))
                  return retval;
               break;
            }
         }
      }
   }

   /* If this is not a dimension scale, do this stuff. */
   if (var->was_coord_var && var->dimscale_attached)
   {
      /* If the variable already exists in the file, Remove any dimension scale
       * attributes from it, if they exist. */
      /* (The HDF5 Dimension Scale API should really have an API routine
       * for making a dataset not a scale. -QAK) */
      if (var->created)
      {
         if ((retval = remove_coord_atts(var->hdf_datasetid)))
            BAIL(retval);
      }

      if (var->dimscale_attached)
      {
         int d;

         /* If this is a regular var, detach all its dim scales. */
         for (d = 0; d < var->ndims; d++)
            if (var->dimscale_attached[d])
            {
               hid_t dim_datasetid;  /* Dataset ID for dimension */
               NC_DIM_INFO_T *dim1 = var->dim[d];
               assert(dim1 && dim1->hdr.id == var->dimids[d]);

               /* Find dataset ID for dimension */
               if (dim1->coord_var)
                  dim_datasetid = dim1->coord_var->hdf_datasetid;
               else
                  dim_datasetid = dim1->hdf_dimscaleid;
               assert(dim_datasetid > 0);

               if (H5DSdetach_scale(var->hdf_datasetid, dim_datasetid, d) < 0)
                  BAIL(NC_EHDFERR);
               var->dimscale_attached[d] = NC_FALSE;
            }
      }
   }

   /* Delete the HDF5 dataset that is to be replaced. */
   if (replace_existing_var)
   {
      /* Free the HDF5 dataset id. */
      if (var->hdf_datasetid && H5Dclose(var->hdf_datasetid) < 0)
         BAIL(NC_EHDFERR);
      var->hdf_datasetid = 0;

      /* Now delete the variable. */
      if (H5Gunlink(grp->hdf_grpid, var->hdr.name) < 0)
         return NC_EDIMMETA;
   }

   /* Create the dataset. */
   if (var->is_new_var || replace_existing_var)
   {
      if ((retval = var_create_dataset(grp, var, write_dimid)))
         return retval;
   }
   else
   {
      /* Existing dataset kept; optionally record the secret dimid. */
      if (write_dimid && var->ndims)
         if ((retval = write_netcdf4_dimid(var->hdf_datasetid, var->dimids[0])))
            BAIL(retval);
   }

   if (replace_existing_var)
   {
      /* If this is a dimension scale, reattach the scale everywhere it
       * is used. (Recall that netCDF dimscales are always 1-D). */
      if(var->dimscale)
      {
         if ((retval = rec_reattach_scales(grp->nc4_info->root_grp,
                                           var->dimids[0], var->hdf_datasetid)))
            return retval;
      }
      /* If it's not a dimension scale, clear the dimscale attached flags,
       * so the dimensions are re-attached. */
      else
      {
         if (var->dimscale_attached)
            memset(var->dimscale_attached, 0, sizeof(nc_bool_t) * var->ndims);
      }
   }

   /* Clear coord. var state transition flags */
   var->was_coord_var = NC_FALSE;
   var->became_coord_var = NC_FALSE;

   /* Now check the attributes for this var. */
   if (var->attr_dirty)
   {
      /* Write attributes for this var. */
      if ((retval = write_attlist(var->att, var->hdr.id, grp)))
         BAIL(retval);
      var->attr_dirty = NC_FALSE;
   }

   return NC_NOERR;
exit:
   return retval;
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Write a dimension.
|
|
|
|
*
|
|
|
|
* @param dim Pointer to dim info struct.
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param write_dimid
|
|
|
|
*
|
|
|
|
* @returns ::NC_NOERR No error.
|
|
|
|
* @returns ::NC_EPERM Read-only file.
|
|
|
|
* @returns ::NC_EHDFERR HDF5 returned error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
static int
write_dim(NC_DIM_INFO_T *dim, NC_GRP_INFO_T *grp, nc_bool_t write_dimid)
{
   int retval;

   /* If there's no dimscale dataset for this dim, create one,
    * and mark that it should be hidden from netCDF as a
    * variable. (That is, it should appear as a dimension
    * without an associated variable.) */
   if (0 == dim->hdf_dimscaleid)
   {
      hid_t spaceid, create_propid;
      hsize_t dims[1], max_dims[1], chunk_dims[1] = {1};
      char dimscale_wo_var[NC_MAX_NAME];

      LOG((4, "%s: creating dim %s", __func__, dim->hdr.name));

      /* Sanity check */
      assert(NULL == dim->coord_var);

      /* Create a property list. If this dimension scale is
       * unlimited (i.e. it's an unlimited dimension), then set
       * up chunking, with a chunksize of 1. */
      if ((create_propid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
         BAIL(NC_EHDFERR);

      /* RJ: this suppose to be FALSE that is defined in H5 private.h as 0 */
      if (H5Pset_obj_track_times(create_propid,0)<0)
         BAIL(NC_EHDFERR);

      dims[0] = dim->len;
      max_dims[0] = dim->len;
      if (dim->unlimited)
      {
         max_dims[0] = H5S_UNLIMITED;
         if (H5Pset_chunk(create_propid, 1, chunk_dims) < 0)
            BAIL(NC_EHDFERR);
      }

      /* Set up space. */
      if ((spaceid = H5Screate_simple(1, dims, max_dims)) < 0)
         BAIL(NC_EHDFERR);

      if (H5Pset_attr_creation_order(create_propid, H5P_CRT_ORDER_TRACKED|
                                     H5P_CRT_ORDER_INDEXED) < 0)
         BAIL(NC_EHDFERR);

      /* Create the dataset that will be the dimension scale. */
      /* NOTE(review): if any BAIL below fires, spaceid/create_propid are
       * not closed at exit — looks like a small HDF5 handle leak on the
       * error path; confirm against the rest of the file's conventions. */
      LOG((4, "%s: about to H5Dcreate1 a dimscale dataset %s", __func__, dim->hdr.name));
      if ((dim->hdf_dimscaleid = H5Dcreate1(grp->hdf_grpid, dim->hdr.name, H5T_IEEE_F32BE,
                                            spaceid, create_propid)) < 0)
         BAIL(NC_EHDFERR);

      /* Close the spaceid and create_propid. */
      if (H5Sclose(spaceid) < 0)
         BAIL(NC_EHDFERR);
      if (H5Pclose(create_propid) < 0)
         BAIL(NC_EHDFERR);

      /* Indicate that this is a scale. Also indicate that not
       * be shown to the user as a variable. It is hidden. It is
       * a DIM WITHOUT A VARIABLE! */
      sprintf(dimscale_wo_var, "%s%10d", DIM_WITHOUT_VARIABLE, (int)dim->len);
      if (H5DSset_scale(dim->hdf_dimscaleid, dimscale_wo_var) < 0)
         BAIL(NC_EHDFERR);
   }

   /* Did we extend an unlimited dimension? */
   if (dim->extended)
   {
      NC_VAR_INFO_T *v1 = NULL;

      assert(dim->unlimited);
      /* If this is a dimension without a variable, then update
       * the secret length information at the end of the NAME
       * attribute. */
      v1 = (NC_VAR_INFO_T*)ncindexlookup(grp->vars,dim->hdr.name);
      if (v1)
      {
         hsize_t *new_size = NULL;
         int d1;

         /* Extend the dimension scale dataset to reflect the new
          * length of the dimension. */
         if (!(new_size = malloc(v1->ndims * sizeof(hsize_t))))
            BAIL(NC_ENOMEM);
         for (d1 = 0; d1 < v1->ndims; d1++)
         {
            assert(v1->dim[d1] && v1->dim[d1]->hdr.id == v1->dimids[d1]);
            new_size[d1] = v1->dim[d1]->len;
         }
         if (H5Dset_extent(v1->hdf_datasetid, new_size) < 0) {
            free(new_size);
            BAIL(NC_EHDFERR);
         }
         free(new_size);
      }
   }

   /* If desired, write the secret dimid. This will be used instead of
    * the dimid that the dimension would otherwise receive based on
    * creation order. This can be necessary when dims and their
    * coordinate variables were created in different order. */
   if (write_dimid && dim->hdf_dimscaleid)
      if ((retval = write_netcdf4_dimid(dim->hdf_dimscaleid, dim->hdr.id)))
         BAIL(retval);

   return NC_NOERR;
exit:

   return retval;
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Recursively determine if there is a mismatch between
|
|
|
|
* order of coordinate creation and associated dimensions in this
|
|
|
|
* group or any subgroups, to find out if we have to handle that
|
|
|
|
* situation. Also check if there are any multidimensional coordinate
|
|
|
|
* variables defined, which require the same treatment to fix a
|
2018-06-09 01:18:08 +08:00
|
|
|
* potential bug when such variables occur in subgroups.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
* @param bad_coord_orderp Pointer that gets 1 if there is a bad
|
|
|
|
* coordinate order.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
int
|
2014-11-24 23:36:58 +08:00
|
|
|
nc4_rec_detect_need_to_preserve_dimids(NC_GRP_INFO_T *grp, nc_bool_t *bad_coord_orderp)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_GRP_INFO_T *child_grp;
|
|
|
|
int last_dimid = -1;
|
|
|
|
int retval;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Iterate over variables in this group */
|
2018-03-17 01:46:18 +08:00
|
|
|
for (i=0; i < ncindexsize(grp->vars); i++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,i);
|
|
|
|
if (var == NULL) continue;
|
2013-12-01 13:20:28 +08:00
|
|
|
/* Only matters for dimension scale variables, with non-scalar dimensionality */
|
|
|
|
if (var->dimscale && var->ndims)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
/* If the user writes coord vars in a different order then he
|
|
|
|
* defined their dimensions, then, when the file is reopened, the
|
|
|
|
* order of the dimids will change to match the order of the coord
|
|
|
|
* vars. Detect if this is about to happen. */
|
|
|
|
if (var->dimids[0] < last_dimid)
|
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((5, "%s: %s is out of order coord var", __func__, var->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
*bad_coord_orderp = NC_TRUE;
|
|
|
|
return NC_NOERR;
|
|
|
|
}
|
|
|
|
last_dimid = var->dimids[0];
|
|
|
|
|
|
|
|
/* If there are multidimensional coordinate variables defined, then
|
|
|
|
* it's also necessary to preserve dimension IDs when the file is
|
|
|
|
* reopened ... */
|
|
|
|
if (var->ndims > 1)
|
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((5, "%s: %s is multidimensional coord var", __func__, var->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
*bad_coord_orderp = NC_TRUE;
|
|
|
|
return NC_NOERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Did the user define a dimension, end define mode, reenter define
|
|
|
|
* mode, and then define a coordinate variable for that dimension?
|
|
|
|
* If so, dimensions will be out of order. */
|
|
|
|
if (var->is_new_var || var->became_coord_var)
|
|
|
|
{
|
|
|
|
LOG((5, "%s: coord var defined after enddef/redef", __func__));
|
|
|
|
*bad_coord_orderp = NC_TRUE;
|
|
|
|
return NC_NOERR;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-04-23 06:34:21 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* If there are any child groups, check them also for this condition. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->children); i++) {
|
|
|
|
if((child_grp = (NC_GRP_INFO_T*)ncindexith(grp->children,i)) == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = nc4_rec_detect_need_to_preserve_dimids(child_grp, bad_coord_orderp)))
|
|
|
|
return retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2013-03-26 23:14:19 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
 * @internal Recursively write all the metadata in a group. Groups and
 * types have all already been written. Propagate bad coordinate
 * order to subgroups, if detected.
 *
 * @param grp Pointer to group info struct.
 * @param bad_coord_order 1 if there is a bad coordinate order.
 *
 * @returns NC_NOERR No error.
 * @returns NC_EHDFERR HDF5 returned an error.
 * @author Ed Hartnett
 */
int
nc4_rec_write_metadata(NC_GRP_INFO_T *grp, nc_bool_t bad_coord_order)
{
   NC_DIM_INFO_T *dim = NULL;
   NC_VAR_INFO_T *var = NULL;
   NC_GRP_INFO_T *child_grp = NULL;
   int coord_varid = -1;
   int var_index = 0;
   int dim_index = 0;
   int retval;
   int i;

   assert(grp && grp->hdr.name && grp->hdf_grpid);
   LOG((3, "%s: grp->hdr.name %s, bad_coord_order %d", __func__, grp->hdr.name, bad_coord_order));

   /* Write global attributes for this group. */
   if ((retval = write_attlist(grp->att, NC_GLOBAL, grp)))
      return retval;

   /* Set the pointers to the beginning of the list of dims & vars in this
    * group. */
   dim_index = 0;
   var_index = 0;
   /* prime the loop */
   dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,dim_index);
   var = (NC_VAR_INFO_T*)ncindexith(grp->vars,var_index);
   /* Because of HDF5 ordering the dims and vars have to be stored in
    * this way to ensure that the dims and coordinate vars come out in
    * the correct order. Each pass of the outer loop writes a run of
    * non-coord dims, then a run of vars up to and including the coord
    * var (if any) of the last dim visited. */
   while (dim || var)
   {
      nc_bool_t found_coord, wrote_coord;

      /* Write non-coord dims in order, stopping at the first one that
       * has an associated coord var. When one is found, remember its
       * coord var's id in coord_varid so the var loop below knows
       * where to stop. */
      for (found_coord = NC_FALSE; dim && !found_coord; )
      {
         if (!dim->coord_var)
         {
            if ((retval = write_dim(dim, grp, bad_coord_order)))
               return retval;
         }
         else
         {
            coord_varid = dim->coord_var->hdr.id;
            found_coord = NC_TRUE;
         }
         /* Advance to the next dim (may be NULL at end of index). */
         dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,++dim_index);
      }

      /* Write each var. When we get to the coord var we are waiting
       * for (if any), then we break after writing it. */
      for (wrote_coord = NC_FALSE; var && !wrote_coord; )
      {
         if ((retval = write_var(var, grp, bad_coord_order)))
            return retval;
         if (found_coord && var->hdr.id == coord_varid)
            wrote_coord = NC_TRUE;
         /* Advance to the next var (may be NULL at end of index). */
         var = (NC_VAR_INFO_T*)ncindexith(grp->vars,++var_index);
      }
   } /* end while */

   /* Attach dimension scales to all variables in this group. */
   if ((retval = attach_dimscales(grp)))
      return retval;

   /* If there are any child groups, write their metadata. */
   for(i=0;i<ncindexsize(grp->children);i++) {
      if((child_grp = (NC_GRP_INFO_T*)ncindexith(grp->children,i)) == NULL) continue;
      if ((retval = nc4_rec_write_metadata(child_grp, bad_coord_order)))
         return retval;
   }
   return NC_NOERR;
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
2018-06-09 01:18:08 +08:00
|
|
|
* @internal Recursively write all groups and types.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
int
|
2013-12-01 13:20:28 +08:00
|
|
|
nc4_rec_write_groups_types(NC_GRP_INFO_T *grp)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_GRP_INFO_T *child_grp;
|
|
|
|
NC_TYPE_INFO_T *type;
|
|
|
|
int retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
int i;
|
2017-12-03 22:11:51 +08:00
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(grp && grp->hdr.name);
|
|
|
|
LOG((3, "%s: grp->hdr.name %s", __func__, grp->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* Create the group in the HDF5 file if it doesn't exist. */
|
|
|
|
if (!grp->hdf_grpid)
|
|
|
|
if ((retval = create_group(grp)))
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
/* If this is the root group of a file with strict NC3 rules, write
|
|
|
|
* an attribute. But don't leave the attribute open. */
|
|
|
|
if (!grp->parent && (grp->nc4_info->cmode & NC_CLASSIC_MODEL))
|
|
|
|
if ((retval = write_nc3_strict_att(grp->hdf_grpid)))
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
/* If there are any user-defined types, write them now. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->type);i++) {
|
|
|
|
if((type = (NC_TYPE_INFO_T*)ncindexith(grp->type,i)) == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = commit_type(grp, type)))
|
|
|
|
return retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* If there are any child groups, write their groups and types. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->children);i++) {
|
|
|
|
if((child_grp = (NC_GRP_INFO_T*)ncindexith(grp->children,i)) == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = nc4_rec_write_groups_types(child_grp)))
|
|
|
|
return retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Copy data from one buffer to another, performing
|
|
|
|
* appropriate data conversion.
|
|
|
|
*
|
|
|
|
* This function will copy data from one buffer to another, in
|
|
|
|
* accordance with the types. Range errors will be noted, and the fill
|
|
|
|
* value used (or the default fill value if none is supplied) for
|
|
|
|
* values that overflow the type.
|
|
|
|
*
|
|
|
|
* @note I should be able to take this out when HDF5 does the right thing
|
|
|
|
* with data type conversion. Ed Hartnett, 11/15/3
|
|
|
|
*
|
|
|
|
* @param src Pointer to source of data.
|
|
|
|
* @param dest Pointer that gets data.
|
|
|
|
* @param src_type Type ID of source data.
|
|
|
|
* @param dest_type Type ID of destination data.
|
|
|
|
* @param len Number of elements of data to copy.
|
|
|
|
* @param range_error Pointer that gets 1 if there was a range error.
|
|
|
|
* @param fill_value The fill value.
|
|
|
|
* @param strict_nc3 Non-zero if strict model in effect.
|
2018-06-09 05:50:39 +08:00
|
|
|
* @param src_long Is the source NC_LONG?
|
|
|
|
* @param dest_long Is the destination NC_LONG?
|
2017-12-03 22:11:51 +08:00
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EBADTYPE Type not found.
|
2018-06-09 05:50:39 +08:00
|
|
|
* @author Ed Hartnett, Dennis Heimbigner
|
2010-06-03 21:24:43 +08:00
|
|
|
*/
|
|
|
|
int
|
2018-06-09 05:50:39 +08:00
|
|
|
nc4_convert_type(const void *src, void *dest,
|
|
|
|
const nc_type src_type, const nc_type dest_type,
|
|
|
|
const size_t len, int *range_error,
|
|
|
|
const void *fill_value, int strict_nc3, int src_long,
|
|
|
|
int dest_long)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
char *cp, *cp1;
|
|
|
|
float *fp, *fp1;
|
|
|
|
double *dp, *dp1;
|
|
|
|
int *ip, *ip1;
|
2018-06-09 05:50:39 +08:00
|
|
|
signed long *lp, *lp1;
|
2017-12-03 22:11:51 +08:00
|
|
|
short *sp, *sp1;
|
|
|
|
signed char *bp, *bp1;
|
|
|
|
unsigned char *ubp, *ubp1;
|
|
|
|
unsigned short *usp, *usp1;
|
|
|
|
unsigned int *uip, *uip1;
|
|
|
|
long long *lip, *lip1;
|
|
|
|
unsigned long long *ulip, *ulip1;
|
|
|
|
size_t count = 0;
|
|
|
|
|
|
|
|
*range_error = 0;
|
2018-06-09 05:50:39 +08:00
|
|
|
LOG((3, "%s: len %d src_type %d dest_type %d src_long %d dest_long %d",
|
|
|
|
__func__, len, src_type, dest_type, src_long, dest_long));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* OK, this is ugly. If you can think of anything better, I'm open
|
|
|
|
to suggestions!
|
|
|
|
|
|
|
|
Note that we don't use a default fill value for type
|
|
|
|
NC_BYTE. This is because Lord Voldemort cast a nofilleramous spell
|
|
|
|
at Harry Potter, but it bounced off his scar and hit the netcdf-4
|
|
|
|
code.
|
|
|
|
*/
|
|
|
|
switch (src_type)
|
|
|
|
{
|
|
|
|
case NC_CHAR:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_CHAR:
|
|
|
|
for (cp = (char *)src, cp1 = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*cp1++ = *cp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
2018-04-24 06:38:08 +08:00
|
|
|
LOG((0, "%s: Unknown destination type.", __func__));
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_BYTE:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_BYTE:
|
|
|
|
for (bp = (signed char *)src, bp1 = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*bp1++ = *bp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (bp = (signed char *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*bp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *bp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (bp = (signed char *)src, sp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*sp++ = *bp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (bp = (signed char *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*bp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *bp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
{
|
|
|
|
for (bp = (signed char *)src, lp = dest; count < len; count++)
|
|
|
|
*lp++ = *bp++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
for (bp = (signed char *)src, ip = dest; count < len; count++)
|
|
|
|
*ip++ = *bp++;
|
|
|
|
break;
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_UINT:
|
|
|
|
for (bp = (signed char *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*bp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *bp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (bp = (signed char *)src, lip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*lip++ = *bp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (bp = (signed char *)src, ulip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*bp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ulip++ = *bp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (bp = (signed char *)src, fp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*fp++ = *bp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (bp = (signed char *)src, dp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*dp++ = *bp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_UBYTE:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_BYTE:
|
|
|
|
for (ubp = (unsigned char *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (!strict_nc3 && *ubp > X_SCHAR_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *ubp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (ubp = (unsigned char *)src, sp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*sp++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (ubp = (unsigned char *)src, ubp1 = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*ubp1++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (ubp = (unsigned char *)src, usp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*usp++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
{
|
|
|
|
for (ubp = (unsigned char *)src, lp = dest; count < len; count++)
|
|
|
|
*lp++ = *ubp++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
for (ubp = (unsigned char *)src, ip = dest; count < len; count++)
|
|
|
|
*ip++ = *ubp++;
|
|
|
|
break;
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_UINT:
|
|
|
|
for (ubp = (unsigned char *)src, uip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*uip++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (ubp = (unsigned char *)src, lip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*lip++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (ubp = (unsigned char *)src, ulip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*ulip++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (ubp = (unsigned char *)src, fp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*fp++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (ubp = (unsigned char *)src, dp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*dp++ = *ubp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_SHORT:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (sp = (short *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*sp > X_UCHAR_MAX || *sp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *sp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (sp = (short *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*sp > X_SCHAR_MAX || *sp < X_SCHAR_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *sp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (sp = (short *)src, sp1 = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*sp1++ = *sp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (sp = (short *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*sp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *sp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
for (sp = (short *)src, lp = dest; count < len; count++)
|
|
|
|
*lp++ = *sp++;
|
|
|
|
else
|
|
|
|
for (sp = (short *)src, ip = dest; count < len; count++)
|
|
|
|
*ip++ = *sp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (sp = (short *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*sp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *sp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (sp = (short *)src, lip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*lip++ = *sp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (sp = (short *)src, ulip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*sp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ulip++ = *sp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (sp = (short *)src, fp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*fp++ = *sp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (sp = (short *)src, dp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*dp++ = *sp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_USHORT:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (usp = (unsigned short *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*usp > X_UCHAR_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *usp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (usp = (unsigned short *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*usp > X_SCHAR_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *usp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (usp = (unsigned short *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*usp > X_SHORT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *usp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (usp = (unsigned short *)src, usp1 = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*usp1++ = *usp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
for (usp = (unsigned short *)src, lp = dest; count < len; count++)
|
|
|
|
*lp++ = *usp++;
|
|
|
|
else
|
|
|
|
for (usp = (unsigned short *)src, ip = dest; count < len; count++)
|
|
|
|
*ip++ = *usp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (usp = (unsigned short *)src, uip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*uip++ = *usp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (usp = (unsigned short *)src, lip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*lip++ = *usp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (usp = (unsigned short *)src, ulip = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*ulip++ = *usp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (usp = (unsigned short *)src, fp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*fp++ = *usp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (usp = (unsigned short *)src, dp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*dp++ = *usp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (src_long)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2018-06-09 05:50:39 +08:00
|
|
|
switch (dest_type)
|
2018-06-06 04:40:49 +08:00
|
|
|
{
|
2018-06-09 05:50:39 +08:00
|
|
|
case NC_UBYTE:
|
|
|
|
for (lp = (long *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp > X_UCHAR_MAX || *lp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *lp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (lp = (long *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp > X_SCHAR_MAX || *lp < X_SCHAR_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *lp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (lp = (long *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp > X_SHORT_MAX || *lp < X_SHORT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *lp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (lp = (long *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp > X_USHORT_MAX || *lp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *lp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT: /* src is long */
|
|
|
|
if (dest_long)
|
|
|
|
{
|
|
|
|
for (lp = (long *)src, lp1 = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp > X_LONG_MAX || *lp < X_LONG_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*lp1++ = *lp++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else /* dest is int */
|
|
|
|
{
|
|
|
|
for (lp = (long *)src, ip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp > X_INT_MAX || *lp < X_INT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*ip++ = *lp++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (lp = (long *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp > X_UINT_MAX || *lp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *lp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (lp = (long *)src, lip = dest; count < len; count++)
|
|
|
|
*lip++ = *lp++;
|
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (lp = (long *)src, ulip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ulip++ = *lp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (lp = (long *)src, fp = dest; count < len; count++)
|
|
|
|
*fp++ = *lp++;
|
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (lp = (long *)src, dp = dest; count < len; count++)
|
|
|
|
*dp++ = *lp++;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2018-06-09 05:50:39 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
switch (dest_type)
|
2018-06-06 04:40:49 +08:00
|
|
|
{
|
2018-06-09 05:50:39 +08:00
|
|
|
case NC_UBYTE:
|
|
|
|
for (ip = (int *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip > X_UCHAR_MAX || *ip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *ip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (ip = (int *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip > X_SCHAR_MAX || *ip < X_SCHAR_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *ip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (ip = (int *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip > X_SHORT_MAX || *ip < X_SHORT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *ip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (ip = (int *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip > X_USHORT_MAX || *ip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *ip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT: /* src is int */
|
|
|
|
if (dest_long)
|
|
|
|
{
|
|
|
|
for (ip = (int *)src, lp1 = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip > X_LONG_MAX || *ip < X_LONG_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*lp1++ = *ip++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else /* dest is int */
|
|
|
|
{
|
|
|
|
for (ip = (int *)src, ip1 = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip > X_INT_MAX || *ip < X_INT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*ip1++ = *ip++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (ip = (int *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip > X_UINT_MAX || *ip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *ip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (ip = (int *)src, lip = dest; count < len; count++)
|
|
|
|
*lip++ = *ip++;
|
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (ip = (int *)src, ulip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ulip++ = *ip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (ip = (int *)src, fp = dest; count < len; count++)
|
|
|
|
*fp++ = *ip++;
|
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (ip = (int *)src, dp = dest; count < len; count++)
|
|
|
|
*dp++ = *ip++;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
2018-06-06 04:40:49 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_UINT:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (uip = (unsigned int *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*uip > X_UCHAR_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *uip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (uip = (unsigned int *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*uip > X_SCHAR_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *uip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (uip = (unsigned int *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*uip > X_SHORT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *uip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (uip = (unsigned int *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*uip > X_USHORT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *uip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
for (uip = (unsigned int *)src, lp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*uip > X_LONG_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*lp++ = *uip++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
for (uip = (unsigned int *)src, ip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*uip > X_INT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*ip++ = *uip++;
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (uip = (unsigned int *)src, uip1 = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*uip > X_UINT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip1++ = *uip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (uip = (unsigned int *)src, lip = dest; count < len; count++)
|
|
|
|
*lip++ = *uip++;
|
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (uip = (unsigned int *)src, ulip = dest; count < len; count++)
|
|
|
|
*ulip++ = *uip++;
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (uip = (unsigned int *)src, fp = dest; count < len; count++)
|
|
|
|
*fp++ = *uip++;
|
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (uip = (unsigned int *)src, dp = dest; count < len; count++)
|
|
|
|
*dp++ = *uip++;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NC_INT64:
|
|
|
|
switch (dest_type)
|
|
|
|
{
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (lip = (long long *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip > X_UCHAR_MAX || *lip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *lip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (lip = (long long *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip > X_SCHAR_MAX || *lip < X_SCHAR_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *lip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (lip = (long long *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip > X_SHORT_MAX || *lip < X_SHORT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *lip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (lip = (long long *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip > X_USHORT_MAX || *lip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *lip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (lip = (long long *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip > X_UINT_MAX || *lip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *lip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
for (lip = (long long *)src, lp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip > X_LONG_MAX || *lip < X_LONG_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*lp++ = *lip++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
for (lip = (long long *)src, ip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip > X_INT_MAX || *lip < X_INT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*ip++ = *lip++;
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (lip = (long long *)src, lip1 = dest; count < len; count++)
|
|
|
|
*lip1++ = *lip++;
|
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (lip = (long long *)src, ulip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*lip < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ulip++ = *lip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (lip = (long long *)src, fp = dest; count < len; count++)
|
|
|
|
*fp++ = *lip++;
|
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (lip = (long long *)src, dp = dest; count < len; count++)
|
|
|
|
*dp++ = *lip++;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NC_UINT64:
|
|
|
|
switch (dest_type)
|
|
|
|
{
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (ulip = (unsigned long long *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_UCHAR_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *ulip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (ulip = (unsigned long long *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_SCHAR_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *ulip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (ulip = (unsigned long long *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_SHORT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *ulip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (ulip = (unsigned long long *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_USHORT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *ulip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (ulip = (unsigned long long *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_UINT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *ulip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
for (ulip = (unsigned long long *)src, lp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_LONG_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*lp++ = *ulip++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
for (ulip = (unsigned long long *)src, ip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_INT_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*ip++ = *ulip++;
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (ulip = (unsigned long long *)src, lip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*ulip > X_INT64_MAX)
|
|
|
|
(*range_error)++;
|
|
|
|
*lip++ = *ulip++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (ulip = (unsigned long long *)src, ulip1 = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*ulip1++ = *ulip++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (ulip = (unsigned long long *)src, fp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*fp++ = *ulip++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (ulip = (unsigned long long *)src, dp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*dp++ = *ulip++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_FLOAT:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (fp = (float *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > X_UCHAR_MAX || *fp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (fp = (float *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > (double)X_SCHAR_MAX || *fp < (double)X_SCHAR_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (fp = (float *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > (double)X_SHORT_MAX || *fp < (double)X_SHORT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (fp = (float *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > X_USHORT_MAX || *fp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (fp = (float *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > X_UINT_MAX || *fp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
for (fp = (float *)src, lp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > (double)X_LONG_MAX || *fp < (double)X_LONG_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*lp++ = *fp++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
for (fp = (float *)src, ip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > (double)X_INT_MAX || *fp < (double)X_INT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*ip++ = *fp++;
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (fp = (float *)src, lip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > X_INT64_MAX || *fp <X_INT64_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*lip++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (fp = (float *)src, lip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*fp > X_UINT64_MAX || *fp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*lip++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (fp = (float *)src, fp1 = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
/* if (*fp > X_FLOAT_MAX || *fp < X_FLOAT_MIN)
|
|
|
|
(*range_error)++;*/
|
|
|
|
*fp1++ = *fp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (fp = (float *)src, dp = dest; count < len; count++)
|
2014-11-12 06:24:38 +08:00
|
|
|
*dp++ = *fp++;
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_DOUBLE:
|
2014-11-12 06:24:38 +08:00
|
|
|
switch (dest_type)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_UBYTE:
|
|
|
|
for (dp = (double *)src, ubp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_UCHAR_MAX || *dp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*ubp++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_BYTE:
|
|
|
|
for (dp = (double *)src, bp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_SCHAR_MAX || *dp < X_SCHAR_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*bp++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_SHORT:
|
|
|
|
for (dp = (double *)src, sp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_SHORT_MAX || *dp < X_SHORT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*sp++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_USHORT:
|
|
|
|
for (dp = (double *)src, usp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_USHORT_MAX || *dp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*usp++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT:
|
|
|
|
for (dp = (double *)src, uip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_UINT_MAX || *dp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*uip++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_INT:
|
2018-06-09 05:50:39 +08:00
|
|
|
if (dest_long)
|
|
|
|
for (dp = (double *)src, lp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_LONG_MAX || *dp < X_LONG_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*lp++ = *dp++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
for (dp = (double *)src, ip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_INT_MAX || *dp < X_INT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*ip++ = *dp++;
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
break;
|
|
|
|
case NC_INT64:
|
|
|
|
for (dp = (double *)src, lip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_INT64_MAX || *dp < X_INT64_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*lip++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_UINT64:
|
|
|
|
for (dp = (double *)src, lip = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_UINT64_MAX || *dp < 0)
|
|
|
|
(*range_error)++;
|
|
|
|
*lip++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_FLOAT:
|
|
|
|
for (dp = (double *)src, fp = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
if (*dp > X_FLOAT_MAX || *dp < X_FLOAT_MIN)
|
|
|
|
(*range_error)++;
|
|
|
|
*fp++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case NC_DOUBLE:
|
|
|
|
for (dp = (double *)src, dp1 = dest; count < len; count++)
|
|
|
|
{
|
|
|
|
/* if (*dp > X_DOUBLE_MAX || *dp < X_DOUBLE_MIN) */
|
|
|
|
/* (*range_error)++; */
|
|
|
|
*dp1++ = *dp++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
default:
|
2014-11-12 06:24:38 +08:00
|
|
|
LOG((0, "%s: unexpected src type. src_type %d, dest_type %d",
|
|
|
|
__func__, src_type, dest_type));
|
|
|
|
return NC_EBADTYPE;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal In our first pass through the data, we may have
|
|
|
|
* encountered variables before encountering their dimscales, so go
|
|
|
|
* through the vars in this file and make sure we've got a dimid for
|
2018-06-09 01:18:08 +08:00
|
|
|
* each.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param grp Pointer to group info struct.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned an error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2010-06-03 21:24:43 +08:00
|
|
|
int
|
|
|
|
nc4_rec_match_dimscales(NC_GRP_INFO_T *grp)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_GRP_INFO_T *g;
|
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_DIM_INFO_T *dim;
|
|
|
|
int retval = NC_NOERR;
|
|
|
|
int i;
|
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
assert(grp && grp->hdr.name);
|
|
|
|
LOG((4, "%s: grp->hdr.name %s", __func__, grp->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* Perform var dimscale match for child groups. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->children);i++) {
|
|
|
|
if((g = (NC_GRP_INFO_T*)ncindexith(grp->children,i)) == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((retval = nc4_rec_match_dimscales(g)))
|
|
|
|
return retval;
|
2018-03-17 01:46:18 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Check all the vars in this group. If they have dimscale info,
|
|
|
|
* try and find a dimension for them. */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(i=0;i<ncindexsize(grp->vars);i++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
2017-01-11 04:54:09 +08:00
|
|
|
int ndims;
|
|
|
|
int d;
|
2018-03-17 01:46:18 +08:00
|
|
|
if((var = (NC_VAR_INFO_T*)ncindexith(grp->vars,i)) == NULL) continue;
|
2016-03-05 00:14:15 +08:00
|
|
|
/* Check all vars and see if dim[i] != NULL if dimids[i] valid. */
|
2018-03-17 01:46:18 +08:00
|
|
|
/* This loop is very odd. Under normal circumstances, var->dimid[d] is zero
|
|
|
|
(from the initial calloc) which is a legitimate dimid. The code does not
|
|
|
|
distinquish this case from the dimscale case where the id might actually
|
|
|
|
be defined.
|
|
|
|
The original nc4_find_dim searched up the group tree looking for the given
|
|
|
|
dimid in one of the dim lists associated with each ancestor group.
|
|
|
|
I changed nc4_fnd_dim to use the dimid directly using h5->alldims.
|
|
|
|
However, here that is incorrect because it will find the dimid 0 always
|
|
|
|
(if any dimensions were defined). Except that when dimscale dimids have
|
|
|
|
been defined, one or more of the values in var->dimids will have a
|
2018-04-24 06:38:08 +08:00
|
|
|
legitimate value.
|
2018-03-17 01:46:18 +08:00
|
|
|
The solution I choose is to modify nc4_var_list_add to initialize dimids to
|
|
|
|
illegal values (-1). This is another example of the problems with dimscales.
|
|
|
|
*/
|
2017-01-11 04:54:09 +08:00
|
|
|
ndims = var->ndims;
|
2016-03-05 00:14:15 +08:00
|
|
|
for (d = 0; d < ndims; d++)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
if (var->dim[d] == NULL) {
|
|
|
|
nc4_find_dim(grp, var->dimids[d], &var->dim[d], NULL);
|
|
|
|
}
|
2018-03-17 01:46:18 +08:00
|
|
|
/* assert(var->dim[d] && var->dim[d]->hdr.id == var->dimids[d]); */
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2016-03-05 00:14:15 +08:00
|
|
|
|
2013-12-01 13:20:28 +08:00
|
|
|
/* Skip dimension scale variables */
|
|
|
|
if (!var->dimscale)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
int d;
|
2018-03-17 01:46:18 +08:00
|
|
|
int j;
|
2013-12-01 13:20:28 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Are there dimscales for this variable? */
|
|
|
|
if (var->dimscale_hdf5_objids)
|
|
|
|
{
|
|
|
|
for (d = 0; d < var->ndims; d++)
|
2014-11-12 06:24:38 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
nc_bool_t finished = NC_FALSE;
|
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((5, "%s: var %s has dimscale info...", __func__, var->hdr.name));
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Look at all the dims in this group to see if they
|
|
|
|
* match. */
|
|
|
|
for (g = grp; g && !finished; g = g->parent)
|
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
for(j=0;j<ncindexsize(g->dim);j++)
|
|
|
|
{
|
|
|
|
if((dim = (NC_DIM_INFO_T*)ncindexith(g->dim,j)) == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if (var->dimscale_hdf5_objids[d].fileno[0] == dim->hdf5_objid.fileno[0] &&
|
|
|
|
var->dimscale_hdf5_objids[d].objno[0] == dim->hdf5_objid.objno[0] &&
|
|
|
|
var->dimscale_hdf5_objids[d].fileno[1] == dim->hdf5_objid.fileno[1] &&
|
|
|
|
var->dimscale_hdf5_objids[d].objno[1] == dim->hdf5_objid.objno[1])
|
|
|
|
{
|
|
|
|
LOG((4, "%s: for dimension %d, found dim %s",
|
2018-03-17 01:46:18 +08:00
|
|
|
__func__, d, dim->hdr.name));
|
|
|
|
var->dimids[d] = dim->hdr.id;
|
2017-12-03 22:11:51 +08:00
|
|
|
var->dim[d] = dim;
|
|
|
|
finished = NC_TRUE;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} /* next dim */
|
|
|
|
} /* next grp */
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((5, "%s: dimid for this dimscale is %d", __func__, var->type_info->hdr.id));
|
2017-12-03 22:11:51 +08:00
|
|
|
} /* next var->dim */
|
|
|
|
}
|
|
|
|
/* No dimscales for this var! Invent phony dimensions. */
|
|
|
|
else
|
|
|
|
{
|
|
|
|
hid_t spaceid = 0;
|
|
|
|
hsize_t *h5dimlen = NULL, *h5dimlenmax = NULL;
|
|
|
|
int dataset_ndims;
|
|
|
|
|
|
|
|
/* Find the space information for this dimension. */
|
|
|
|
if ((spaceid = H5Dget_space(var->hdf_datasetid)) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
|
|
|
|
/* Get the len of each dim in the space. */
|
|
|
|
if (var->ndims)
|
|
|
|
{
|
|
|
|
if (!(h5dimlen = malloc(var->ndims * sizeof(hsize_t))))
|
|
|
|
return NC_ENOMEM;
|
|
|
|
if (!(h5dimlenmax = malloc(var->ndims * sizeof(hsize_t))))
|
|
|
|
{
|
|
|
|
free(h5dimlen);
|
|
|
|
return NC_ENOMEM;
|
|
|
|
}
|
|
|
|
if ((dataset_ndims = H5Sget_simple_extent_dims(spaceid, h5dimlen,
|
|
|
|
h5dimlenmax)) < 0) {
|
|
|
|
free(h5dimlenmax);
|
|
|
|
free(h5dimlen);
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
if (dataset_ndims != var->ndims) {
|
|
|
|
free(h5dimlenmax);
|
|
|
|
free(h5dimlen);
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
2014-11-12 06:24:38 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
else
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Make sure it's scalar. */
|
|
|
|
if (H5Sget_simple_extent_type(spaceid) != H5S_SCALAR)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release the space object. */
|
|
|
|
if (H5Sclose(spaceid) < 0) {
|
|
|
|
free(h5dimlen);
|
|
|
|
free(h5dimlenmax);
|
|
|
|
return NC_EHDFERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create a phony dimension for each dimension in the
|
|
|
|
* dataset, unless there already is one the correct
|
|
|
|
* size. */
|
|
|
|
for (d = 0; d < var->ndims; d++)
|
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
int k;
|
|
|
|
int match;
|
2017-12-03 22:11:51 +08:00
|
|
|
/* Is there already a phony dimension of the correct size? */
|
2018-03-17 01:46:18 +08:00
|
|
|
for(match=-1,k=0;k<ncindexsize(grp->dim);k++) {
|
|
|
|
if((dim = (NC_DIM_INFO_T*)ncindexith(grp->dim,k)) == NULL) continue;
|
2017-12-03 22:11:51 +08:00
|
|
|
if ((dim->len == h5dimlen[d]) &&
|
|
|
|
((h5dimlenmax[d] == H5S_UNLIMITED && dim->unlimited) ||
|
|
|
|
(h5dimlenmax[d] != H5S_UNLIMITED && !dim->unlimited)))
|
2018-03-17 01:46:18 +08:00
|
|
|
{match = k; break;}
|
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
/* Didn't find a phony dim? Then create one. */
|
2018-03-17 01:46:18 +08:00
|
|
|
if (match < 0)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
char phony_dim_name[NC_MAX_NAME + 1];
|
2018-03-17 01:46:18 +08:00
|
|
|
sprintf(phony_dim_name, "phony_dim_%d", grp->nc4_info->next_dimid);
|
|
|
|
LOG((3, "%s: creating phony dim for var %s", __func__, var->hdr.name));
|
|
|
|
if ((retval = nc4_dim_list_add(grp, phony_dim_name, h5dimlen[d], -1, &dim))) {
|
2017-12-03 22:11:51 +08:00
|
|
|
free(h5dimlenmax);
|
|
|
|
free(h5dimlen);
|
|
|
|
return retval;
|
2014-11-12 06:24:38 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
if (h5dimlenmax[d] == H5S_UNLIMITED)
|
|
|
|
dim->unlimited = NC_TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The variable must remember the dimid. */
|
2018-03-17 01:46:18 +08:00
|
|
|
var->dimids[d] = dim->hdr.id;
|
2017-12-03 22:11:51 +08:00
|
|
|
var->dim[d] = dim;
|
|
|
|
} /* next dim */
|
2014-11-12 06:24:38 +08:00
|
|
|
|
|
|
|
/* Free the memory we malloced. */
|
2017-12-03 22:11:51 +08:00
|
|
|
free(h5dimlen);
|
|
|
|
free(h5dimlenmax);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Get the length, in bytes, of one element of a type in
|
2018-06-09 01:18:08 +08:00
|
|
|
* memory.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param h5 Pointer to HDF5 file info struct.
|
|
|
|
* @param xtype NetCDF type ID.
|
2018-04-24 06:38:08 +08:00
|
|
|
* @param len Pointer that gets length in bytes.
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EBADTYPE Type not found
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-11-12 06:17:08 +08:00
|
|
|
int
|
2018-03-17 01:46:18 +08:00
|
|
|
nc4_get_typelen_mem(NC_HDF5_FILE_INFO_T *h5, nc_type xtype, size_t *len)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
NC_TYPE_INFO_T *type;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
LOG((4, "%s xtype: %d", __func__, xtype));
|
|
|
|
assert(len);
|
|
|
|
|
|
|
|
/* If this is an atomic type, the answer is easy. */
|
|
|
|
switch (xtype)
|
|
|
|
{
|
|
|
|
case NC_BYTE:
|
|
|
|
case NC_CHAR:
|
|
|
|
case NC_UBYTE:
|
2014-11-12 06:24:38 +08:00
|
|
|
*len = sizeof(char);
|
|
|
|
return NC_NOERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_SHORT:
|
|
|
|
case NC_USHORT:
|
2014-11-12 06:24:38 +08:00
|
|
|
*len = sizeof(short);
|
|
|
|
return NC_NOERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_INT:
|
|
|
|
case NC_UINT:
|
2018-03-17 01:46:18 +08:00
|
|
|
*len = sizeof(int);
|
2014-11-12 06:24:38 +08:00
|
|
|
return NC_NOERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_FLOAT:
|
2014-11-12 06:24:38 +08:00
|
|
|
*len = sizeof(float);
|
|
|
|
return NC_NOERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_DOUBLE:
|
2014-11-12 06:24:38 +08:00
|
|
|
*len = sizeof(double);
|
|
|
|
return NC_NOERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_INT64:
|
|
|
|
case NC_UINT64:
|
2014-11-12 06:24:38 +08:00
|
|
|
*len = sizeof(long long);
|
|
|
|
return NC_NOERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
case NC_STRING:
|
2014-11-12 06:24:38 +08:00
|
|
|
*len = sizeof(char *);
|
|
|
|
return NC_NOERR;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* See if var is compound type. */
|
|
|
|
if ((retval = nc4_find_type(h5, xtype, &type)))
|
|
|
|
return retval;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
if (!type)
|
|
|
|
return NC_EBADTYPE;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
*len = type->size;
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
LOG((5, "type->size: %d", type->size));
|
2010-06-03 21:24:43 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
return NC_NOERR;
|
2010-06-03 21:24:43 +08:00
|
|
|
}
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
2018-06-09 01:18:08 +08:00
|
|
|
* @internal Get the class of a type
|
2017-12-05 03:21:14 +08:00
|
|
|
*
|
|
|
|
* @param h5 Pointer to the HDF5 file info struct.
|
|
|
|
* @param xtype NetCDF type ID.
|
|
|
|
* @param type_class Pointer that gets class of type, NC_INT,
|
|
|
|
* NC_FLOAT, NC_CHAR, or NC_STRING, NC_ENUM, NC_VLEN, NC_COMPOUND, or
|
|
|
|
* NC_OPAQUE.
|
|
|
|
*
|
|
|
|
* @return ::NC_NOERR No error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
2014-11-12 06:17:08 +08:00
|
|
|
int
|
2014-02-12 07:12:08 +08:00
|
|
|
nc4_get_typeclass(const NC_HDF5_FILE_INFO_T *h5, nc_type xtype, int *type_class)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int retval = NC_NOERR;
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
LOG((4, "%s xtype: %d", __func__, xtype));
|
|
|
|
assert(type_class);
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/* If this is an atomic type, the answer is easy. */
|
|
|
|
if (xtype <= NC_STRING)
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
switch (xtype)
|
2017-12-03 22:11:51 +08:00
|
|
|
{
|
|
|
|
case NC_BYTE:
|
|
|
|
case NC_UBYTE:
|
|
|
|
case NC_SHORT:
|
|
|
|
case NC_USHORT:
|
|
|
|
case NC_INT:
|
|
|
|
case NC_UINT:
|
|
|
|
case NC_INT64:
|
|
|
|
case NC_UINT64:
|
|
|
|
/* NC_INT is class used for all integral types */
|
|
|
|
*type_class = NC_INT;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NC_FLOAT:
|
|
|
|
case NC_DOUBLE:
|
|
|
|
/* NC_FLOAT is class used for all floating-point types */
|
|
|
|
*type_class = NC_FLOAT;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NC_CHAR:
|
|
|
|
*type_class = NC_CHAR;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case NC_STRING:
|
|
|
|
*type_class = NC_STRING;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
BAIL(NC_EBADTYPE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2014-02-12 07:12:08 +08:00
|
|
|
NC_TYPE_INFO_T *type;
|
|
|
|
|
|
|
|
/* See if it's a used-defined type */
|
|
|
|
if ((retval = nc4_find_type(h5, xtype, &type)))
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(retval);
|
2014-02-12 07:12:08 +08:00
|
|
|
if (!type)
|
2017-12-03 22:11:51 +08:00
|
|
|
BAIL(NC_EBADTYPE);
|
2014-02-12 07:12:08 +08:00
|
|
|
|
|
|
|
*type_class = type->nc_type_class;
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2014-02-12 07:12:08 +08:00
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
exit:
|
|
|
|
return retval;
|
2014-02-12 07:12:08 +08:00
|
|
|
}
|
2016-05-04 11:17:06 +08:00
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal
|
|
|
|
*
|
2018-03-17 01:46:18 +08:00
|
|
|
* @param uselog
|
2017-12-05 03:21:14 +08:00
|
|
|
* @param id HDF5 ID.
|
|
|
|
* @param type
|
|
|
|
*
|
|
|
|
* @return NC_NOERR No error.
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
void
|
2018-03-17 01:46:18 +08:00
|
|
|
reportobject(int uselog, hid_t id, unsigned int type)
|
2016-05-04 11:17:06 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
char name[MAXNAME];
|
|
|
|
ssize_t len;
|
|
|
|
const char* typename = NULL;
|
2018-03-17 01:46:18 +08:00
|
|
|
long long printid = (long long)id;
|
2017-12-03 22:11:51 +08:00
|
|
|
|
|
|
|
len = H5Iget_name(id, name, MAXNAME);
|
|
|
|
if(len < 0) return;
|
|
|
|
name[len] = '\0';
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case H5F_OBJ_FILE: typename = "File"; break;
|
|
|
|
case H5F_OBJ_DATASET: typename = "Dataset"; break;
|
|
|
|
case H5F_OBJ_GROUP: typename = "Group"; break;
|
|
|
|
case H5F_OBJ_DATATYPE: typename = "Datatype"; break;
|
|
|
|
case H5F_OBJ_ATTR:
|
|
|
|
typename = "Attribute";
|
|
|
|
len = H5Aget_name(id, MAXNAME, name);
|
|
|
|
if(len < 0) len = 0;
|
|
|
|
name[len] = '\0';
|
|
|
|
break;
|
|
|
|
default: typename = "<unknown>"; break;
|
|
|
|
}
|
2016-05-04 11:17:06 +08:00
|
|
|
#ifdef LOGGING
|
2018-03-17 01:46:18 +08:00
|
|
|
if(uselog) {
|
|
|
|
LOG((0,"Type = %s(%lld) name='%s'",typename,printid,name));
|
|
|
|
} else
|
2016-05-04 11:17:06 +08:00
|
|
|
#endif
|
2018-03-17 01:46:18 +08:00
|
|
|
{
|
2018-05-07 22:56:19 +08:00
|
|
|
fprintf(stderr,"Type = %s(%lld) name='%s'",typename,printid,name);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
2018-06-09 01:18:08 +08:00
|
|
|
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal
|
|
|
|
*
|
2018-03-17 01:46:18 +08:00
|
|
|
* @param uselog
|
2017-12-05 03:21:14 +08:00
|
|
|
* @param fid HDF5 ID.
|
|
|
|
* @param ntypes Number of types.
|
|
|
|
* @param otypes Pointer that gets number of open types.
|
|
|
|
*
|
|
|
|
* @return ::NC_NOERR No error.
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
static void
|
2018-03-17 01:46:18 +08:00
|
|
|
reportopenobjectsT(int uselog, hid_t fid, int ntypes, unsigned int* otypes)
|
2016-05-04 11:17:06 +08:00
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int t,i;
|
|
|
|
ssize_t ocount;
|
|
|
|
size_t maxobjs = -1;
|
|
|
|
hid_t* idlist = NULL;
|
2016-05-04 11:17:06 +08:00
|
|
|
|
2018-03-17 01:46:18 +08:00
|
|
|
/* Always report somehow */
|
2016-05-04 11:17:06 +08:00
|
|
|
#ifdef LOGGING
|
2018-03-21 11:50:58 +08:00
|
|
|
if(uselog)
|
2018-03-17 01:46:18 +08:00
|
|
|
LOG((0,"\nReport: open objects on %lld",(long long)fid));
|
2018-03-21 11:50:58 +08:00
|
|
|
else
|
2016-05-04 11:17:06 +08:00
|
|
|
#endif
|
2018-03-17 01:46:18 +08:00
|
|
|
fprintf(stdout,"\nReport: open objects on %lld\n",(long long)fid);
|
2017-12-03 22:11:51 +08:00
|
|
|
maxobjs = H5Fget_obj_count(fid,H5F_OBJ_ALL);
|
|
|
|
if(idlist != NULL) free(idlist);
|
|
|
|
idlist = (hid_t*)malloc(sizeof(hid_t)*maxobjs);
|
|
|
|
for(t=0;t<ntypes;t++) {
|
|
|
|
unsigned int ot = otypes[t];
|
|
|
|
ocount = H5Fget_obj_ids(fid,ot,maxobjs,idlist);
|
|
|
|
for(i=0;i<ocount;i++) {
|
|
|
|
hid_t o = idlist[i];
|
2018-03-17 01:46:18 +08:00
|
|
|
reportobject(uselog,o,ot);
|
2017-12-03 22:11:51 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if(idlist != NULL) free(idlist);
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
|
|
|
|
2017-12-05 03:21:14 +08:00
|
|
|
/**
|
|
|
|
* @internal Report open objects.
|
|
|
|
*
|
2018-03-17 01:46:18 +08:00
|
|
|
* @param uselog
|
2017-12-05 03:21:14 +08:00
|
|
|
* @param fid HDF5 file ID.
|
|
|
|
*
|
|
|
|
* @return NC_NOERR No error.
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
void
|
2018-03-17 01:46:18 +08:00
|
|
|
reportopenobjects(int uselog, hid_t fid)
|
2016-05-04 11:17:06 +08:00
|
|
|
{
|
2018-03-17 01:46:18 +08:00
|
|
|
reportopenobjectsT(uselog, fid ,5, OTYPES);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @internal Report open objects given a pointer to NC_HDF5_FILE_INFO_T object
|
|
|
|
*
|
|
|
|
* @param h5 file object
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
showopenobjects5(NC_HDF5_FILE_INFO_T* h5)
|
|
|
|
{
|
|
|
|
fprintf(stderr,"===== begin showopenobjects =====\n");
|
|
|
|
reportopenobjects(0,h5->hdfid);
|
|
|
|
fprintf(stderr,"===== end showopenobjects =====\n");
|
|
|
|
fflush(stderr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @internal Report open objects given an ncid
|
|
|
|
* Defined to support user or gdb level call.
|
|
|
|
*
|
|
|
|
* @param ncid file id
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
showopenobjects(int ncid)
|
|
|
|
{
|
|
|
|
NC_HDF5_FILE_INFO_T* h5 = NULL;
|
|
|
|
|
|
|
|
/* Find our metadata for this file. */
|
|
|
|
if (nc4_find_nc_grp_h5(ncid, NULL, NULL, &h5) != NC_NOERR)
|
|
|
|
fprintf(stderr,"failed\n");
|
|
|
|
else
|
|
|
|
showopenobjects5(h5);
|
|
|
|
fflush(stderr);
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Get HDF5 library version.
|
|
|
|
*
|
|
|
|
* @param major Pointer that gets major version number.
|
|
|
|
* @param minor Pointer that gets minor version number.
|
|
|
|
* @param release Pointer that gets release version number.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned error.
|
|
|
|
* @author Dennis Heimbigner
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
int
|
|
|
|
NC4_hdf5get_libversion(unsigned* major,unsigned* minor,unsigned* release)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
if(H5get_libversion(major,minor,release) < 0)
|
|
|
|
return NC_EHDFERR;
|
|
|
|
return NC_NOERR;
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Get HDF5 superblock version.
|
|
|
|
*
|
|
|
|
* @param h5 Pointer to HDF5 file info struct.
|
|
|
|
* @param idp Pointer that gets superblock version.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @returns NC_EHDFERR HDF5 returned error.
|
|
|
|
* @author Dennis Heimbigner
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
int
|
|
|
|
NC4_hdf5get_superblock(struct NC_HDF5_FILE_INFO* h5, int* idp)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int stat = NC_NOERR;
|
|
|
|
unsigned super;
|
|
|
|
hid_t plist = -1;
|
|
|
|
if((plist = H5Fget_create_plist(h5->hdfid)) < 0)
|
|
|
|
{stat = NC_EHDFERR; goto done;}
|
|
|
|
if(H5Pget_version(plist, &super, NULL, NULL, NULL) < 0)
|
|
|
|
{stat = NC_EHDFERR; goto done;}
|
|
|
|
if(idp) *idp = (int)super;
|
2016-05-04 11:17:06 +08:00
|
|
|
done:
|
2017-12-03 22:11:51 +08:00
|
|
|
if(plist >= 0) H5Pclose(plist);
|
|
|
|
return stat;
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int NC4_get_strict_att(NC_HDF5_FILE_INFO_T*);
|
|
|
|
static int NC4_walk(hid_t, int*);
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Determine whether file is netCDF-4.
|
|
|
|
*
|
|
|
|
* We define a file as being from netcdf-4 if any of the following
|
|
|
|
* are true:
|
|
|
|
* 1. NCPROPS attribute exists in root group
|
|
|
|
* 2. NC3_STRICT_ATT_NAME exists in root group
|
|
|
|
* 3. any of NC_ATT_REFERENCE_LIST, NC_ATT_CLASS,
|
|
|
|
* NC_ATT_DIMENSION_LIST, NC_ATT_NAME,
|
|
|
|
* NC_ATT_COORDINATES, NC_DIMID_ATT_NAME
|
|
|
|
* exist anywhere in the file; note that this
|
|
|
|
* requires walking the file.
|
|
|
|
|
|
|
|
* @note WARNINGS:
|
|
|
|
* 1. False negatives are possible for a small subset of netcdf-4
|
|
|
|
* created files.
|
|
|
|
* 2. Deliberate falsification in the file can be used to cause
|
|
|
|
* a false positive.
|
|
|
|
*
|
|
|
|
* @param h5 Pointer to HDF5 file info struct.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @author Dennis Heimbigner.
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
int
|
|
|
|
NC4_isnetcdf4(struct NC_HDF5_FILE_INFO* h5)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int stat;
|
|
|
|
int isnc4 = 0;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
/* Look for NC3_STRICT_ATT_NAME */
|
|
|
|
isnc4 = NC4_get_strict_att(h5);
|
|
|
|
if(isnc4 > 0)
|
|
|
|
goto done;
|
|
|
|
/* attribute did not exist */
|
|
|
|
/* => last resort: walk the HDF5 file looking for markers */
|
|
|
|
count = 0;
|
|
|
|
stat = NC4_walk(h5->root_grp->hdf_grpid, &count);
|
|
|
|
if(stat != NC_NOERR)
|
|
|
|
isnc4 = 0;
|
|
|
|
else /* Threshold is at least two matches */
|
|
|
|
isnc4 = (count >= 2);
|
2016-05-04 11:17:06 +08:00
|
|
|
|
|
|
|
done:
|
2017-12-03 22:11:51 +08:00
|
|
|
return isnc4;
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Get the NC3 strict attribute.
|
|
|
|
*
|
|
|
|
* @param h5 Pointer to HDF5 file info struct.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @author Dennis Heimbigner.
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
static int
|
|
|
|
NC4_get_strict_att(NC_HDF5_FILE_INFO_T* h5)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
hid_t grp = -1;
|
|
|
|
hid_t attid = -1;
|
|
|
|
|
|
|
|
/* Get root group */
|
|
|
|
grp = h5->root_grp->hdf_grpid; /* get root group */
|
|
|
|
/* Try to extract the NC3_STRICT_ATT_NAME attribute */
|
|
|
|
attid = H5Aopen_name(grp, NC3_STRICT_ATT_NAME);
|
|
|
|
H5Aclose(attid);
|
|
|
|
return attid;
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
|
|
|
|
2017-12-03 22:11:51 +08:00
|
|
|
/**
|
|
|
|
* @internal Walk group struct.
|
|
|
|
*
|
|
|
|
* @param gid HDF5 ID of starting group.
|
|
|
|
* @param countp Pointer that gets count.
|
|
|
|
*
|
|
|
|
* @returns NC_NOERR No error.
|
|
|
|
* @author Dennis Heimbigner
|
|
|
|
*/
|
2016-05-04 11:17:06 +08:00
|
|
|
static int
|
|
|
|
NC4_walk(hid_t gid, int* countp)
|
|
|
|
{
|
2017-12-03 22:11:51 +08:00
|
|
|
int ncstat = NC_NOERR;
|
|
|
|
int i,j,na;
|
|
|
|
ssize_t len;
|
|
|
|
hsize_t nobj;
|
|
|
|
herr_t err;
|
|
|
|
int otype;
|
|
|
|
hid_t grpid, dsid;
|
|
|
|
char name[NC_HDF5_MAX_NAME];
|
|
|
|
|
|
|
|
/* walk group members of interest */
|
|
|
|
err = H5Gget_num_objs(gid, &nobj);
|
|
|
|
if(err < 0) return err;
|
|
|
|
|
|
|
|
for(i = 0; i < nobj; i++) {
|
|
|
|
/* Get name & kind of object in the group */
|
|
|
|
len = H5Gget_objname_by_idx(gid,(hsize_t)i,name,(size_t)NC_HDF5_MAX_NAME);
|
|
|
|
if(len < 0) return len;
|
|
|
|
|
|
|
|
otype = H5Gget_objtype_by_idx(gid,(size_t)i);
|
|
|
|
switch(otype) {
|
|
|
|
case H5G_GROUP:
|
|
|
|
grpid = H5Gopen(gid,name);
|
|
|
|
NC4_walk(grpid,countp);
|
|
|
|
H5Gclose(grpid);
|
|
|
|
break;
|
|
|
|
case H5G_DATASET: /* variables */
|
|
|
|
/* Check for phony_dim */
|
|
|
|
if(strcmp(name,"phony_dim")==0)
|
|
|
|
*countp = *countp + 1;
|
|
|
|
dsid = H5Dopen(gid,name);
|
|
|
|
na = H5Aget_num_attrs(dsid);
|
|
|
|
for(j = 0; j < na; j++) {
|
|
|
|
hid_t aid = H5Aopen_idx(dsid,(unsigned int) j);
|
|
|
|
if(aid >= 0) {
|
2018-03-17 01:46:18 +08:00
|
|
|
const NC_reservedatt* ra;
|
2017-12-03 22:11:51 +08:00
|
|
|
ssize_t len = H5Aget_name(aid, NC_HDF5_MAX_NAME, name);
|
|
|
|
if(len < 0) return len;
|
|
|
|
/* Is this a netcdf-4 marker attribute */
|
2018-03-17 01:46:18 +08:00
|
|
|
/* Is this a netcdf-4 marker attribute */
|
|
|
|
ra = NC_findreserved(name);
|
|
|
|
if(ra != NULL)
|
2017-12-03 22:11:51 +08:00
|
|
|
*countp = *countp + 1;
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
2017-12-03 22:11:51 +08:00
|
|
|
H5Aclose(aid);
|
|
|
|
}
|
|
|
|
H5Dclose(dsid);
|
|
|
|
break;
|
|
|
|
default:/* ignore */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ncstat;
|
2016-05-04 11:17:06 +08:00
|
|
|
}
|
2018-05-23 06:50:52 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* @internal Write a strided array of data to a variable.
|
|
|
|
*
|
|
|
|
* @param nc Pointer to the file NC struct.
|
|
|
|
* @param ncid File ID.
|
|
|
|
* @param varid Variable ID.
|
|
|
|
* @param startp Array of start indices.
|
|
|
|
* @param countp Array of counts.
|
|
|
|
* @param stridep Array of strides.
|
|
|
|
* @param mem_nc_type The type of the data in memory.
|
|
|
|
* @param is_long True only if NC_LONG is the memory type.
|
|
|
|
* @param data The data to be written.
|
|
|
|
*
|
|
|
|
* @returns ::NC_NOERR No error.
|
|
|
|
* @returns ::NC_EBADID Bad ncid.
|
|
|
|
* @returns ::NC_ENOTVAR Var not found.
|
|
|
|
* @returns ::NC_EHDFERR HDF5 function returned error.
|
|
|
|
* @returns ::NC_EINVALCOORDS Incorrect start.
|
|
|
|
* @returns ::NC_EEDGE Incorrect start/count.
|
|
|
|
* @returns ::NC_ENOMEM Out of memory.
|
|
|
|
* @returns ::NC_EMPI MPI library error (parallel only)
|
|
|
|
* @returns ::NC_ECANTEXTEND Can't extend dimension for write.
|
|
|
|
* @returns ::NC_ERANGE Data conversion error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nc4_put_vars(NC *nc, int ncid, int varid, const size_t *startp,
|
|
|
|
const size_t *countp, const ptrdiff_t* stridep,
|
|
|
|
nc_type mem_nc_type, int is_long, void *data)
|
|
|
|
{
|
|
|
|
NC_GRP_INFO_T *grp;
|
|
|
|
NC_HDF5_FILE_INFO_T *h5;
|
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_DIM_INFO_T *dim;
|
|
|
|
hid_t file_spaceid = 0, mem_spaceid = 0, xfer_plistid = 0;
|
|
|
|
long long unsigned xtend_size[NC_MAX_VAR_DIMS];
|
|
|
|
hsize_t fdims[NC_MAX_VAR_DIMS], fmaxdims[NC_MAX_VAR_DIMS];
|
|
|
|
hsize_t start[NC_MAX_VAR_DIMS], count[NC_MAX_VAR_DIMS];
|
|
|
|
hssize_t stride[NC_MAX_VAR_DIMS];
|
|
|
|
char *name_to_use;
|
|
|
|
int need_to_extend = 0;
|
|
|
|
#ifdef USE_PARALLEL4
|
|
|
|
int extend_possible = 0;
|
|
|
|
#endif
|
|
|
|
int retval = NC_NOERR, range_error = 0, i, d2;
|
|
|
|
void *bufr = NULL;
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
int need_to_convert = 0;
|
|
|
|
size_t len = 1;
|
|
|
|
#endif
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
hid_t mem_typeid = 0;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Find our metadata for this file, group, and var. */
|
|
|
|
assert(nc);
|
|
|
|
if ((retval = nc4_find_g_var_nc(nc, ncid, varid, &grp, &var)))
|
|
|
|
return retval;
|
|
|
|
h5 = NC4_DATA(nc);
|
|
|
|
assert(grp && h5 && var && var->hdr.name);
|
|
|
|
|
|
|
|
LOG((3, "%s: var->hdr.name %s mem_nc_type %d is_long %d",
|
|
|
|
__func__, var->hdr.name, mem_nc_type, is_long));
|
|
|
|
|
|
|
|
/* Check some stuff about the type and the file. If the file must
|
|
|
|
* be switched from define mode, it happens here. */
|
|
|
|
if ((retval = check_for_vara(&mem_nc_type, var, h5)))
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
/* Convert from size_t and ptrdiff_t to hssize_t, and hsize_t. */
|
|
|
|
/* Also do sanity checks */
|
|
|
|
for (i = 0; i < var->ndims; i++)
|
|
|
|
{
|
|
|
|
start[i] = (startp == NULL ? 0 : startp[i]);
|
|
|
|
count[i] = (countp == NULL ? 1 : countp[i]);
|
|
|
|
stride[i] = (stridep == NULL ? 1 : stridep[i]);
|
|
|
|
/* Check for non-positive stride */
|
|
|
|
if(stride[i] <= 0)
|
|
|
|
return NC_ESTRIDE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Open this dataset if necessary, also checking for a weird case:
|
|
|
|
* a non-coordinate (and non-scalar) variable that has the same
|
|
|
|
* name as a dimension. */
|
|
|
|
if (var->hdf5_name && strlen(var->hdf5_name) >= strlen(NON_COORD_PREPEND) &&
|
|
|
|
strncmp(var->hdf5_name, NON_COORD_PREPEND, strlen(NON_COORD_PREPEND)) == 0 &&
|
|
|
|
var->ndims)
|
|
|
|
name_to_use = var->hdf5_name;
|
|
|
|
else
|
|
|
|
name_to_use = var->hdr.name;
|
|
|
|
if (!var->hdf_datasetid)
|
|
|
|
if ((var->hdf_datasetid = H5Dopen2(grp->hdf_grpid, name_to_use, H5P_DEFAULT)) < 0)
|
|
|
|
return NC_ENOTVAR;
|
|
|
|
|
|
|
|
/* Get file space of data. */
|
|
|
|
if ((file_spaceid = H5Dget_space(var->hdf_datasetid)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Check to ensure the user selection is
|
|
|
|
* valid. H5Sget_simple_extent_dims gets the sizes of all the dims
|
|
|
|
* and put them in fdims. */
|
|
|
|
if (H5Sget_simple_extent_dims(file_spaceid, fdims, fmaxdims) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
#ifdef LOGGING
|
|
|
|
log_dim_info(var, fdims, fmaxdims, start, count);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Check dimension bounds. Remember that unlimited dimensions can
|
|
|
|
* put data beyond their current length. */
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
{
|
|
|
|
hsize_t endindex = start[d2] + (stride[d2]*(count[d2]-1)); /* last index written */
|
|
|
|
dim = var->dim[d2];
|
|
|
|
assert(dim && dim->hdr.id == var->dimids[d2]);
|
|
|
|
if(count[d2] == 0)
|
|
|
|
endindex = start[d2]; /* fixup for zero read count */
|
|
|
|
if (!dim->unlimited)
|
|
|
|
{
|
|
|
|
#ifdef RELAX_COORD_BOUND
|
|
|
|
if (start[d2] > (hssize_t)fdims[d2] ||
|
|
|
|
(start[d2] == (hssize_t)fdims[d2] && count[d2] > 0))
|
|
|
|
#else
|
|
|
|
if (start[d2] >= (hssize_t)fdims[d2])
|
|
|
|
#endif
|
|
|
|
BAIL_QUIET(NC_EINVALCOORDS);
|
|
|
|
if (endindex >= fdims[d2])
|
|
|
|
BAIL_QUIET(NC_EEDGE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Now you would think that no one would be crazy enough to write
|
|
|
|
a scalar dataspace with one of the array function calls, but you
|
|
|
|
would be wrong. So let's check to see if the dataset is
|
|
|
|
scalar. If it is, we won't try to set up a hyperslab. */
|
|
|
|
if (H5Sget_simple_extent_type(file_spaceid) == H5S_SCALAR)
|
|
|
|
{
|
|
|
|
if ((mem_spaceid = H5Screate(H5S_SCALAR)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET, start, stride,
|
|
|
|
count, NULL) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Create a space for the memory, just big enough to hold the slab
|
|
|
|
we want. */
|
|
|
|
if ((mem_spaceid = H5Screate_simple(var->ndims, count, NULL)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
/* Are we going to convert any data? (No converting of compound or
|
|
|
|
* opaque types.) */
|
|
|
|
if ((mem_nc_type != var->type_info->hdr.id || (var->type_info->hdr.id == NC_INT && is_long)) &&
|
|
|
|
mem_nc_type != NC_COMPOUND && mem_nc_type != NC_OPAQUE)
|
|
|
|
{
|
|
|
|
size_t file_type_size;
|
|
|
|
|
|
|
|
/* We must convert - allocate a buffer. */
|
|
|
|
need_to_convert++;
|
|
|
|
if (var->ndims)
|
|
|
|
for (d2=0; d2<var->ndims; d2++)
|
|
|
|
len *= countp[d2];
|
|
|
|
LOG((4, "converting data for var %s type=%d len=%d", var->hdr.name,
|
|
|
|
var->type_info->hdr.id, len));
|
|
|
|
|
|
|
|
/* Later on, we will need to know the size of this type in the
|
|
|
|
* file. */
|
|
|
|
assert(var->type_info->size);
|
|
|
|
file_type_size = var->type_info->size;
|
|
|
|
|
|
|
|
/* If we're reading, we need bufr to have enough memory to store
|
|
|
|
* the data in the file. If we're writing, we need bufr to be
|
|
|
|
* big enough to hold all the data in the file's type. */
|
|
|
|
if(len > 0)
|
|
|
|
if (!(bufr = malloc(len * file_type_size)))
|
|
|
|
BAIL(NC_ENOMEM);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
#endif /* ifndef HDF5_CONVERT */
|
|
|
|
bufr = data;
|
|
|
|
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
/* Get the HDF type of the data in memory. */
|
|
|
|
if ((retval = nc4_get_hdf_typeid(h5, mem_nc_type, &mem_typeid,
|
|
|
|
var->type_info->endianness)))
|
|
|
|
BAIL(retval);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Create the data transfer property list. */
|
|
|
|
if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Apply the callback function which will detect range
|
|
|
|
* errors. Which one to call depends on the length of the
|
|
|
|
* destination buffer type. */
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
if (H5Pset_type_conv_cb(xfer_plistid, except_func, &range_error) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef USE_PARALLEL4
|
|
|
|
/* Set up parallel I/O, if needed. */
|
|
|
|
if ((retval = set_par_access(h5, var, xfer_plistid)))
|
|
|
|
BAIL(retval);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Read this hyperslab from memory. */
|
|
|
|
/* Does the dataset have to be extended? If it's already
|
|
|
|
extended to the required size, it will do no harm to reextend
|
|
|
|
it to that size. */
|
|
|
|
if (var->ndims)
|
|
|
|
{
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
{
|
|
|
|
hsize_t endindex = start[d2] + (stride[d2]*(count[d2]-1)); /* last index written */
|
|
|
|
if(count[d2] == 0)
|
|
|
|
endindex = start[d2];
|
|
|
|
dim = var->dim[d2];
|
|
|
|
assert(dim && dim->hdr.id == var->dimids[d2]);
|
|
|
|
if (dim->unlimited)
|
|
|
|
{
|
|
|
|
#ifdef USE_PARALLEL4
|
|
|
|
extend_possible = 1;
|
|
|
|
#endif
|
|
|
|
if (endindex >= fdims[d2])
|
|
|
|
{
|
|
|
|
xtend_size[d2] = (long long unsigned)(endindex+1);
|
|
|
|
need_to_extend++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
xtend_size[d2] = (long long unsigned)fdims[d2];
|
|
|
|
|
|
|
|
if (endindex >= dim->len)
|
|
|
|
{
|
|
|
|
dim->len = endindex+1;
|
|
|
|
dim->extended = NC_TRUE;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
xtend_size[d2] = (long long unsigned)dim->len;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef USE_PARALLEL4
|
|
|
|
/* Check if anyone wants to extend */
|
|
|
|
if (extend_possible && h5->parallel && NC_COLLECTIVE == var->parallel_access)
|
|
|
|
{
|
|
|
|
/* Form consensus opinion among all processes about whether to perform
|
|
|
|
* collective I/O
|
|
|
|
*/
|
|
|
|
if(MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &need_to_extend, 1, MPI_INT, MPI_BOR, h5->comm))
|
|
|
|
BAIL(NC_EMPI);
|
|
|
|
}
|
|
|
|
#endif /* USE_PARALLEL4 */
|
|
|
|
|
|
|
|
/* If we need to extend it, we also need a new file_spaceid
|
|
|
|
to reflect the new size of the space. */
|
|
|
|
if (need_to_extend)
|
|
|
|
{
|
|
|
|
LOG((4, "extending dataset"));
|
|
|
|
#ifdef USE_PARALLEL4
|
|
|
|
if (h5->parallel)
|
|
|
|
{
|
|
|
|
if(NC_COLLECTIVE != var->parallel_access)
|
|
|
|
BAIL(NC_ECANTEXTEND);
|
|
|
|
|
|
|
|
/* Reach consensus about dimension sizes to extend to */
|
|
|
|
if(MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, xtend_size, var->ndims, MPI_UNSIGNED_LONG_LONG, MPI_MAX, h5->comm))
|
|
|
|
BAIL(NC_EMPI);
|
|
|
|
}
|
|
|
|
#endif /* USE_PARALLEL4 */
|
|
|
|
/* Convert xtend_size back to hsize_t for use with H5Dset_extent */
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
fdims[d2] = (hsize_t)xtend_size[d2];
|
|
|
|
|
|
|
|
if (H5Dset_extent(var->hdf_datasetid, fdims) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if (file_spaceid > 0 && H5Sclose(file_spaceid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if ((file_spaceid = H5Dget_space(var->hdf_datasetid)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
|
|
|
|
start, stride, count, NULL) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
/* Do we need to convert the data? */
|
|
|
|
if (need_to_convert)
|
|
|
|
{
|
|
|
|
if ((retval = nc4_convert_type(data, bufr, mem_nc_type, var->type_info->hdr.id,
|
|
|
|
len, &range_error, var->fill_value,
|
|
|
|
(h5->cmode & NC_CLASSIC_MODEL), is_long, 0)))
|
|
|
|
BAIL(retval);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Write the data. At last! */
|
|
|
|
LOG((4, "about to H5Dwrite datasetid 0x%x mem_spaceid 0x%x "
|
|
|
|
"file_spaceid 0x%x", var->hdf_datasetid, mem_spaceid, file_spaceid));
|
|
|
|
if (H5Dwrite(var->hdf_datasetid, var->type_info->hdf_typeid,
|
|
|
|
mem_spaceid, file_spaceid, xfer_plistid, bufr) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Remember that we have written to this var so that Fill Value
|
|
|
|
* can't be set for it. */
|
|
|
|
if (!var->written_to)
|
|
|
|
var->written_to = NC_TRUE;
|
|
|
|
|
|
|
|
/* For strict netcdf-3 rules, ignore erange errors between UBYTE
|
|
|
|
* and BYTE types. */
|
|
|
|
if ((h5->cmode & NC_CLASSIC_MODEL) &&
|
|
|
|
(var->type_info->hdr.id == NC_UBYTE || var->type_info->hdr.id == NC_BYTE) &&
|
|
|
|
(mem_nc_type == NC_UBYTE || mem_nc_type == NC_BYTE) &&
|
|
|
|
range_error)
|
|
|
|
range_error = 0;
|
|
|
|
|
|
|
|
exit:
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
if (mem_typeid > 0 && H5Tclose(mem_typeid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
#endif
|
|
|
|
if (file_spaceid > 0 && H5Sclose(file_spaceid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (mem_spaceid > 0 && H5Sclose(mem_spaceid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
if (xfer_plistid && (H5Pclose(xfer_plistid) < 0))
|
|
|
|
BAIL2(NC_EPARINIT);
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
if (need_to_convert && bufr) free(bufr);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* If there was an error return it, otherwise return any potential
|
|
|
|
range error value. If none, return NC_NOERR as usual.*/
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
if (range_error)
|
|
|
|
return NC_ERANGE;
|
|
|
|
return NC_NOERR;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @internal Read a strided array of data from a variable.
|
|
|
|
*
|
|
|
|
* @param nc Pointer to the file NC struct.
|
|
|
|
* @param ncid File ID.
|
|
|
|
* @param varid Variable ID.
|
|
|
|
* @param startp Array of start indices.
|
|
|
|
* @param countp Array of counts.
|
|
|
|
* @param stridep Array of strides.
|
|
|
|
* @param mem_nc_type The type of the data in memory. (Convert to this
|
|
|
|
* type from file type.)
|
|
|
|
* @param is_long True only if NC_LONG is the memory type.
|
|
|
|
* @param data The data to be written.
|
|
|
|
*
|
|
|
|
* @returns ::NC_NOERR No error.
|
|
|
|
* @returns ::NC_EBADID Bad ncid.
|
|
|
|
* @returns ::NC_ENOTVAR Var not found.
|
|
|
|
* @returns ::NC_EHDFERR HDF5 function returned error.
|
|
|
|
* @returns ::NC_EINVALCOORDS Incorrect start.
|
|
|
|
* @returns ::NC_EEDGE Incorrect start/count.
|
|
|
|
* @returns ::NC_ENOMEM Out of memory.
|
|
|
|
* @returns ::NC_EMPI MPI library error (parallel only)
|
|
|
|
* @returns ::NC_ECANTEXTEND Can't extend dimension for write.
|
|
|
|
* @returns ::NC_ERANGE Data conversion error.
|
|
|
|
* @author Ed Hartnett
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nc4_get_vars(NC *nc, int ncid, int varid, const size_t *startp,
|
|
|
|
const size_t *countp, const ptrdiff_t* stridep,
|
|
|
|
nc_type mem_nc_type, int is_long, void *data)
|
|
|
|
{
|
|
|
|
NC_GRP_INFO_T *grp;
|
|
|
|
NC_HDF5_FILE_INFO_T *h5;
|
|
|
|
NC_VAR_INFO_T *var;
|
|
|
|
NC_DIM_INFO_T *dim;
|
|
|
|
hid_t file_spaceid = 0, mem_spaceid = 0;
|
|
|
|
hid_t xfer_plistid = 0;
|
|
|
|
size_t file_type_size;
|
|
|
|
hsize_t *xtend_size = NULL, count[NC_MAX_VAR_DIMS];
|
|
|
|
hsize_t fdims[NC_MAX_VAR_DIMS], fmaxdims[NC_MAX_VAR_DIMS];
|
|
|
|
hsize_t start[NC_MAX_VAR_DIMS];
|
|
|
|
hsize_t stride[NC_MAX_VAR_DIMS];
|
|
|
|
char *name_to_use;
|
|
|
|
void *fillvalue = NULL;
|
|
|
|
int no_read = 0, provide_fill = 0;
|
|
|
|
int fill_value_size[NC_MAX_VAR_DIMS];
|
|
|
|
int scalar = 0, retval = NC_NOERR, range_error = 0, i, d2;
|
|
|
|
void *bufr = NULL;
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
hid_t mem_typeid = 0;
|
|
|
|
#endif
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
int need_to_convert = 0;
|
|
|
|
size_t len = 1;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Find our metadata for this file, group, and var. */
|
|
|
|
assert(nc);
|
|
|
|
if ((retval = nc4_find_g_var_nc(nc, ncid, varid, &grp, &var)))
|
|
|
|
return retval;
|
|
|
|
h5 = NC4_DATA(nc);
|
|
|
|
assert(grp && h5 && var && var->hdr.name);
|
|
|
|
|
|
|
|
LOG((3, "%s: var->hdr.name %s mem_nc_type %d is_long %d",
|
|
|
|
__func__, var->hdr.name, mem_nc_type, is_long));
|
|
|
|
|
|
|
|
/* Check some stuff about the type and the file. */
|
|
|
|
if ((retval = check_for_vara(&mem_nc_type, var, h5)))
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
/* Convert from size_t and ptrdiff_t to hssize_t, and hsize_t. */
|
|
|
|
/* Also do sanity checks */
|
|
|
|
for (i = 0; i < var->ndims; i++)
|
|
|
|
{
|
|
|
|
start[i] = (startp == NULL ? 0 : startp[i]);
|
|
|
|
count[i] = (countp == NULL ? 1 : countp[i]);
|
|
|
|
stride[i] = (stridep == NULL ? 1 : stridep[i]);
|
|
|
|
/* if any of the count values are zero don't actually read. */
|
|
|
|
if (count[i] == 0)
|
|
|
|
no_read++;
|
|
|
|
/* if any of the stride values are non-positive, fail. */
|
|
|
|
if (stride[i] <= 0)
|
|
|
|
return NC_ESTRIDE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Open this dataset if necessary, also checking for a weird case:
|
|
|
|
* a non-coordinate (and non-scalar) variable that has the same
|
|
|
|
* name as a dimension. */
|
|
|
|
if (var->hdf5_name && strlen(var->hdf5_name) >= strlen(NON_COORD_PREPEND) &&
|
|
|
|
strncmp(var->hdf5_name, NON_COORD_PREPEND, strlen(NON_COORD_PREPEND)) == 0 &&
|
|
|
|
var->ndims)
|
|
|
|
name_to_use = var->hdf5_name;
|
|
|
|
else
|
|
|
|
name_to_use = var->hdr.name;
|
|
|
|
if (!var->hdf_datasetid)
|
|
|
|
if ((var->hdf_datasetid = H5Dopen2(grp->hdf_grpid, name_to_use, H5P_DEFAULT)) < 0)
|
|
|
|
return NC_ENOTVAR;
|
|
|
|
|
|
|
|
/* Get file space of data. */
|
|
|
|
if ((file_spaceid = H5Dget_space(var->hdf_datasetid)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Check to ensure the user selection is
|
|
|
|
* valid. H5Sget_simple_extent_dims gets the sizes of all the dims
|
|
|
|
* and put them in fdims. */
|
|
|
|
if (H5Sget_simple_extent_dims(file_spaceid, fdims, fmaxdims) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
#ifdef LOGGING
|
|
|
|
log_dim_info(var, fdims, fmaxdims, start, count);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Check dimension bounds. Remember that unlimited dimensions can
|
|
|
|
* put data beyond their current length. */
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++) {
|
|
|
|
hsize_t endindex = start[d2] + (stride[d2]*(count[d2]-1)); /* last index read */
|
|
|
|
dim = var->dim[d2];
|
|
|
|
assert(dim && dim->hdr.id == var->dimids[d2]);
|
|
|
|
if(count[d2] == 0)
|
|
|
|
endindex = start[d2]; /* fixup for zero read count */
|
|
|
|
if (dim->unlimited)
|
|
|
|
{
|
|
|
|
size_t ulen;
|
|
|
|
|
|
|
|
/* We can't go beyond the largest current extent of
|
|
|
|
the unlimited dim. */
|
|
|
|
if ((retval = NC4_inq_dim(ncid, dim->hdr.id, NULL, &ulen)))
|
|
|
|
BAIL(retval);
|
|
|
|
|
|
|
|
/* Check for out of bound requests. */
|
|
|
|
#ifdef RELAX_COORD_BOUND
|
|
|
|
if (start[d2] > (hssize_t)ulen ||
|
|
|
|
(start[d2] == (hssize_t)ulen && count[d2] > 0))
|
|
|
|
#else
|
|
|
|
if (start[d2] >= (hssize_t)ulen && ulen > 0)
|
|
|
|
#endif
|
|
|
|
BAIL_QUIET(NC_EINVALCOORDS);
|
|
|
|
if (endindex >= ulen)
|
|
|
|
BAIL_QUIET(NC_EEDGE);
|
|
|
|
|
|
|
|
/* Things get a little tricky here. If we're getting
|
|
|
|
a GET request beyond the end of this var's
|
|
|
|
current length in an unlimited dimension, we'll
|
|
|
|
later need to return the fill value for the
|
|
|
|
variable. */
|
|
|
|
if (start[d2] >= (hssize_t)fdims[d2])
|
|
|
|
fill_value_size[d2] = count[d2];
|
|
|
|
else if (endindex >= fdims[d2])
|
|
|
|
fill_value_size[d2] = count[d2] - ((fdims[d2] - start[d2])/stride[d2]);
|
|
|
|
else
|
|
|
|
fill_value_size[d2] = 0;
|
|
|
|
count[d2] -= fill_value_size[d2];
|
|
|
|
if (fill_value_size[d2])
|
|
|
|
provide_fill++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* Check for out of bound requests. */
|
|
|
|
#ifdef RELAX_COORD_BOUND
|
|
|
|
if (start[d2] > (hssize_t)fdims[d2] ||
|
|
|
|
(start[d2] == (hssize_t)fdims[d2] && count[d2] > 0))
|
|
|
|
#else
|
|
|
|
if (start[d2] >= (hssize_t)fdims[d2])
|
|
|
|
#endif
|
|
|
|
BAIL_QUIET(NC_EINVALCOORDS);
|
|
|
|
if (endindex >= fdims[d2])
|
|
|
|
BAIL_QUIET(NC_EEDGE);
|
|
|
|
|
|
|
|
/* Set the fill value boundary */
|
|
|
|
fill_value_size[d2] = count[d2];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Later on, we will need to know the size of this type in the
|
|
|
|
* file. */
|
|
|
|
assert(var->type_info->size);
|
|
|
|
file_type_size = var->type_info->size;
|
|
|
|
|
|
|
|
if (!no_read)
|
|
|
|
{
|
|
|
|
/* Now you would think that no one would be crazy enough to write
|
|
|
|
a scalar dataspace with one of the array function calls, but you
|
|
|
|
would be wrong. So let's check to see if the dataset is
|
|
|
|
scalar. If it is, we won't try to set up a hyperslab. */
|
|
|
|
if (H5Sget_simple_extent_type(file_spaceid) == H5S_SCALAR)
|
|
|
|
{
|
|
|
|
if ((mem_spaceid = H5Screate(H5S_SCALAR)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
scalar++;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (H5Sselect_hyperslab(file_spaceid, H5S_SELECT_SET,
|
|
|
|
start, stride, count, NULL) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
/* Create a space for the memory, just big enough to hold the slab
|
|
|
|
we want. */
|
|
|
|
if ((mem_spaceid = H5Screate_simple(var->ndims, count, NULL)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fix bug when reading HDF5 files with variable of type
|
|
|
|
* fixed-length string. We need to make it look like a
|
|
|
|
* variable-length string, because that's all netCDF-4 data
|
|
|
|
* model supports, lacking anonymous dimensions. So
|
|
|
|
* variable-length strings are in allocated memory that user has
|
|
|
|
* to free, which we allocate here. */
|
|
|
|
if(var->type_info->nc_type_class == NC_STRING &&
|
|
|
|
H5Tget_size(var->type_info->hdf_typeid) > 1 &&
|
|
|
|
!H5Tis_variable_str(var->type_info->hdf_typeid)) {
|
|
|
|
hsize_t fstring_len;
|
|
|
|
|
|
|
|
if ((fstring_len = H5Tget_size(var->type_info->hdf_typeid)) == 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if (!(*(char **)data = malloc(1 + fstring_len)))
|
|
|
|
BAIL(NC_ENOMEM);
|
|
|
|
bufr = *(char **)data;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
/* Are we going to convert any data? (No converting of compound or
|
|
|
|
* opaque types.) */
|
|
|
|
if ((mem_nc_type != var->type_info->hdr.id || (var->type_info->hdr.id == NC_INT && is_long)) &&
|
|
|
|
mem_nc_type != NC_COMPOUND && mem_nc_type != NC_OPAQUE)
|
|
|
|
{
|
|
|
|
/* We must convert - allocate a buffer. */
|
|
|
|
need_to_convert++;
|
|
|
|
if (var->ndims)
|
|
|
|
for (d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
len *= countp[d2];
|
|
|
|
LOG((4, "converting data for var %s type=%d len=%d", var->hdr.name,
|
|
|
|
var->type_info->hdr.id, len));
|
|
|
|
|
|
|
|
/* If we're reading, we need bufr to have enough memory to store
|
|
|
|
* the data in the file. If we're writing, we need bufr to be
|
|
|
|
* big enough to hold all the data in the file's type. */
|
|
|
|
if(len > 0)
|
|
|
|
if (!(bufr = malloc(len * file_type_size)))
|
|
|
|
BAIL(NC_ENOMEM);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
#endif /* ifndef HDF5_CONVERT */
|
|
|
|
if(!bufr)
|
|
|
|
bufr = data;
|
|
|
|
|
|
|
|
/* Get the HDF type of the data in memory. */
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
if ((retval = nc4_get_hdf_typeid(h5, mem_nc_type, &mem_typeid,
|
|
|
|
var->type_info->endianness)))
|
|
|
|
BAIL(retval);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Create the data transfer property list. */
|
|
|
|
if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
/* Apply the callback function which will detect range
|
|
|
|
* errors. Which one to call depends on the length of the
|
|
|
|
* destination buffer type. */
|
|
|
|
if (H5Pset_type_conv_cb(xfer_plistid, except_func, &range_error) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifdef USE_PARALLEL4
|
|
|
|
/* Set up parallel I/O, if needed. */
|
|
|
|
if ((retval = set_par_access(h5, var, xfer_plistid)))
|
|
|
|
BAIL(retval);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Read this hyperslab into memory. */
|
|
|
|
LOG((5, "About to H5Dread some data..."));
|
|
|
|
if (H5Dread(var->hdf_datasetid, var->type_info->native_hdf_typeid,
|
|
|
|
mem_spaceid, file_spaceid, xfer_plistid, bufr) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
/* Eventually the block below will go away. Right now it's
|
|
|
|
needed to support conversions between int/float, and range
|
|
|
|
checking converted data in the netcdf way. These features are
|
|
|
|
being added to HDF5 at the HDF5 World Hall of Coding right
|
|
|
|
now, by a staff of thousands of programming gnomes. */
|
|
|
|
if (need_to_convert)
|
|
|
|
{
|
|
|
|
if ((retval = nc4_convert_type(bufr, data, var->type_info->hdr.id, mem_nc_type,
|
|
|
|
len, &range_error, var->fill_value,
|
|
|
|
(h5->cmode & NC_CLASSIC_MODEL), 0, is_long)))
|
|
|
|
BAIL(retval);
|
|
|
|
|
|
|
|
/* For strict netcdf-3 rules, ignore erange errors between UBYTE
|
|
|
|
* and BYTE types. */
|
|
|
|
if ((h5->cmode & NC_CLASSIC_MODEL) &&
|
|
|
|
(var->type_info->hdr.id == NC_UBYTE || var->type_info->hdr.id == NC_BYTE) &&
|
|
|
|
(mem_nc_type == NC_UBYTE || mem_nc_type == NC_BYTE) &&
|
|
|
|
range_error)
|
|
|
|
range_error = 0;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* For strict netcdf-3 rules, ignore erange errors between UBYTE
|
|
|
|
* and BYTE types. */
|
|
|
|
if ((h5->cmode & NC_CLASSIC_MODEL) &&
|
|
|
|
(var->type_info->hdr.id == NC_UBYTE || var->type_info->hdr.id == NC_BYTE) &&
|
|
|
|
(mem_nc_type == NC_UBYTE || mem_nc_type == NC_BYTE) &&
|
|
|
|
range_error)
|
|
|
|
range_error = 0;
|
|
|
|
|
|
|
|
} /* endif ! no_read */
|
|
|
|
|
|
|
|
else {
|
|
|
|
#ifdef USE_PARALLEL4 /* Start block contributed by HDF group. */
|
|
|
|
/* For collective IO read, some processes may not have any element for reading.
|
|
|
|
Collective requires all processes to participate, so we use H5Sselect_none
|
|
|
|
for these processes. */
|
|
|
|
if(var->parallel_access == NC_COLLECTIVE) {
|
|
|
|
|
|
|
|
/* Create the data transfer property list. */
|
|
|
|
if ((xfer_plistid = H5Pcreate(H5P_DATASET_XFER)) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
if ((retval = set_par_access(h5, var, xfer_plistid)))
|
|
|
|
BAIL(retval);
|
|
|
|
|
|
|
|
if (H5Sselect_none(file_spaceid)<0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Since no element will be selected, we just get the memory space the same as the file space.
|
|
|
|
*/
|
|
|
|
if((mem_spaceid = H5Dget_space(var->hdf_datasetid))<0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
if (H5Sselect_none(mem_spaceid)<0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* Read this hyperslab into memory. */
|
|
|
|
LOG((5, "About to H5Dread some data..."));
|
|
|
|
if (H5Dread(var->hdf_datasetid, var->type_info->native_hdf_typeid,
|
|
|
|
mem_spaceid, file_spaceid, xfer_plistid, bufr) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
#endif /* End ifdef USE_PARALLEL4 */
|
|
|
|
}
|
|
|
|
/* Now we need to fake up any further data that was asked for,
|
|
|
|
using the fill values instead. First skip past the data we
|
|
|
|
just read, if any. */
|
|
|
|
if (!scalar && provide_fill)
|
|
|
|
{
|
|
|
|
void *filldata;
|
|
|
|
size_t real_data_size = 0;
|
|
|
|
size_t fill_len;
|
|
|
|
|
|
|
|
/* Skip past the real data we've already read. */
|
|
|
|
if (!no_read)
|
|
|
|
for (real_data_size = file_type_size, d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
real_data_size *= (count[d2] - start[d2]);
|
|
|
|
|
|
|
|
/* Get the fill value from the HDF5 variable. Memory will be
|
|
|
|
* allocated. */
|
|
|
|
if (get_fill_value(h5, var, &fillvalue) < 0)
|
|
|
|
BAIL(NC_EHDFERR);
|
|
|
|
|
|
|
|
/* How many fill values do we need? */
|
|
|
|
for (fill_len = 1, d2 = 0; d2 < var->ndims; d2++)
|
|
|
|
fill_len *= (fill_value_size[d2] ? fill_value_size[d2] : 1);
|
|
|
|
|
|
|
|
/* Copy the fill value into the rest of the data buffer. */
|
|
|
|
filldata = (char *)data + real_data_size;
|
|
|
|
for (i = 0; i < fill_len; i++)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (var->type_info->nc_type_class == NC_STRING)
|
|
|
|
{
|
|
|
|
if (*(char **)fillvalue)
|
|
|
|
{
|
|
|
|
if (!(*(char **)filldata = strdup(*(char **)fillvalue)))
|
|
|
|
BAIL(NC_ENOMEM);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
*(char **)filldata = NULL;
|
|
|
|
}
|
|
|
|
else if(var->type_info->nc_type_class == NC_VLEN) {
|
|
|
|
if(fillvalue) {
|
|
|
|
memcpy(filldata,fillvalue,file_type_size);
|
|
|
|
} else {
|
|
|
|
*(char **)filldata = NULL;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
memcpy(filldata, fillvalue, file_type_size);
|
|
|
|
filldata = (char *)filldata + file_type_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
exit:
|
|
|
|
#ifdef HDF5_CONVERT
|
|
|
|
if (mem_typeid > 0 && H5Tclose(mem_typeid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
#endif
|
|
|
|
if (file_spaceid > 0)
|
|
|
|
{
|
|
|
|
if (H5Sclose(file_spaceid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
if (mem_spaceid > 0)
|
|
|
|
{
|
|
|
|
if (H5Sclose(mem_spaceid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
if (xfer_plistid > 0)
|
|
|
|
{
|
|
|
|
if (H5Pclose(xfer_plistid) < 0)
|
|
|
|
BAIL2(NC_EHDFERR);
|
|
|
|
}
|
|
|
|
#ifndef HDF5_CONVERT
|
|
|
|
if (need_to_convert && bufr != NULL)
|
|
|
|
free(bufr);
|
|
|
|
#endif
|
|
|
|
if (xtend_size)
|
|
|
|
free(xtend_size);
|
|
|
|
if (fillvalue)
|
|
|
|
{
|
|
|
|
if (var->type_info->nc_type_class == NC_VLEN)
|
|
|
|
nc_free_vlen((nc_vlen_t *)fillvalue);
|
|
|
|
else if (var->type_info->nc_type_class == NC_STRING && *(char **)fillvalue)
|
|
|
|
free(*(char **)fillvalue);
|
|
|
|
free(fillvalue);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If there was an error return it, otherwise return any potential
|
|
|
|
range error value. If none, return NC_NOERR as usual.*/
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
if (range_error)
|
|
|
|
return NC_ERANGE;
|
|
|
|
return NC_NOERR;
|
|
|
|
}
|