netcdf-c/libsrc4/nc4var.c

/* Copyright 2003-2018, University Corporation for Atmospheric
* Research. See COPYRIGHT file for copying and redistribution
* conditions.*/
/**
* @file
* @internal This file is part of netcdf-4, a netCDF-like interface
* for HDF5, or a HDF5 backend for netCDF, depending on your point of
* view. This file handles the NetCDF-4 variable functions.
*
* @author Ed Hartnett, Dennis Heimbigner, Ward Fisher
*/
#include "config.h"
#include "nc4internal.h"
#include "nc4dispatch.h"
#ifdef USE_HDF5
#include "hdf5internal.h"
#endif
#include <math.h>
/** @internal Default size for unlimited dim chunksize. */
#define DEFAULT_1D_UNLIM_SIZE (4096)
/* Define log_e for 10 and 2. Prefer the constants defined in math.h;
* however, some GCC environments can have a hard time defining
* M_LN10/M_LN2 despite finding math.h. */
#ifndef M_LN10
# define M_LN10 2.30258509299404568402 /**< log_e 10 */
#endif /* M_LN10 */
#ifndef M_LN2
# define M_LN2 0.69314718055994530942 /**< log_e 2 */
#endif /* M_LN2 */
/** Used in quantize code. Number of explicit bits in significand for
* floats. Bits 0-22 of SP significands are explicit. Bit 23 is
* implicitly 1. Currently redundant with NC_QUANTIZE_MAX_FLOAT_NSB
* and with float.h/cfloat (FLT_MANT_DIG-1). */
#define BIT_XPL_NBR_SGN_FLT (23)
/** Used in quantize code. Number of explicit bits in significand for
* doubles. Bits 0-51 of DP significands are explicit. Bit 52 is
* implicitly 1. Currently redundant with NC_QUANTIZE_MAX_DOUBLE_NSB
* and with float.h/cfloat (DBL_MANT_DIG-1). */
#define BIT_XPL_NBR_SGN_DBL (52)
/** Pointer union for floating point and bitmask types. */
typedef union { /* ptr_unn */
float *fp;
double *dp;
unsigned int *ui32p;
unsigned long long *ui64p;
void *vp;
} ptr_unn;
/**
* @internal This is called by nc_get_var_chunk_cache(). Get chunk
* cache size for a variable.
*
* @param ncid File ID.
* @param varid Variable ID.
* @param sizep Gets size in bytes of cache.
* @param nelemsp Gets number of element slots in cache.
* @param preemptionp Gets cache swapping setting.
*
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADID Bad ncid.
* @returns ::NC_ENOTVAR Invalid variable ID.
* @returns ::NC_ENOTNC4 Not a netCDF-4 file.
* @author Ed Hartnett
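*
* Illustrative use through the public API (a sketch only; assumes ncid
* and varid identify an open netCDF-4 file and variable):
*
* @code
* size_t size, nelems;
* float preemption;
* int stat = nc_get_var_chunk_cache(ncid, varid, &size, &nelems, &preemption);
* @endcode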
*/
int
NC4_get_var_chunk_cache(int ncid, int varid, size_t *sizep,
size_t *nelemsp, float *preemptionp)
{
NC *nc;
NC_GRP_INFO_T *grp;
NC_FILE_INFO_T *h5;
NC_VAR_INFO_T *var;
int retval;
/* Find info for this file and group, and set pointer to each. */
if ((retval = nc4_find_nc_grp_h5(ncid, &nc, &grp, &h5)))
return retval;
assert(nc && grp && h5);
/* Find the var. */
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,varid);
if(!var)
return NC_ENOTVAR;
assert(var && var->hdr.id == varid);
/* Give the user what they want. */
if (sizep)
*sizep = var->chunkcache.size;
if (nelemsp)
*nelemsp = var->chunkcache.nelems;
if (preemptionp)
*preemptionp = var->chunkcache.preemption;
return NC_NOERR;
}
/**
* @internal A wrapper for NC4_get_var_chunk_cache(); this version is
* needed for the Fortran API.
*
* @param ncid File ID.
* @param varid Variable ID.
* @param sizep Gets size in MB of cache.
* @param nelemsp Gets number of element slots in cache.
* @param preemptionp Gets cache swapping setting.
*
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADID Bad ncid.
* @returns ::NC_ENOTVAR Invalid variable ID.
* @returns ::NC_ENOTNC4 Not a netCDF-4 file.
* @author Ed Hartnett
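*
* Conversion sketch (illustrative numbers, assuming MEGABYTE is 2^20
* bytes): a 16777216-byte cache with preemption 0.75 is reported here
* as *sizep == 16 and *preemptionp == 75.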
*/
int
nc_get_var_chunk_cache_ints(int ncid, int varid, int *sizep,
int *nelemsp, int *preemptionp)
{
size_t real_size, real_nelems;
float real_preemption;
int ret;
if ((ret = NC4_get_var_chunk_cache(ncid, varid, &real_size,
&real_nelems, &real_preemption)))
return ret;
if (sizep)
*sizep = real_size / MEGABYTE;
if (nelemsp)
*nelemsp = (int)real_nelems;
if(preemptionp)
*preemptionp = (int)(real_preemption * 100);
return NC_NOERR;
}
/**
* @internal Get all the information about a variable. Pass NULL for
* whatever you don't care about. This is the internal function called
* by nc_inq_var(), nc_inq_var_deflate(), nc_inq_var_fletcher32(),
* nc_inq_var_chunking(), nc_inq_var_chunking_ints(),
* nc_inq_var_fill(), nc_inq_var_endian(), nc_inq_var_filter(), and
* nc_inq_var_szip().
*
* @param ncid File ID.
* @param varid Variable ID.
* @param name Gets name.
* @param xtypep Gets type.
* @param ndimsp Gets number of dims.
* @param dimidsp Gets array of dim IDs.
* @param nattsp Gets number of attributes.
* @param shufflep Gets shuffle setting.
* @param deflatep Gets deflate setting.
* @param deflate_levelp Gets deflate level.
* @param fletcher32p Gets fletcher32 setting.
* @param storagep Gets storage setting.
* @param chunksizesp Gets chunksizes.
* @param no_fill Gets fill mode.
* @param fill_valuep Gets fill value.
* @param endiannessp Gets one of ::NC_ENDIAN_BIG ::NC_ENDIAN_LITTLE
* ::NC_ENDIAN_NATIVE
* @param idp Pointer to memory to store filter id.
* @param nparamsp Pointer to memory to store filter parameter count.
* @param params Pointer to vector of unsigned integers into which
* to store filter parameters.
*
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADID Bad ncid.
* @returns ::NC_ENOTVAR Bad varid.
* @returns ::NC_ENOMEM Out of memory.
* @returns ::NC_EINVAL Invalid input.
* @author Ed Hartnett, Dennis Heimbigner
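*
* Illustrative call through one of the public wrappers (a sketch only;
* assumes ncid and varid are valid):
*
* @code
* char name[NC_MAX_NAME + 1];
* nc_type xtype;
* int ndims, dimids[NC_MAX_VAR_DIMS], natts;
* int stat = nc_inq_var(ncid, varid, name, &xtype, &ndims, dimids, &natts);
* @endcode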
*/
int
NC4_inq_var_all(int ncid, int varid, char *name, nc_type *xtypep,
int *ndimsp, int *dimidsp, int *nattsp,
int *shufflep, int *deflatep, int *deflate_levelp,
int *fletcher32p, int *storagep, size_t *chunksizesp,
int *no_fill, void *fill_valuep, int *endiannessp,
unsigned int *idp, size_t *nparamsp, unsigned int *params)
{
NC_GRP_INFO_T *grp;
NC_FILE_INFO_T *h5;
NC_VAR_INFO_T *var;
int d;
int retval;
LOG((2, "%s: ncid 0x%x varid %d", __func__, ncid, varid));
/* Find info for this file and group, and set pointer to each. */
if ((retval = nc4_find_nc_grp_h5(ncid, NULL, &grp, &h5)))
return retval;
assert(grp && h5);
/* If the varid is -1, find the global atts and call it a day. */
if (varid == NC_GLOBAL && nattsp)
{
*nattsp = ncindexcount(grp->att);
return NC_NOERR;
}
/* Find the var. */
if (!(var = (NC_VAR_INFO_T *)ncindexith(grp->vars, varid)))
return NC_ENOTVAR;
assert(var && var->hdr.id == varid);
/* Copy the data to the user's data buffers. */
if (name)
strcpy(name, var->hdr.name);
if (xtypep)
*xtypep = var->type_info->hdr.id;
if (ndimsp)
*ndimsp = var->ndims;
if (dimidsp)
for (d = 0; d < var->ndims; d++)
dimidsp[d] = var->dimids[d];
if (nattsp)
*nattsp = ncindexcount(var->att);
/* Did the user want the chunksizes? */
if (var->storage == NC_CHUNKED && chunksizesp)
{
for (d = 0; d < var->ndims; d++)
{
chunksizesp[d] = var->chunksizes[d];
LOG((4, "chunksizesp[%d]=%d", d, chunksizesp[d]));
}
}
/* Did the user inquire about the storage? */
if (storagep)
*storagep = var->storage;
/* Filter stuff. */
if (shufflep) {
retval = nc_inq_var_filter_info(ncid,varid,H5Z_FILTER_SHUFFLE,0,NULL);
if(retval && retval != NC_ENOFILTER) return retval;
*shufflep = (retval == NC_NOERR?1:0);
}
if (fletcher32p) {
retval = nc_inq_var_filter_info(ncid,varid,H5Z_FILTER_FLETCHER32,0,NULL);
if(retval && retval != NC_ENOFILTER) return retval;
*fletcher32p = (retval == NC_NOERR?1:0);
}
if (deflatep)
return NC_EFILTER;
if (idp) {
return NC_EFILTER;
}
/* Fill value stuff. */
if (no_fill)
*no_fill = (int)var->no_fill;
/* Don't do a thing with fill_valuep if no_fill mode is set for
* this var, or if fill_valuep is NULL. */
if (!var->no_fill && fill_valuep)
{
/* Do we have a fill value for this var? */
if (var->fill_value)
{
int xtype = var->type_info->hdr.id;
if((retval = NC_copy_data(h5->controller,xtype,var->fill_value,1,fill_valuep))) return retval;
}
else
{
if ((retval = nc4_get_default_fill_value(var->type_info, fill_valuep)))
return retval;
}
}
/* Does the user want the endianness of this variable? */
if (endiannessp)
*endiannessp = var->endianness;
return NC_NOERR;
}
/**
* @internal Inquire about chunking settings for a var. This is used
* by the fortran API.
*
* @param ncid File ID.
* @param varid Variable ID.
* @param storagep Gets storage setting (e.g. ::NC_CONTIGUOUS or ::NC_CHUNKED).
* @param chunksizesp Gets chunksizes.
*
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADID Bad ncid.
* @returns ::NC_ENOTVAR Invalid variable ID.
* @returns ::NC_EINVAL Invalid input
* @returns ::NC_ENOMEM Out of memory.
* @author Ed Hartnett
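*
* Illustrative call (a sketch only; assumes ncid and varid are valid
* and the variable has at most NC_MAX_VAR_DIMS dimensions):
*
* @code
* int storage, chunksizes[NC_MAX_VAR_DIMS];
* int stat = nc_inq_var_chunking_ints(ncid, varid, &storage, chunksizes);
* @endcode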
*/
int
nc_inq_var_chunking_ints(int ncid, int varid, int *storagep, int *chunksizesp)
{
NC_VAR_INFO_T *var;
size_t *cs = NULL;
int i, retval;
/* Get pointer to the var. */
if ((retval = nc4_find_grp_h5_var(ncid, varid, NULL, NULL, &var)))
return retval;
assert(var);
/* Allocate space for the size_t copy of the chunksizes array. */
if (var->ndims)
if (!(cs = malloc(var->ndims * sizeof(size_t))))
return NC_ENOMEM;
/* Call the netcdf-4 version directly. */
retval = NC4_inq_var_all(ncid, varid, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, storagep, cs, NULL,
NULL, NULL, NULL, NULL, NULL);
/* Copy from size_t array. */
if (!retval && chunksizesp && var->storage == NC_CHUNKED)
{
for (i = 0; i < var->ndims; i++)
{
chunksizesp[i] = (int)cs[i];
if (cs[i] > NC_MAX_INT)
retval = NC_ERANGE;
}
}
if (var->ndims)
free(cs);
return retval;
}
/**
* @internal Find the ID of a variable, from the name. This function
* is called by nc_inq_varid().
*
* @param ncid File ID.
* @param name Name of the variable.
* @param varidp Gets variable ID.
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADID Bad ncid.
* @returns ::NC_ENOTVAR Bad variable ID.
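*
* Illustrative call through the public API (a sketch only; "t" is a
* hypothetical variable name):
*
* @code
* int varid;
* int stat = nc_inq_varid(ncid, "t", &varid);
* @endcode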
*/
int
NC4_inq_varid(int ncid, const char *name, int *varidp)
{
NC *nc;
NC_GRP_INFO_T *grp;
NC_VAR_INFO_T *var;
char norm_name[NC_MAX_NAME + 1];
int retval;
if (!name)
return NC_EINVAL;
if (!varidp)
return NC_NOERR;
LOG((2, "%s: ncid 0x%x name %s", __func__, ncid, name));
/* Find info for this file and group, and set pointer to each. */
if ((retval = nc4_find_nc_grp_h5(ncid, &nc, &grp, NULL)))
return retval;
/* Normalize name. */
if ((retval = nc4_normalize_name(name, norm_name)))
return retval;
/* Find var of this name. */
var = (NC_VAR_INFO_T*)ncindexlookup(grp->vars,norm_name);
if(var)
{
*varidp = var->hdr.id;
return NC_NOERR;
}
return NC_ENOTVAR;
}
/**
* @internal
*
* This function changes the parallel access of a variable between
* independent and collective.
*
* @param ncid File ID.
* @param varid Variable ID.
* @param par_access NC_COLLECTIVE or NC_INDEPENDENT.
*
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADID Invalid ncid passed.
* @returns ::NC_ENOTVAR Invalid varid passed.
* @returns ::NC_ENOPAR File was not opened with nc_open_par/nc_create_par.
* @returns ::NC_EINVAL Invalid par_access specified.
* @returns ::NC_NOERR for success
* @author Ed Hartnett, Dennis Heimbigner
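*
* Illustrative call through the public API (a sketch only; assumes the
* file was opened with nc_open_par or created with nc_create_par):
*
* @code
* int stat = nc_var_par_access(ncid, varid, NC_COLLECTIVE);
* @endcode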
*/
int
NC4_var_par_access(int ncid, int varid, int par_access)
{
#ifndef USE_PARALLEL4
NC_UNUSED(ncid);
NC_UNUSED(varid);
NC_UNUSED(par_access);
return NC_ENOPAR;
#else
NC *nc;
NC_GRP_INFO_T *grp;
NC_FILE_INFO_T *h5;
NC_VAR_INFO_T *var;
int retval;
LOG((1, "%s: ncid 0x%x varid %d par_access %d", __func__, ncid,
varid, par_access));
if (par_access != NC_INDEPENDENT && par_access != NC_COLLECTIVE)
return NC_EINVAL;
/* Find info for this file and group, and set pointer to each. */
if ((retval = nc4_find_nc_grp_h5(ncid, &nc, &grp, &h5)))
return retval;
/* This function only for files opened with nc_open_par or nc_create_par. */
if (!h5->parallel)
return NC_ENOPAR;
/* Find the var, and set its preference. */
var = (NC_VAR_INFO_T*)ncindexith(grp->vars,varid);
if (!var) return NC_ENOTVAR;
assert(var->hdr.id == varid);
/* If any filters (e.g. zlib, shuffle, or fletcher32) are in use, then
* access must be collective. Fail an attempt to set such a variable
* to independent access. */
if (nclistlength((NClist*)var->filters) > 0 &&
par_access == NC_INDEPENDENT)
return NC_EINVAL;
if (par_access)
var->parallel_access = NC_COLLECTIVE;
else
var->parallel_access = NC_INDEPENDENT;
return NC_NOERR;
#endif /* USE_PARALLEL4 */
}
/**
* @internal Copy data from one buffer to another, performing
* appropriate data conversion.
*
* This function will copy data from one buffer to another, in
* accordance with the types. Range errors will be noted, and the fill
* value used (or the default fill value if none is supplied) for
* values that overflow the type.
*
* This function applies quantization to float and double data, if
* desired. The code to do this is derived from the corresponding
* filter in the CCR project (e.g.,
* https://github.com/ccr/ccr/blob/master/hdf5_plugins/BITGROOM/src/H5Zbitgroom.c).
*
* @param src Pointer to source of data.
* @param dest Pointer that gets data.
* @param src_type Type ID of source data.
* @param dest_type Type ID of destination data.
* @param len Number of elements of data to copy.
* @param range_error Pointer that gets 1 if there was a range error.
* @param fill_value The fill value.
* @param strict_nc3 Non-zero if strict model in effect.
* @param quantize_mode May be ::NC_NOQUANTIZE, ::NC_QUANTIZE_BITGROOM,
* ::NC_QUANTIZE_GRANULARBR, or ::NC_QUANTIZE_BITROUND.
* @param nsd Number of significant digits for quantize. Ignored
* unless quantize_mode is ::NC_QUANTIZE_BITGROOM,
* ::NC_QUANTIZE_GRANULARBR, or ::NC_QUANTIZE_BITROUND
*
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADTYPE Type not found.
* @author Ed Hartnett, Dennis Heimbigner
*/
int
nc4_convert_type(const void *src, void *dest, const nc_type src_type,
const nc_type dest_type, const size_t len, int *range_error,
const void *fill_value, int strict_nc3, int quantize_mode,
int nsd)
{
/* These vars are used with quantize feature. */
const double bit_per_dgt = M_LN10 / M_LN2; /* 3.32 [frc] Bits per decimal digit of precision = log2(10) */
const double dgt_per_bit= M_LN2 / M_LN10; /* 0.301 [frc] Decimal digits per bit of precision = log10(2) */
double mnt; /* [frc] Mantissa, 0.5 <= mnt < 1.0 */
double mnt_fabs; /* [frc] fabs(mantissa) */
double mnt_log10_fabs; /* [frc] log10(fabs(mantissa))) */
double val; /* [frc] Copy of input value to avoid indirection */
double mss_val_cmp_dbl; /* Missing value for comparison to double precision values */
float mss_val_cmp_flt; /* Missing value for comparison to single precision values */
int bit_xpl_nbr_zro; /* [nbr] Number of explicit bits to zero */
int dgt_nbr; /* [nbr] Number of digits before decimal point */
int qnt_pwr; /* [nbr] Power of two in quantization mask: qnt_msk = 2^qnt_pwr */
int xpn_bs2; /* [nbr] Binary exponent xpn_bs2 in val = sign(val) * 2^xpn_bs2 * mnt, 0.5 < mnt <= 1.0 */
size_t idx;
unsigned int *u32_ptr;
unsigned int msk_f32_u32_zro;
unsigned int msk_f32_u32_one;
unsigned int msk_f32_u32_hshv;
unsigned long long int *u64_ptr;
unsigned long long int msk_f64_u64_zro;
unsigned long long int msk_f64_u64_one;
unsigned long long int msk_f64_u64_hshv;
unsigned short prc_bnr_xpl_rqr; /* [nbr] Explicitly represented binary digits required to retain */
ptr_unn op1; /* I/O [frc] Values to quantize */
char *cp, *cp1;
float *fp, *fp1;
double *dp, *dp1;
int *ip, *ip1;
short *sp, *sp1;
signed char *bp, *bp1;
unsigned char *ubp, *ubp1;
unsigned short *usp, *usp1;
unsigned int *uip, *uip1;
long long *lip, *lip1;
unsigned long long *ulip, *ulip1;
size_t count = 0;
*range_error = 0;
LOG((3, "%s: len %d src_type %d dest_type %d", __func__, len, src_type,
dest_type));
/* If quantize is in use, set up some values. Quantize can only be
* used when the destination type is NC_FLOAT or NC_DOUBLE. */
if (quantize_mode != NC_NOQUANTIZE)
{
assert(dest_type == NC_FLOAT || dest_type == NC_DOUBLE);
/* Parameters shared by all quantization codecs */
if (dest_type == NC_FLOAT)
{
/* Determine the fill value. */
if (fill_value)
mss_val_cmp_flt = *(float *)fill_value;
else
mss_val_cmp_flt = NC_FILL_FLOAT;
}
else
{
/* Determine the fill value. */
if (fill_value)
mss_val_cmp_dbl = *(double *)fill_value;
else
mss_val_cmp_dbl = NC_FILL_DOUBLE;
}
/* Set parameters used by BitGroom and BitRound here, outside value loop.
Equivalent parameters used by GranularBR are set inside value loop,
since keep bits and thus masks can change for every value. */
if (quantize_mode == NC_QUANTIZE_BITGROOM ||
quantize_mode == NC_QUANTIZE_BITROUND )
{
if (quantize_mode == NC_QUANTIZE_BITGROOM){
/* BitGroom interprets nsd as number of significant decimal digits
* Must convert that to number of significant bits to preserve
* How many bits to preserve? Being conservative, we round up the
* exact binary digits of precision. Add one because the first bit
* is implicit not explicit but corner cases prevent our taking
* advantage of this. */
prc_bnr_xpl_rqr = (unsigned short)ceil(nsd * bit_per_dgt) + 1;
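/* Worked example (illustrative): nsd = 3 decimal digits gives
* ceil(3 * 3.3219...) + 1 = 11 explicitly retained mantissa bits. */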
}else if (quantize_mode == NC_QUANTIZE_BITROUND){
/* BitRound interprets nsd as number of significant binary digits (bits) */
prc_bnr_xpl_rqr = nsd;
}
if (dest_type == NC_FLOAT)
{
bit_xpl_nbr_zro = BIT_XPL_NBR_SGN_FLT - prc_bnr_xpl_rqr;
/* Create mask */
msk_f32_u32_zro = 0u; /* Zero all bits */
msk_f32_u32_zro = ~msk_f32_u32_zro; /* Turn all bits to ones */
/* BitShave mask for AND: Left shift zeros into bits to be
* rounded, leave ones in untouched bits. */
msk_f32_u32_zro <<= bit_xpl_nbr_zro;
/* BitSet mask for OR: Put ones into bits to be set, zeros in
* untouched bits. */
msk_f32_u32_one = ~msk_f32_u32_zro;
/* BitRound mask for ADD: Set one bit: the MSB of LSBs */
msk_f32_u32_hshv=msk_f32_u32_one & (msk_f32_u32_zro >> 1);
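/* Illustrative values: with prc_bnr_xpl_rqr = 11, bit_xpl_nbr_zro is
* 23 - 11 = 12, so msk_f32_u32_zro = 0xFFFFF000,
* msk_f32_u32_one = 0x00000FFF, and msk_f32_u32_hshv = 0x00000800. */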
}
else
{
bit_xpl_nbr_zro = BIT_XPL_NBR_SGN_DBL - prc_bnr_xpl_rqr;
/* Create mask. */
msk_f64_u64_zro = 0ul; /* Zero all bits. */
msk_f64_u64_zro = ~msk_f64_u64_zro; /* Turn all bits to ones. */
/* BitShave mask for AND: Left shift zeros into bits to be
* rounded, leave ones in untouched bits. */
msk_f64_u64_zro <<= bit_xpl_nbr_zro;
/* BitSet mask for OR: Put ones into bits to be set, zeros in
* untouched bits. */
msk_f64_u64_one = ~msk_f64_u64_zro;
/* BitRound mask for ADD: Set one bit: the MSB of LSBs */
msk_f64_u64_hshv = msk_f64_u64_one & (msk_f64_u64_zro >> 1);
}
}
} /* endif quantize */
/* OK, this is ugly. If you can think of anything better, I'm open
to suggestions!
Note that we don't use a default fill value for type
NC_BYTE. This is because Lord Voldemort cast a nofilleramous spell
at Harry Potter, but it bounced off his scar and hit the netcdf-4
code.
*/
switch (src_type)
{
case NC_CHAR:
switch (dest_type)
{
case NC_CHAR:
for (cp = (char *)src, cp1 = dest; count < len; count++)
*cp1++ = *cp++;
break;
default:
LOG((0, "%s: Unknown destination type.", __func__));
}
break;
case NC_BYTE:
switch (dest_type)
{
case NC_BYTE:
for (bp = (signed char *)src, bp1 = dest; count < len; count++)
*bp1++ = *bp++;
break;
case NC_UBYTE:
for (bp = (signed char *)src, ubp = dest; count < len; count++)
{
if (*bp < 0)
(*range_error)++;
*ubp++ = (unsigned char)*bp++;
}
break;
case NC_SHORT:
for (bp = (signed char *)src, sp = dest; count < len; count++)
*sp++ = *bp++;
break;
case NC_USHORT:
for (bp = (signed char *)src, usp = dest; count < len; count++)
{
if (*bp < 0)
(*range_error)++;
*usp++ = (unsigned short)*bp++;
}
break;
case NC_INT:
for (bp = (signed char *)src, ip = dest; count < len; count++)
*ip++ = *bp++;
break;
case NC_UINT:
for (bp = (signed char *)src, uip = dest; count < len; count++)
{
if (*bp < 0)
(*range_error)++;
*uip++ = (unsigned int)*bp++;
}
break;
case NC_INT64:
for (bp = (signed char *)src, lip = dest; count < len; count++)
*lip++ = *bp++;
break;
case NC_UINT64:
for (bp = (signed char *)src, ulip = dest; count < len; count++)
{
if (*bp < 0)
(*range_error)++;
*ulip++ = (unsigned long long)*bp++;
}
break;
case NC_FLOAT:
for (bp = (signed char *)src, fp = dest; count < len; count++)
*fp++ = *bp++;
break;
case NC_DOUBLE:
for (bp = (signed char *)src, dp = dest; count < len; count++)
*dp++ = *bp++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_UBYTE:
switch (dest_type)
{
case NC_BYTE:
for (ubp = (unsigned char *)src, bp = dest; count < len; count++)
{
if (!strict_nc3 && *ubp > X_SCHAR_MAX)
(*range_error)++;
*bp++ = (signed char)*ubp++;
}
break;
case NC_SHORT:
for (ubp = (unsigned char *)src, sp = dest; count < len; count++)
*sp++ = *ubp++;
break;
case NC_UBYTE:
for (ubp = (unsigned char *)src, ubp1 = dest; count < len; count++)
*ubp1++ = *ubp++;
break;
case NC_USHORT:
for (ubp = (unsigned char *)src, usp = dest; count < len; count++)
*usp++ = *ubp++;
break;
case NC_INT:
for (ubp = (unsigned char *)src, ip = dest; count < len; count++)
*ip++ = *ubp++;
break;
case NC_UINT:
for (ubp = (unsigned char *)src, uip = dest; count < len; count++)
*uip++ = *ubp++;
break;
case NC_INT64:
for (ubp = (unsigned char *)src, lip = dest; count < len; count++)
*lip++ = *ubp++;
break;
case NC_UINT64:
for (ubp = (unsigned char *)src, ulip = dest; count < len; count++)
*ulip++ = *ubp++;
break;
case NC_FLOAT:
for (ubp = (unsigned char *)src, fp = dest; count < len; count++)
*fp++ = *ubp++;
break;
case NC_DOUBLE:
for (ubp = (unsigned char *)src, dp = dest; count < len; count++)
*dp++ = *ubp++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_SHORT:
switch (dest_type)
{
case NC_UBYTE:
for (sp = (short *)src, ubp = dest; count < len; count++)
{
if (*sp > X_UCHAR_MAX || *sp < 0)
(*range_error)++;
*ubp++ = (unsigned char)*sp++;
}
break;
case NC_BYTE:
for (sp = (short *)src, bp = dest; count < len; count++)
{
if (*sp > X_SCHAR_MAX || *sp < X_SCHAR_MIN)
(*range_error)++;
*bp++ = (signed char)*sp++;
}
break;
case NC_SHORT:
for (sp = (short *)src, sp1 = dest; count < len; count++)
*sp1++ = *sp++;
break;
case NC_USHORT:
for (sp = (short *)src, usp = dest; count < len; count++)
{
if (*sp < 0)
(*range_error)++;
*usp++ = (unsigned short)*sp++;
}
break;
case NC_INT:
for (sp = (short *)src, ip = dest; count < len; count++)
*ip++ = *sp++;
break;
case NC_UINT:
for (sp = (short *)src, uip = dest; count < len; count++)
{
if (*sp < 0)
(*range_error)++;
*uip++ = (unsigned int)*sp++;
}
break;
case NC_INT64:
for (sp = (short *)src, lip = dest; count < len; count++)
*lip++ = *sp++;
break;
case NC_UINT64:
for (sp = (short *)src, ulip = dest; count < len; count++)
{
if (*sp < 0)
(*range_error)++;
*ulip++ = (unsigned long long)*sp++;
}
break;
case NC_FLOAT:
for (sp = (short *)src, fp = dest; count < len; count++)
*fp++ = *sp++;
break;
case NC_DOUBLE:
for (sp = (short *)src, dp = dest; count < len; count++)
*dp++ = *sp++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_USHORT:
switch (dest_type)
{
case NC_UBYTE:
for (usp = (unsigned short *)src, ubp = dest; count < len; count++)
{
if (*usp > X_UCHAR_MAX)
(*range_error)++;
*ubp++ = (unsigned char)*usp++;
}
break;
case NC_BYTE:
for (usp = (unsigned short *)src, bp = dest; count < len; count++)
{
if (*usp > X_SCHAR_MAX)
(*range_error)++;
*bp++ = (signed char)*usp++;
}
break;
case NC_SHORT:
for (usp = (unsigned short *)src, sp = dest; count < len; count++)
{
if (*usp > X_SHORT_MAX)
(*range_error)++;
*sp++ = (signed short)*usp++;
}
break;
case NC_USHORT:
for (usp = (unsigned short *)src, usp1 = dest; count < len; count++)
*usp1++ = *usp++;
break;
case NC_INT:
for (usp = (unsigned short *)src, ip = dest; count < len; count++)
*ip++ = *usp++;
break;
case NC_UINT:
for (usp = (unsigned short *)src, uip = dest; count < len; count++)
*uip++ = *usp++;
break;
case NC_INT64:
for (usp = (unsigned short *)src, lip = dest; count < len; count++)
*lip++ = *usp++;
break;
case NC_UINT64:
for (usp = (unsigned short *)src, ulip = dest; count < len; count++)
*ulip++ = *usp++;
break;
case NC_FLOAT:
for (usp = (unsigned short *)src, fp = dest; count < len; count++)
*fp++ = *usp++;
break;
case NC_DOUBLE:
for (usp = (unsigned short *)src, dp = dest; count < len; count++)
*dp++ = *usp++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_INT:
switch (dest_type)
{
case NC_UBYTE:
for (ip = (int *)src, ubp = dest; count < len; count++)
{
if (*ip > X_UCHAR_MAX || *ip < 0)
(*range_error)++;
*ubp++ = (unsigned char)*ip++;
}
break;
case NC_BYTE:
for (ip = (int *)src, bp = dest; count < len; count++)
{
if (*ip > X_SCHAR_MAX || *ip < X_SCHAR_MIN)
(*range_error)++;
*bp++ = (signed char)*ip++;
}
break;
case NC_SHORT:
for (ip = (int *)src, sp = dest; count < len; count++)
{
if (*ip > X_SHORT_MAX || *ip < X_SHORT_MIN)
(*range_error)++;
*sp++ = (short)*ip++;
}
break;
case NC_USHORT:
for (ip = (int *)src, usp = dest; count < len; count++)
{
if (*ip > X_USHORT_MAX || *ip < 0)
(*range_error)++;
*usp++ = (unsigned short)*ip++;
}
break;
case NC_INT: /* src is int */
for (ip = (int *)src, ip1 = dest; count < len; count++)
{
if (*ip > X_INT_MAX || *ip < X_INT_MIN)
(*range_error)++;
*ip1++ = *ip++;
}
break;
case NC_UINT:
for (ip = (int *)src, uip = dest; count < len; count++)
{
if (*ip > X_UINT_MAX || *ip < 0)
(*range_error)++;
*uip++ = (unsigned int)*ip++;
}
break;
case NC_INT64:
for (ip = (int *)src, lip = dest; count < len; count++)
*lip++ = *ip++;
break;
case NC_UINT64:
for (ip = (int *)src, ulip = dest; count < len; count++)
{
if (*ip < 0)
(*range_error)++;
*ulip++ = (unsigned long long)*ip++;
}
break;
case NC_FLOAT:
for (ip = (int *)src, fp = dest; count < len; count++)
*fp++ = (float)*ip++;
break;
case NC_DOUBLE:
for (ip = (int *)src, dp = dest; count < len; count++)
*dp++ = (double)*ip++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_UINT:
switch (dest_type)
{
case NC_UBYTE:
for (uip = (unsigned int *)src, ubp = dest; count < len; count++)
{
if (*uip > X_UCHAR_MAX)
(*range_error)++;
*ubp++ = (unsigned char)*uip++;
}
break;
case NC_BYTE:
for (uip = (unsigned int *)src, bp = dest; count < len; count++)
{
if (*uip > X_SCHAR_MAX)
(*range_error)++;
*bp++ = (signed char)*uip++;
}
break;
case NC_SHORT:
for (uip = (unsigned int *)src, sp = dest; count < len; count++)
{
if (*uip > X_SHORT_MAX)
(*range_error)++;
*sp++ = (signed short)*uip++;
}
break;
case NC_USHORT:
for (uip = (unsigned int *)src, usp = dest; count < len; count++)
{
if (*uip > X_USHORT_MAX)
(*range_error)++;
*usp++ = (unsigned short)*uip++;
}
break;
case NC_INT:
for (uip = (unsigned int *)src, ip = dest; count < len; count++)
{
if (*uip > X_INT_MAX)
(*range_error)++;
*ip++ = (int)*uip++;
}
break;
case NC_UINT:
for (uip = (unsigned int *)src, uip1 = dest; count < len; count++)
{
if (*uip > X_UINT_MAX)
(*range_error)++;
*uip1++ = *uip++;
}
break;
case NC_INT64:
for (uip = (unsigned int *)src, lip = dest; count < len; count++)
*lip++ = *uip++;
break;
case NC_UINT64:
for (uip = (unsigned int *)src, ulip = dest; count < len; count++)
*ulip++ = *uip++;
break;
case NC_FLOAT:
for (uip = (unsigned int *)src, fp = dest; count < len; count++)
*fp++ = (float)*uip++;
break;
case NC_DOUBLE:
for (uip = (unsigned int *)src, dp = dest; count < len; count++)
*dp++ = *uip++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_INT64:
switch (dest_type)
{
case NC_UBYTE:
for (lip = (long long *)src, ubp = dest; count < len; count++)
{
if (*lip > X_UCHAR_MAX || *lip < 0)
(*range_error)++;
*ubp++ = (unsigned char)*lip++;
}
break;
case NC_BYTE:
for (lip = (long long *)src, bp = dest; count < len; count++)
{
if (*lip > X_SCHAR_MAX || *lip < X_SCHAR_MIN)
(*range_error)++;
*bp++ = (signed char)*lip++;
}
break;
case NC_SHORT:
for (lip = (long long *)src, sp = dest; count < len; count++)
{
if (*lip > X_SHORT_MAX || *lip < X_SHORT_MIN)
(*range_error)++;
*sp++ = (short)*lip++;
}
break;
case NC_USHORT:
for (lip = (long long *)src, usp = dest; count < len; count++)
{
if (*lip > X_USHORT_MAX || *lip < 0)
(*range_error)++;
*usp++ = (unsigned short)*lip++;
}
break;
case NC_UINT:
for (lip = (long long *)src, uip = dest; count < len; count++)
{
if (*lip > X_UINT_MAX || *lip < 0)
(*range_error)++;
*uip++ = (unsigned int)*lip++;
}
break;
case NC_INT:
for (lip = (long long *)src, ip = dest; count < len; count++)
{
if (*lip > X_INT_MAX || *lip < X_INT_MIN)
(*range_error)++;
*ip++ = (int)*lip++;
}
break;
case NC_INT64:
for (lip = (long long *)src, lip1 = dest; count < len; count++)
*lip1++ = *lip++;
break;
case NC_UINT64:
for (lip = (long long *)src, ulip = dest; count < len; count++)
{
if (*lip < 0)
(*range_error)++;
*ulip++ = (unsigned long long)*lip++;
}
break;
case NC_FLOAT:
for (lip = (long long *)src, fp = dest; count < len; count++)
*fp++ = (float)*lip++;
break;
case NC_DOUBLE:
for (lip = (long long *)src, dp = dest; count < len; count++)
*dp++ = (double)*lip++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_UINT64:
switch (dest_type)
{
case NC_UBYTE:
for (ulip = (unsigned long long *)src, ubp = dest; count < len; count++)
{
if (*ulip > X_UCHAR_MAX)
(*range_error)++;
*ubp++ = (unsigned char)*ulip++;
}
break;
case NC_BYTE:
for (ulip = (unsigned long long *)src, bp = dest; count < len; count++)
{
if (*ulip > X_SCHAR_MAX)
(*range_error)++;
*bp++ = (signed char)*ulip++;
}
break;
case NC_SHORT:
for (ulip = (unsigned long long *)src, sp = dest; count < len; count++)
{
if (*ulip > X_SHORT_MAX)
(*range_error)++;
*sp++ = (short)*ulip++;
}
break;
case NC_USHORT:
for (ulip = (unsigned long long *)src, usp = dest; count < len; count++)
{
if (*ulip > X_USHORT_MAX)
(*range_error)++;
*usp++ = (unsigned short)*ulip++;
}
break;
case NC_UINT:
for (ulip = (unsigned long long *)src, uip = dest; count < len; count++)
{
if (*ulip > X_UINT_MAX)
(*range_error)++;
*uip++ = (unsigned int)*ulip++;
}
break;
case NC_INT:
for (ulip = (unsigned long long *)src, ip = dest; count < len; count++)
{
if (*ulip > X_INT_MAX)
(*range_error)++;
*ip++ = (int)*ulip++;
}
break;
case NC_INT64:
for (ulip = (unsigned long long *)src, lip = dest; count < len; count++)
{
if (*ulip > X_INT64_MAX)
(*range_error)++;
*lip++ = (long long)*ulip++;
}
break;
case NC_UINT64:
for (ulip = (unsigned long long *)src, ulip1 = dest; count < len; count++)
*ulip1++ = *ulip++;
break;
case NC_FLOAT:
for (ulip = (unsigned long long *)src, fp = dest; count < len; count++)
*fp++ = (float)*ulip++;
break;
case NC_DOUBLE:
for (ulip = (unsigned long long *)src, dp = dest; count < len; count++)
*dp++ = (double)*ulip++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_FLOAT:
switch (dest_type)
{
case NC_UBYTE:
for (fp = (float *)src, ubp = dest; count < len; count++)
{
if (*fp > X_UCHAR_MAX || *fp < 0)
(*range_error)++;
*ubp++ = (unsigned char)*fp++;
}
break;
case NC_BYTE:
for (fp = (float *)src, bp = dest; count < len; count++)
{
if (*fp > (double)X_SCHAR_MAX || *fp < (double)X_SCHAR_MIN)
(*range_error)++;
*bp++ = (signed char)*fp++;
}
break;
case NC_SHORT:
for (fp = (float *)src, sp = dest; count < len; count++)
{
if (*fp > (double)X_SHORT_MAX || *fp < (double)X_SHORT_MIN)
(*range_error)++;
*sp++ = (short)*fp++;
}
break;
case NC_USHORT:
for (fp = (float *)src, usp = dest; count < len; count++)
{
if (*fp > X_USHORT_MAX || *fp < 0)
(*range_error)++;
*usp++ = (unsigned short)*fp++;
}
break;
case NC_UINT:
for (fp = (float *)src, uip = dest; count < len; count++)
{
if (*fp > (float)X_UINT_MAX || *fp < 0)
(*range_error)++;
*uip++ = (unsigned int)*fp++;
}
break;
case NC_INT:
for (fp = (float *)src, ip = dest; count < len; count++)
{
if (*fp > (double)X_INT_MAX || *fp < (double)X_INT_MIN)
(*range_error)++;
*ip++ = (int)*fp++;
}
break;
case NC_INT64:
for (fp = (float *)src, lip = dest; count < len; count++)
{
                if (*fp > (float)X_INT64_MAX || *fp < X_INT64_MIN)
(*range_error)++;
*lip++ = (long long)*fp++;
}
break;
case NC_UINT64:
for (fp = (float *)src, ulip = dest; count < len; count++)
{
if (*fp > (float)X_UINT64_MAX || *fp < 0)
(*range_error)++;
*ulip++ = (unsigned long long)*fp++;
}
break;
case NC_FLOAT:
for (fp = (float *)src, fp1 = dest; count < len; count++)
*fp1++ = *fp++;
break;
case NC_DOUBLE:
for (fp = (float *)src, dp = dest; count < len; count++)
*dp++ = *fp++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
case NC_DOUBLE:
switch (dest_type)
{
case NC_UBYTE:
for (dp = (double *)src, ubp = dest; count < len; count++)
{
if (*dp > X_UCHAR_MAX || *dp < 0)
(*range_error)++;
*ubp++ = (unsigned char)*dp++;
}
break;
case NC_BYTE:
for (dp = (double *)src, bp = dest; count < len; count++)
{
if (*dp > X_SCHAR_MAX || *dp < X_SCHAR_MIN)
(*range_error)++;
*bp++ = (signed char)*dp++;
}
break;
case NC_SHORT:
for (dp = (double *)src, sp = dest; count < len; count++)
{
if (*dp > X_SHORT_MAX || *dp < X_SHORT_MIN)
(*range_error)++;
*sp++ = (short)*dp++;
}
break;
case NC_USHORT:
for (dp = (double *)src, usp = dest; count < len; count++)
{
if (*dp > X_USHORT_MAX || *dp < 0)
(*range_error)++;
*usp++ = (unsigned short)*dp++;
}
break;
case NC_UINT:
for (dp = (double *)src, uip = dest; count < len; count++)
{
if (*dp > X_UINT_MAX || *dp < 0)
(*range_error)++;
*uip++ = (unsigned int)*dp++;
}
break;
case NC_INT:
for (dp = (double *)src, ip = dest; count < len; count++)
{
if (*dp > X_INT_MAX || *dp < X_INT_MIN)
(*range_error)++;
*ip++ = (int)*dp++;
}
break;
case NC_INT64:
for (dp = (double *)src, lip = dest; count < len; count++)
{
if (*dp > (double)X_INT64_MAX || *dp < X_INT64_MIN)
(*range_error)++;
*lip++ = (long long)*dp++;
}
break;
case NC_UINT64:
for (dp = (double *)src, ulip = dest; count < len; count++)
{
if (*dp > (double)X_UINT64_MAX || *dp < 0)
(*range_error)++;
*ulip++ = (unsigned long long)*dp++;
}
break;
case NC_FLOAT:
for (dp = (double *)src, fp = dest; count < len; count++)
{
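                /* isgreater()/isless() are the quiet comparison macros from
                 * <math.h>: unlike the > and < operators, they do not raise
                 * a floating-point exception when *dp is a NaN. */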
if (isgreater(*dp, X_FLOAT_MAX) || isless(*dp, X_FLOAT_MIN))
(*range_error)++;
*fp++ = (float)*dp++;
}
break;
case NC_DOUBLE:
for (dp = (double *)src, dp1 = dest; count < len; count++)
*dp1++ = *dp++;
break;
default:
LOG((0, "%s: unexpected dest type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
break;
default:
LOG((0, "%s: unexpected src type. src_type %d, dest_type %d",
__func__, src_type, dest_type));
return NC_EBADTYPE;
}
    /* If quantization is in use, apply the requested algorithm to the
     * converted data in place: the masks were determined above (or, for
     * granular mode, are computed per value below). */
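    /* All three modes skip values equal to the missing-value comparators
     * (mss_val_cmp_flt / mss_val_cmp_dbl), so fill/missing values pass
     * through unmodified. */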
if (quantize_mode == NC_QUANTIZE_BITGROOM)
{
if (dest_type == NC_FLOAT)
{
/* BitGroom: alternately shave and set LSBs */
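            /* Shaving the even-indexed values and setting the odd-indexed
             * values produces quantization errors of opposite sign that
             * largely cancel in means and other aggregate statistics (the
             * Bit Grooming approach of Zender, 2016). */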
op1.fp = (float *)dest;
u32_ptr = op1.ui32p;
for (idx = 0L; idx < len; idx += 2L)
if (op1.fp[idx] != mss_val_cmp_flt)
u32_ptr[idx] &= msk_f32_u32_zro;
for (idx = 1L; idx < len; idx += 2L)
if (op1.fp[idx] != mss_val_cmp_flt && u32_ptr[idx] != 0U) /* Never quantize upwards floating point values of zero */
u32_ptr[idx] |= msk_f32_u32_one;
}
else
{
/* BitGroom: alternately shave and set LSBs. */
op1.dp = (double *)dest;
u64_ptr = op1.ui64p;
for (idx = 0L; idx < len; idx += 2L)
if (op1.dp[idx] != mss_val_cmp_dbl)
u64_ptr[idx] &= msk_f64_u64_zro;
for (idx = 1L; idx < len; idx += 2L)
if (op1.dp[idx] != mss_val_cmp_dbl && u64_ptr[idx] != 0ULL) /* Never quantize upwards floating point values of zero */
u64_ptr[idx] |= msk_f64_u64_one;
}
} /* endif BitGroom */
if (quantize_mode == NC_QUANTIZE_BITROUND)
{
if (dest_type == NC_FLOAT)
{
/* BitRound: Quantize to user-specified NSB with IEEE-rounding */
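            /* msk_f32_u32_hshv holds a single 1 in the most significant of
             * the bits to be discarded, so the addition implements
             * round-to-nearest: when the discarded bits amount to at least
             * half an LSB of the kept field, a carry propagates into the
             * retained mantissa (or exponent) before the discarded bits are
             * masked to zero. */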
op1.fp = (float *)dest;
u32_ptr = op1.ui32p;
for (idx = 0L; idx < len; idx++){
if (op1.fp[idx] != mss_val_cmp_flt){
u32_ptr[idx] += msk_f32_u32_hshv; /* Add 1 to the MSB of LSBs, carry 1 to mantissa or even exponent */
u32_ptr[idx] &= msk_f32_u32_zro; /* Shave it */
}
}
}
else
{
/* BitRound: Quantize to user-specified NSB with IEEE-rounding */
op1.dp = (double *)dest;
u64_ptr = op1.ui64p;
for (idx = 0L; idx < len; idx++){
if (op1.dp[idx] != mss_val_cmp_dbl){
u64_ptr[idx] += msk_f64_u64_hshv; /* Add 1 to the MSB of LSBs, carry 1 to mantissa or even exponent */
u64_ptr[idx] &= msk_f64_u64_zro; /* Shave it */
}
}
}
} /* endif BitRound */
if (quantize_mode == NC_QUANTIZE_GRANULARBR)
{
if (dest_type == NC_FLOAT)
{
/* Granular BitRound */
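            /* Per-value mask computation (a sketch of the arithmetic):
             * frexp() splits the value into a mantissa and a base-2
             * exponent; dgt_nbr estimates how many decimal digits the value
             * carries, qnt_pwr converts the digits beyond the requested nsd
             * into a power of two, and prc_bnr_xpl_rqr is the number of
             * explicit mantissa bits that must be kept. bit_per_dgt and
             * dgt_per_bit are presumably log2(10) and log10(2) (~3.32 and
             * ~0.301), derived from M_LN10 / M_LN2 above. The BitRound
             * add-and-mask step is then applied with this per-value mask. */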
op1.fp = (float *)dest;
u32_ptr = op1.ui32p;
for (idx = 0L; idx < len; idx++)
{
if((val = op1.fp[idx]) != mss_val_cmp_flt && u32_ptr[idx] != 0U)
{
mnt = frexp(val, &xpn_bs2); /* DGG19 p. 4102 (8) */
mnt_fabs = fabs(mnt);
mnt_log10_fabs = log10(mnt_fabs);
/* 20211003 Continuous determination of dgt_nbr improves CR by ~10% */
dgt_nbr = (int)floor(xpn_bs2 * dgt_per_bit + mnt_log10_fabs) + 1; /* DGG19 p. 4102 (8.67) */
qnt_pwr = (int)floor(bit_per_dgt * (dgt_nbr - nsd)); /* DGG19 p. 4101 (7) */
prc_bnr_xpl_rqr = mnt_fabs == 0.0 ? 0 : abs((int)floor(xpn_bs2 - bit_per_dgt*mnt_log10_fabs) - qnt_pwr); /* Protect against mnt = -0.0 */
prc_bnr_xpl_rqr--; /* 20211003 Reduce formula result by 1 bit: Passes all tests, improves CR by ~10% */
bit_xpl_nbr_zro = BIT_XPL_NBR_SGN_FLT - prc_bnr_xpl_rqr;
msk_f32_u32_zro = 0u; /* Zero all bits */
msk_f32_u32_zro = ~msk_f32_u32_zro; /* Turn all bits to ones */
/* Bit Shave mask for AND: Left shift zeros into bits to be rounded, leave ones in untouched bits */
msk_f32_u32_zro <<= bit_xpl_nbr_zro;
/* Bit Set mask for OR: Put ones into bits to be set, zeros in untouched bits */
msk_f32_u32_one = ~msk_f32_u32_zro;
msk_f32_u32_hshv = msk_f32_u32_one & (msk_f32_u32_zro >> 1); /* Set one bit: the MSB of LSBs */
u32_ptr[idx] += msk_f32_u32_hshv; /* Add 1 to the MSB of LSBs, carry 1 to mantissa or even exponent */
u32_ptr[idx] &= msk_f32_u32_zro; /* Shave it */
} /* !mss_val_cmp_flt */
}
}
else
{
/* Granular BitRound */
op1.dp = (double *)dest;
u64_ptr = op1.ui64p;
for (idx = 0L; idx < len; idx++)
{
if((val = op1.dp[idx]) != mss_val_cmp_dbl && u64_ptr[idx] != 0ULL)
{
mnt = frexp(val, &xpn_bs2); /* DGG19 p. 4102 (8) */
mnt_fabs = fabs(mnt);
mnt_log10_fabs = log10(mnt_fabs);
/* 20211003 Continuous determination of dgt_nbr improves CR by ~10% */
dgt_nbr = (int)floor(xpn_bs2 * dgt_per_bit + mnt_log10_fabs) + 1; /* DGG19 p. 4102 (8.67) */
qnt_pwr = (int)floor(bit_per_dgt * (dgt_nbr - nsd)); /* DGG19 p. 4101 (7) */
prc_bnr_xpl_rqr = mnt_fabs == 0.0 ? 0 : abs((int)floor(xpn_bs2 - bit_per_dgt*mnt_log10_fabs) - qnt_pwr); /* Protect against mnt = -0.0 */
prc_bnr_xpl_rqr--; /* 20211003 Reduce formula result by 1 bit: Passes all tests, improves CR by ~10% */
bit_xpl_nbr_zro = BIT_XPL_NBR_SGN_DBL - prc_bnr_xpl_rqr;
msk_f64_u64_zro = 0ull; /* Zero all bits */
msk_f64_u64_zro = ~msk_f64_u64_zro; /* Turn all bits to ones */
/* Bit Shave mask for AND: Left shift zeros into bits to be rounded, leave ones in untouched bits */
msk_f64_u64_zro <<= bit_xpl_nbr_zro;
/* Bit Set mask for OR: Put ones into bits to be set, zeros in untouched bits */
msk_f64_u64_one = ~msk_f64_u64_zro;
msk_f64_u64_hshv = msk_f64_u64_one & (msk_f64_u64_zro >> 1); /* Set one bit: the MSB of LSBs */
u64_ptr[idx] += msk_f64_u64_hshv; /* Add 1 to the MSB of LSBs, carry 1 to mantissa or even exponent */
u64_ptr[idx] &= msk_f64_u64_zro; /* Shave it */
} /* !mss_val_cmp_dbl */
}
}
} /* endif GranularBR */
return NC_NOERR;
}
/**
* @internal What fill value should be used for a variable?
*
* @param h5 Pointer to HDF5 file info struct.
* @param var Pointer to variable info struct.
 * @param fillp Pointer that gets a pointer to a newly allocated copy of
 * the fill value. The caller must free this memory.
*
* @returns NC_NOERR No error.
* @returns NC_ENOMEM Out of memory.
* @author Ed Hartnett
*/
int
nc4_get_fill_value(NC_FILE_INFO_T *h5, NC_VAR_INFO_T *var, void **fillp)
{
size_t size;
int retval;
/* Find out how much space we need for this type's fill value. */
if (var->type_info->nc_type_class == NC_VLEN)
size = sizeof(nc_vlen_t);
else if (var->type_info->nc_type_class == NC_STRING)
size = sizeof(char *);
else
{
if ((retval = nc4_get_typelen_mem(h5, var->type_info->hdr.id, &size)))
return retval;
}
assert(size);
/* Allocate the space. */
if (!((*fillp) = calloc(1, size)))
return NC_ENOMEM;
    /* If the user has set a fill_value for this var, use it; otherwise
     * find the default fill value. */
if (var->fill_value)
{
LOG((4, "Found a fill value for var %s", var->hdr.name));
if (var->type_info->nc_type_class == NC_VLEN)
{
nc_vlen_t *in_vlen = (nc_vlen_t *)(var->fill_value), *fv_vlen = (nc_vlen_t *)(*fillp);
size_t basetypesize = 0;
if((retval=nc4_get_typelen_mem(h5, var->type_info->u.v.base_nc_typeid, &basetypesize)))
return retval;
fv_vlen->len = in_vlen->len;
if (!(fv_vlen->p = malloc(basetypesize * in_vlen->len)))
{
free(*fillp);
*fillp = NULL;
return NC_ENOMEM;
}
memcpy(fv_vlen->p, in_vlen->p, in_vlen->len * basetypesize);
}
else if (var->type_info->nc_type_class == NC_STRING)
{
if (*(char **)var->fill_value)
if (!(**(char ***)fillp = strdup(*(char **)var->fill_value)))
{
free(*fillp);
*fillp = NULL;
return NC_ENOMEM;
}
}
else
memcpy((*fillp), var->fill_value, size);
}
else
{
if (nc4_get_default_fill_value(var->type_info, *fillp))
{
/* Note: release memory, but don't return error on failure */
free(*fillp);
*fillp = NULL;
}
}
return NC_NOERR;
}
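/* Illustrative usage (a sketch, not part of the library API contract): the
 * caller owns the buffer returned through fillp and must release it,
 * including any nested allocations made above for VLEN or string fills:
 *
 *     void *fill = NULL;
 *     if (!nc4_get_fill_value(h5, var, &fill) && fill)
 *     {
 *         ...use the fill value...
 *         if (var->type_info->nc_type_class == NC_VLEN)
 *             nc_free_vlen((nc_vlen_t *)fill);
 *         else if (var->type_info->nc_type_class == NC_STRING && *(char **)fill)
 *             free(*(char **)fill);
 *         free(fill);
 *     }
 */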
/**
* @internal Get the length, in bytes, of one element of a type in
* memory.
*
* @param h5 Pointer to HDF5 file info struct.
* @param xtype NetCDF type ID.
* @param len Pointer that gets length in bytes.
*
* @returns NC_NOERR No error.
* @returns NC_EBADTYPE Type not found.
* @author Ed Hartnett
*/
int
nc4_get_typelen_mem(NC_FILE_INFO_T *h5, nc_type xtype, size_t *len)
{
NC_TYPE_INFO_T *type;
int retval;
LOG((4, "%s xtype: %d", __func__, xtype));
assert(len);
/* If this is an atomic type, the answer is easy. */
switch (xtype)
{
case NC_BYTE:
case NC_CHAR:
case NC_UBYTE:
*len = sizeof(char);
return NC_NOERR;
case NC_SHORT:
case NC_USHORT:
*len = sizeof(short);
return NC_NOERR;
case NC_INT:
case NC_UINT:
*len = sizeof(int);
return NC_NOERR;
case NC_FLOAT:
*len = sizeof(float);
return NC_NOERR;
case NC_DOUBLE:
*len = sizeof(double);
return NC_NOERR;
case NC_INT64:
case NC_UINT64:
*len = sizeof(long long);
return NC_NOERR;
case NC_STRING:
*len = sizeof(char *);
return NC_NOERR;
}
/* See if var is compound type. */
if ((retval = nc4_find_type(h5, xtype, &type)))
return retval;
if (!type)
return NC_EBADTYPE;
*len = type->size;
LOG((5, "type->size: %d", type->size));
return NC_NOERR;
}
/**
* @internal Check a set of chunksizes to see if they specify a chunk
* that is too big.
*
* @param grp Pointer to the group info.
* @param var Pointer to the var info.
* @param chunksizes Array of chunksizes to check.
*
* @returns ::NC_NOERR No error.
* @returns ::NC_EBADID Bad ncid.
* @returns ::NC_ENOTVAR Invalid variable ID.
* @returns ::NC_EBADCHUNK Bad chunksize.
*/
int
nc4_check_chunksizes(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var, const size_t *chunksizes)
{
double dprod;
size_t type_len;
int d;
int retval;
if ((retval = nc4_get_typelen_mem(grp->nc4_info, var->type_info->hdr.id, &type_len)))
return retval;
if (var->type_info->nc_type_class == NC_VLEN)
dprod = (double)sizeof(nc_vlen_t);
else
dprod = (double)type_len;
for (d = 0; d < var->ndims; d++)
dprod *= (double)chunksizes[d];
if (dprod > (double) NC_MAX_UINT)
return NC_EBADCHUNK;
return NC_NOERR;
}
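/* Example (for illustration only): a chunk of 1024 x 1024 x 1024 doubles is
 * 2^30 * 8 bytes = 8 GiB, which exceeds NC_MAX_UINT (~4 GiB), so such
 * chunksizes would be rejected with NC_EBADCHUNK. */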
/**
* @internal Determine some default chunksizes for a variable.
*
* @param grp Pointer to the group info.
* @param var Pointer to the var info.
*
* @returns ::NC_NOERR for success
* @returns ::NC_EBADID Bad ncid.
* @returns ::NC_ENOTVAR Invalid variable ID.
* @author Ed Hartnett, Dennis Heimbigner
*/
int
nc4_find_default_chunksizes2(NC_GRP_INFO_T *grp, NC_VAR_INFO_T *var)
{
int d;
size_t type_size;
float num_values = 1, num_unlim = 0;
int retval;
size_t suggested_size;
#ifdef LOGGING
double total_chunk_size;
#endif
if (var->type_info->nc_type_class == NC_STRING)
type_size = sizeof(char *);
else
type_size = var->type_info->size;
#ifdef LOGGING
/* Later this will become the total number of bytes in the default
* chunk. */
total_chunk_size = (double) type_size;
#endif
if(var->chunksizes == NULL) {
if((var->chunksizes = calloc(1,sizeof(size_t)*var->ndims)) == NULL)
return NC_ENOMEM;
}
/* How many values in the variable (or one record, if there are
* unlimited dimensions). */
for (d = 0; d < var->ndims; d++)
{
assert(var->dim[d]);
if (! var->dim[d]->unlimited)
num_values *= (float)var->dim[d]->len;
else {
num_unlim++;
var->chunksizes[d] = 1; /* overwritten below, if all dims are unlimited */
}
}
    /* Special case to avoid 1D vars with an unlimited dim taking a huge
       amount of space (DEFAULT_CHUNK_SIZE bytes). Instead we limit the
       chunk to about 4 KB. */
if (var->ndims == 1 && num_unlim == 1) {
if (DEFAULT_CHUNK_SIZE / type_size <= 0)
suggested_size = 1;
else if (DEFAULT_CHUNK_SIZE / type_size > DEFAULT_1D_UNLIM_SIZE)
suggested_size = DEFAULT_1D_UNLIM_SIZE;
else
suggested_size = DEFAULT_CHUNK_SIZE / type_size;
var->chunksizes[0] = suggested_size / type_size;
LOG((4, "%s: name %s dim %d DEFAULT_CHUNK_SIZE %d num_values %f type_size %d "
"chunksize %ld", __func__, var->hdr.name, d, DEFAULT_CHUNK_SIZE, num_values, type_size, var->chunksizes[0]));
}
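    /* For example, a 1-D unlimited variable of doubles normally gets
     * suggested_size = DEFAULT_1D_UNLIM_SIZE (4096), so chunksizes[0]
     * becomes 4096 / 8 = 512 values, i.e. a chunk of about 4 KB as intended
     * above (assuming DEFAULT_CHUNK_SIZE / type_size exceeds 4096, which
     * holds for the usual multi-megabyte DEFAULT_CHUNK_SIZE). */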
if (var->ndims > 1 && var->ndims == num_unlim) { /* all dims unlimited */
suggested_size = pow((double)DEFAULT_CHUNK_SIZE/type_size, 1.0/(double)(var->ndims));
for (d = 0; d < var->ndims; d++)
{
var->chunksizes[d] = suggested_size ? suggested_size : 1;
LOG((4, "%s: name %s dim %d DEFAULT_CHUNK_SIZE %d num_values %f type_size %d "
"chunksize %ld", __func__, var->hdr.name, d, DEFAULT_CHUNK_SIZE, num_values, type_size, var->chunksizes[d]));
}
}
/* Pick a chunk length for each dimension, if one has not already
* been picked above. */
for (d = 0; d < var->ndims; d++)
if (!var->chunksizes[d])
{
suggested_size = (pow((double)DEFAULT_CHUNK_SIZE/(num_values * type_size),
1.0/(double)(var->ndims - num_unlim)) * var->dim[d]->len - .5);
if (suggested_size > var->dim[d]->len)
suggested_size = var->dim[d]->len;
var->chunksizes[d] = suggested_size ? suggested_size : 1;
LOG((4, "%s: name %s dim %d DEFAULT_CHUNK_SIZE %d num_values %f type_size %d "
"chunksize %ld", __func__, var->hdr.name, d, DEFAULT_CHUNK_SIZE, num_values, type_size, var->chunksizes[d]));
}
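    /* The formula above scales every still-unset dimension by the same
     * factor f = (DEFAULT_CHUNK_SIZE / (num_values * type_size))^(1/k),
     * with k the number of non-unlimited dimensions, so the product of the
     * chunk lengths times type_size lands near DEFAULT_CHUNK_SIZE (per
     * record when unlimited dimensions are present). */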
#ifdef LOGGING
/* Find total chunk size. */
for (d = 0; d < var->ndims; d++)
total_chunk_size *= (double) var->chunksizes[d];
LOG((4, "total_chunk_size %f", total_chunk_size));
#endif
/* But did this result in a chunk that is too big? */
retval = nc4_check_chunksizes(grp, var, var->chunksizes);
if (retval)
{
/* Other error? */
if (retval != NC_EBADCHUNK)
return retval;
/* Chunk is too big! Reduce each dimension by half and try again. */
for ( ; retval == NC_EBADCHUNK; retval = nc4_check_chunksizes(grp, var, var->chunksizes))
for (d = 0; d < var->ndims; d++)
var->chunksizes[d] = var->chunksizes[d]/2 ? var->chunksizes[d]/2 : 1;
}
/* Do we have any big data overhangs? They can be dangerous to
* babies, the elderly, or confused campers who have had too much
* beer. */
for (d = 0; d < var->ndims; d++)
{
size_t num_chunks;
size_t overhang;
assert(var->chunksizes[d] > 0);
num_chunks = (var->dim[d]->len + var->chunksizes[d] - 1) / var->chunksizes[d];
if(num_chunks > 0) {
overhang = (num_chunks * var->chunksizes[d]) - var->dim[d]->len;
var->chunksizes[d] -= overhang / num_chunks;
}
}
return NC_NOERR;
}
/**
* @internal Get the default fill value for an atomic type. Memory for
* fill_value must already be allocated, or you are DOOMED!
*
* @param tinfo type object
* @param fill_value Pointer that gets the default fill value.
*
* @returns NC_NOERR No error.
* @returns NC_EINVAL Can't find atomic type.
* @author Ed Hartnett
*/
int
nc4_get_default_fill_value(NC_TYPE_INFO_T* tinfo, void *fill_value)
{
if(tinfo->hdr.id > NC_NAT && tinfo->hdr.id <= NC_MAX_ATOMIC_TYPE)
return nc4_get_default_atomic_fill_value(tinfo->hdr.id,fill_value);
#ifdef USE_NETCDF4
switch(tinfo->nc_type_class) {
case NC_ENUM:
return nc4_get_default_atomic_fill_value(tinfo->u.e.base_nc_typeid,fill_value);
case NC_OPAQUE:
case NC_VLEN:
case NC_COMPOUND:
if(fill_value)
memset(fill_value,0,tinfo->size);
break;
default: return NC_EBADTYPE;
}
#endif
return NC_NOERR;
}
/**
* @internal Get the default fill value for an atomic type. Memory for
* fill_value must already be allocated, or you are DOOMED!
*
* @param xtype type id
* @param fill_value Pointer that gets the default fill value.
*
* @returns NC_NOERR No error.
* @returns NC_EINVAL Can't find atomic type.
* @author Ed Hartnett
*/
int
nc4_get_default_atomic_fill_value(nc_type xtype, void *fill_value)
{
switch (xtype)
{
case NC_CHAR:
*(char *)fill_value = NC_FILL_CHAR;
break;
case NC_STRING:
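        /* Note: strdup() allocates the default string fill value; the
         * caller is responsible for freeing it. */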
*(char **)fill_value = strdup(NC_FILL_STRING);
break;
case NC_BYTE:
*(signed char *)fill_value = NC_FILL_BYTE;
break;
case NC_SHORT:
*(short *)fill_value = NC_FILL_SHORT;
break;
case NC_INT:
*(int *)fill_value = NC_FILL_INT;
break;
case NC_UBYTE:
*(unsigned char *)fill_value = NC_FILL_UBYTE;
break;
case NC_USHORT:
*(unsigned short *)fill_value = NC_FILL_USHORT;
break;
case NC_UINT:
*(unsigned int *)fill_value = NC_FILL_UINT;
break;
case NC_INT64:
*(long long *)fill_value = NC_FILL_INT64;
break;
case NC_UINT64:
*(unsigned long long *)fill_value = NC_FILL_UINT64;
break;
case NC_FLOAT:
*(float *)fill_value = NC_FILL_FLOAT;
break;
case NC_DOUBLE:
*(double *)fill_value = NC_FILL_DOUBLE;
break;
default:
return NC_EINVAL;
}
return NC_NOERR;
}