/*********************************************************************
 *   Copyright 2018, UCAR/Unidata
 *   See netcdf/COPYRIGHT file for copying and redistribution conditions.
 *********************************************************************/
/* $Id: data.c,v 1.7 2010/05/24 19:59:56 dmh Exp $ */
/* $Header: /upc/share/CVS/netcdf-3/ncgen/data.c,v 1.7 2010/05/24 19:59:56 dmh Exp $ */

#include "includes.h"
#include "ncoffsets.h"
#include "netcdf_aux.h"
#include "dump.h"

#undef VERIFY

#ifndef __MINGW32__
#define HHPRINT
#endif

#define XVSNPRINTF vsnprintf
/*
#define XVSNPRINTF lvsnprintf
extern int lvsnprintf(char*, size_t, const char*, va_list);
*/

#define DATALISTINIT 32

/* Track all known datalists */
List* alldatalists = NULL;

NCConstant nullconstant;
NCConstant fillconstant;

Datalist* filldatalist;

Bytebuffer* codebuffer;
Bytebuffer* codetmp;
Bytebuffer* stmt;

/* Forward */
static void setconstlist(NCConstant* con, Datalist* dl);

#ifdef VERIFY
/* index of match */
static int
verify(List* all, Datalist* dl)
{
    int i;
    for(i=0;i<listlength(all);i++) {
        void* pi = listget(all,i);
        if(pi == dl)
            return i;
    }
    return -1;
}
#endif
|
2010-06-03 21:24:43 +08:00
|
|
|
|
|
|
|
/**************************************************/
|
|
|
|
/**************************************************/
|
|
|
|
|
2018-11-16 01:00:38 +08:00
|
|
|
NCConstant*
|
|
|
|
nullconst(void)
|
2010-06-03 21:24:43 +08:00
|
|
|
{
|
2018-11-16 01:00:38 +08:00
|
|
|
NCConstant* n = ecalloc(sizeof(NCConstant));
|
|
|
|
return n;
|
2010-06-03 21:24:43 +08:00
|
|
|
}

int
isstringable(nc_type nctype)
{
    switch (nctype) {
    case NC_CHAR: case NC_STRING:
    case NC_BYTE: case NC_UBYTE:
    case NC_FILLVALUE:
        return 1;
    default: break;
    }
    return 0;
}

NCConstant*
list2const(Datalist* list)
{
    NCConstant* con = nullconst();
    ASSERT(list != NULL);
    con->nctype = NC_COMPOUND;
    if(!list->readonly) con->lineno = list->data[0]->lineno;
    setconstlist(con,list);
    con->filled = 0;
    return con;
}

Datalist*
const2list(NCConstant* con)
{
    Datalist* list;
    ASSERT(con != NULL);
    list = builddatalist(1);
    if(list != NULL) {
        dlappend(list,con);
    }
    return list;
}
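
/* Illustrative sketch, not part of the original ncgen sources:
   const2list and list2const convert between a single constant and a
   Datalist. Wrapping a constant and then re-wrapping the list yields
   an NC_COMPOUND constant whose compoundv points at that list. The
   example function itself is hypothetical. */
#if 0
static void
example_const_list_roundtrip(NCConstant* con)
{
    Datalist* dl = const2list(con);       /* 1-element list holding con */
    NCConstant* wrapped = list2const(dl); /* NC_COMPOUND constant over dl */
    /* wrapped->nctype == NC_COMPOUND; wrapped->value.compoundv == dl */
}
#endif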

/**************************************************/
#ifdef GENDEBUG
void
report(char* lead, Datalist* list)
{
    extern void bufdump(Datalist*,Bytebuffer*);
    Bytebuffer* buf = bbNew();
    bufdump(list,buf);
    fprintf(stderr,"\n%s::%s\n",lead,bbContents(buf));
    fflush(stderr);
    bbFree(buf);
}
#endif

/**************************************************/

static void
setconstlist(NCConstant* con, Datalist* dl)
{
#ifdef VERIFY
    int pos = verify(alldatalists,dl);
    if(pos >= 0) {
        dumpdatalist(listget(alldatalists,pos),"XXX");
    }
#endif
    con->value.compoundv = dl;
}

/* Deep constant cloning; returns a pointer to the new constant */
NCConstant*
cloneconstant(NCConstant* con)
{
    NCConstant* newcon = NULL;
    Datalist* newdl = NULL;
    char* s = NULL;

    newcon = nullconst();
    if(newcon == NULL) return newcon;
    *newcon = *con;
    switch (newcon->nctype) {
    case NC_STRING:
        if(newcon->value.stringv.len == 0)
            s = NULL;
        else {
            s = (char*)ecalloc(newcon->value.stringv.len+1);
            if(newcon->value.stringv.len > 0)
                memcpy(s,newcon->value.stringv.stringv,newcon->value.stringv.len);
            s[newcon->value.stringv.len] = '\0';
        }
        newcon->value.stringv.stringv = s;
        break;
    case NC_OPAQUE:
        s = (char*)ecalloc(newcon->value.opaquev.len+1);
        if(newcon->value.opaquev.len > 0)
            memcpy(s,newcon->value.opaquev.stringv,newcon->value.opaquev.len);
        s[newcon->value.opaquev.len] = '\0';
        newcon->value.opaquev.stringv = s;
        break;
    case NC_COMPOUND:
        newdl = clonedatalist(con->value.compoundv);
        setconstlist(newcon,newdl);
        break;
    default: break;
    }
    return newcon;
}
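
/* Illustrative sketch, not part of the original ncgen sources: a deep
   clone owns its own string/opaque storage, so it should be released
   with a deep (non-shallow) freeconstant rather than a bare free. */
#if 0
static void
example_clone_lifecycle(NCConstant* con)
{
    NCConstant* copy = cloneconstant(con);
    if(copy != NULL) {
        /* ... use copy independently of con ... */
        freeconstant(copy,0); /* 0 => deep: clearconstant then nullfree */
    }
}
#endif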

/* Deep constant clear */
void
clearconstant(NCConstant* con)
{
    if(con == NULL) return;
    switch (con->nctype) {
    case NC_STRING:
        if(con->value.stringv.stringv != NULL)
            efree(con->value.stringv.stringv);
        break;
    case NC_OPAQUE:
        if(con->value.opaquev.stringv != NULL)
            efree(con->value.opaquev.stringv);
        break;
    case NC_COMPOUND:
        con->value.compoundv = NULL;
        break;
    default: break;
    }
    memset((void*)con,0,sizeof(NCConstant));
}

void
freeconstant(NCConstant* con, int shallow)
{
    if(!shallow) clearconstant(con);
    nullfree(con);
}

/**************************************************/

int
datalistline(Datalist* ds)
{
    if(ds == NULL || ds->length == 0) return 0;
    return ds->data[0]->lineno;
}

/* Go thru a databuf of possibly nested constants
   and insert commas as needed; ideally, this
   operation should be idempotent so that
   the caller need not worry about it having already
   been applied. Also, handle the situation where there may be
   missing matching right braces.
*/

static char* commifyr(char* p, Bytebuffer* buf);
static char* wordstring(char* p, Bytebuffer* buf, int quote);

void
commify(Bytebuffer* buf)
{
    char* list,*p;

    if(bbLength(buf) == 0) return;
    list = bbDup(buf);
    p = list;
    bbClear(buf);
    commifyr(p,buf);
    bbNull(buf);
    efree(list);
}
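
/* Illustrative sketch, not part of the original ncgen sources:
   commify rewrites a buffer in place, separating adjacent constants
   with ", " while preserving nesting and quoted strings. */
#if 0
static void
example_commify(void)
{
    Bytebuffer* buf = bbNew();
    bbCat(buf,"{1 2 {3 4}}");
    commify(buf);
    /* bbContents(buf) is now "{1, 2, {3, 4}}"; a second call to
       commify leaves it unchanged (idempotence), because existing
       commas and blanks are skipped on re-scan. */
    bbFree(buf);
}
#endif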

/* Requires that the string be balanced
   with respect to braces.
*/
static char*
commifyr(char* p, Bytebuffer* buf)
{
    int comma = 0;
    int c;
    while((c=*p++)) {
        if(c == ' ') continue;
        if(c == ',') continue;
        else if(c == '}') {
            break;
        }
        if(comma) bbCat(buf,", "); else comma=1;
        if(c == '{') {
            bbAppend(buf,'{');
            p = commifyr(p,buf);
            bbAppend(buf,'}');
        } else if(c == '\'' || c == '\"') {
            p = wordstring(p,buf,c);
        } else {
            bbAppend(buf,c);
            p=word(p,buf);
        }
    }
    return p;
}

char*
word(char* p, Bytebuffer* buf)
{
    int c;
    while((c=*p++)) {
        if(c == '}' || c == ' ' || c == ',') break;
        if(c == '\\') {
            bbAppend(buf,c);
            c=*p++;
            if(!c) break;
        }
        bbAppend(buf,(char)c);
    }
    p--; /* leave terminator for parent */
    return p;
}

static char*
wordstring(char* p, Bytebuffer* buf, int quote)
{
    int c;
    bbAppend(buf,quote);
    while((c=*p++)) {
        if(c == '\\') {
            bbAppend(buf,c);
            c = *p++;
            if(c == '\0') return --p;
        } else if(c == quote) {
            bbAppend(buf,c);
            return p;
        }
        bbAppend(buf,c);
    }
    return p;
}

static const char zeros[] =
    "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

void
alignbuffer(NCConstant* prim, Bytebuffer* buf)
{
    int stat = NC_NOERR;
    size_t alignment;
    int pad,offset;

    ASSERT(prim->nctype != NC_COMPOUND);

    if(prim->nctype == NC_ECONST)
        stat = ncaux_class_alignment(prim->value.enumv->typ.typecode,&alignment);
    else if(usingclassic && prim->nctype == NC_STRING)
        stat = ncaux_class_alignment(NC_CHAR,&alignment);
    else if(prim->nctype == NC_CHAR)
        stat = ncaux_class_alignment(NC_CHAR,&alignment);
    else
        stat = ncaux_class_alignment(prim->nctype,&alignment);
    if(!stat) {
        offset = bbLength(buf);
        pad = getpadding(offset,alignment);
        if(pad > 0)
            bbAppendn(buf,(void*)zeros,pad);
    }
}
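
/* Illustrative note, not part of the original ncgen sources:
   getpadding (defined elsewhere in ncgen) computes how many zero bytes
   must follow offset so the next value lands on a multiple of
   alignment; a plausible definition is the standard remainder formula
   sketched here. */
#if 0
static int
example_getpadding(int offset, size_t alignment)
{
    int rem = (alignment == 0 ? 0 : (int)(offset % alignment));
    return (rem == 0 ? 0 : (int)alignment - rem);
}
#endif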

/*
Following routines are in support of language-oriented output
*/

void
codedump(Bytebuffer* buf)
{
    bbCatbuf(codebuffer,buf);
    bbClear(buf);
}

void
codepartial(const char* txt)
{
    bbCat(codebuffer,txt);
}

void
codeline(const char* line)
{
    codepartial(line);
    codepartial("\n");
}

void
codelined(int n, const char* txt)
{
    bbindent(codebuffer,n);
    bbCat(codebuffer,txt);
    codepartial("\n");
}

void
codeflush(void)
{
    if(bbLength(codebuffer) > 0) {
        bbNull(codebuffer);
        fputs(bbContents(codebuffer),stdout);
        fflush(stdout);
        bbClear(codebuffer);
    }
}
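
/* Illustrative sketch, not part of the original ncgen sources: the
   code* routines accumulate generated source text in the global
   codebuffer until codeflush writes it to stdout. */
#if 0
static void
example_codegen(void)
{
    codeline("int main(void) {");
    codelined(1,"return 0;"); /* indented one level (4 spaces) */
    codeline("}");
    codeflush();              /* emit accumulated text to stdout */
}
#endif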

void
bbindent(Bytebuffer* buf, const int n)
{
    bbCat(buf,indented(n));
}

/* Provide a restricted snprintf that writes to an expandable buffer */
/* Simulates a simple snprintf because apparently
   the IRIX one is broken with respect to its return value.
   Supports only the %u %d %f %s %c and %% specifiers,
   with an optional leading hh or ll length modifier.
*/

static void
vbbprintf(Bytebuffer* buf, const char* fmt, va_list argv)
{
    char tmp[128];
    const char* p;
    int c;
    int hcount;
    int lcount;

    char* text;

    for(p=fmt;(c=*p++);) {
        hcount = 0; lcount = 0;
        switch (c) {
        case '%':
retry:      switch ((c=*p++)) {
            case '\0': bbAppend(buf,'%'); p--; break;
            case '%': bbAppend(buf,c); break;
            case 'h':
                hcount++;
                while((c=*p) && (c == 'h')) {hcount++; p++;}
                if(hcount > 2) hcount = 2;
                goto retry;
            case 'l':
                lcount++;
                while((c=*p) && (c == 'l')) {lcount++; p++;}
                if(lcount > 2) lcount = 2;
                goto retry;
            case 'u':
                if(hcount == 2) {
                    snprintf(tmp,sizeof(tmp),
#ifdef HHPRINT
                        "%hhu"
#else
                        "%2u"
#endif
                        ,(unsigned char)va_arg(argv,unsigned int));
                } else if(hcount == 1) {
                    snprintf(tmp,sizeof(tmp),"%hu",
                        (unsigned short)va_arg(argv,unsigned int));
                } else if(lcount == 2) {
                    snprintf(tmp,sizeof(tmp),"%llu",
                        (unsigned long long)va_arg(argv,unsigned long long));
                } else if(lcount == 1) {
                    snprintf(tmp,sizeof(tmp),"%lu",
                        (unsigned long)va_arg(argv,unsigned long));
                } else {
                    snprintf(tmp,sizeof(tmp),"%u",
                        (unsigned int)va_arg(argv,unsigned int));
                }
                bbCat(buf,tmp);
                break;
            case 'd':
                if(hcount == 2) {
                    snprintf(tmp,sizeof(tmp),
#ifdef HHPRINT
                        "%hhd"
#else
                        "%2d"
#endif
                        ,(signed char)va_arg(argv,signed int));
                } else if(hcount == 1) {
                    snprintf(tmp,sizeof(tmp),"%hd",
                        (signed short)va_arg(argv,signed int));
                } else if(lcount == 2) {
                    snprintf(tmp,sizeof(tmp),"%lld",
                        (signed long long)va_arg(argv,signed long long));
                } else if(lcount == 1) {
                    snprintf(tmp,sizeof(tmp),"%ld",
                        (signed long)va_arg(argv,signed long));
                } else {
                    snprintf(tmp,sizeof(tmp),"%d",
                        (signed int)va_arg(argv,signed int));
                }
                bbCat(buf,tmp);
                break;
            case 'f':
                if(lcount > 0) {
                    snprintf(tmp,sizeof(tmp),"((double)%.16g)",
                        (double)va_arg(argv,double));
                } else {
                    snprintf(tmp,sizeof(tmp),"((float)%.8g)",
                        (double)va_arg(argv,double));
                }
                bbCat(buf,tmp);
                break;
            case 's':
                text = va_arg(argv,char*);
                bbCat(buf,text);
                break;
            case 'c':
                c = va_arg(argv,int);
                bbAppend(buf,(char)c);
                break;
            default:
                PANIC1("vbbprintf: unknown specifier: %c",(char)c);
            }
            break;
        default:
            bbAppend(buf,c);
        }
    }
}

void
bbprintf(Bytebuffer* buf, const char *fmt, ...)
{
    va_list argv;
    va_start(argv,fmt);
    vbbprintf(buf,fmt,argv);
    va_end(argv);
}
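
/* Illustrative sketch, not part of the original ncgen sources:
   bbprintf appends formatted text to a buffer using only the
   specifiers vbbprintf understands (%u %d %f %s %c %%, with optional
   hh/ll modifiers). */
#if 0
static void
example_bbprintf(Bytebuffer* buf)
{
    bbprintf(buf,"%s = %d;",(char*)"count",42);
    bbprintf(buf," /* %llu */",(unsigned long long)(1ULL<<40));
}
#endif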

void
bbprintf0(Bytebuffer* buf, const char *fmt, ...)
{
    va_list argv;
    va_start(argv,fmt);
    bbClear(buf);
    vbbprintf(buf,fmt,argv);
    va_end(argv);
}

void
codeprintf(const char *fmt, ...)
{
    va_list argv;
    va_start(argv,fmt);
    vbbprintf(codebuffer,fmt,argv);
    va_end(argv);
}

NCConstant*
emptycompoundconst(int lineno)
{
    NCConstant* c = nullconst();
    c->lineno = lineno;
    c->nctype = NC_COMPOUND;
    setconstlist(c,builddatalist(0));
    c->filled = 0;
    return c;
}

/* Make an empty string constant */
NCConstant*
emptystringconst(int lineno)
{
    NCConstant* c = nullconst();
    ASSERT(c != NULL);
    c->lineno = lineno;
    c->nctype = NC_STRING;
    c->value.stringv.len = 0;
    c->value.stringv.stringv = NULL;
    c->filled = 0;
    return c;
}
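
/*
 * Illustrative sketch: building the two kinds of "empty" constants.
 * The line number argument is normally the parser's current line;
 * 0 here is just a placeholder.
 *
 *     NCConstant* cmpd = emptycompoundconst(0); // NC_COMPOUND, empty sublist
 *     NCConstant* str  = emptystringconst(0);   // NC_STRING, len 0, NULL data
 */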

#define INDENTMAX 256
static char* dent = NULL;

char*
indented(int n)
{
    char* indentation;
    if(dent == NULL) {
        dent = (char*)ecalloc(INDENTMAX+1);
        memset((void*)dent,' ',INDENTMAX);
        dent[INDENTMAX] = '\0';
    }
    if(n*4 >= INDENTMAX) n = INDENTMAX/4;
    indentation = dent+(INDENTMAX - 4*n);
    return indentation;
}
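
/*
 * Example (editorial): indented(n) returns a pointer into the shared
 * blank buffer, 4 spaces per level, clamped at INDENTMAX/4 levels, so
 * the result must not be modified or freed.
 *
 *     codeprintf("%sint x;\n",indented(2));  // emits 8 leading spaces
 */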

void
dlsetalloc(Datalist* dl, size_t need)
{
    NCConstant** newdata = NULL;
    if(dl->readonly) abort();
    if(dl->alloc < need) {
        newdata = (NCConstant**)ecalloc(need*sizeof(NCConstant*));
        if(dl->length > 0)
            memcpy(newdata,dl->data,sizeof(NCConstant*)*dl->length);
        dl->alloc = need;
        nullfree(dl->data);
        dl->data = newdata;
    }
}
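
/*
 * Sketch: dlsetalloc only grows; a request smaller than the current
 * allocation is a no-op, and on growth the old vector is copied and
 * then freed.
 *
 *     Datalist* dl = builddatalist(0);
 *     dlsetalloc(dl,100);  // dl->alloc becomes 100
 *     dlsetalloc(dl,10);   // no change: 10 < dl->alloc
 */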

void
dlextend(Datalist* dl)
{
    size_t newalloc;
    if(dl->readonly) abort();
    newalloc = (dl->alloc > 0?2*dl->alloc:2);
    dlsetalloc(dl,newalloc);
}

void
capture(Datalist* dl)
{
    if(alldatalists == NULL) alldatalists = listnew();
    listpush(alldatalists,dl);
}

Datalist*
builddatalist(int initial)
{
    Datalist* ci;
    if(initial <= 0) initial = DATALISTINIT;
    initial++; /* for header */
    ci = (Datalist*)ecalloc(sizeof(Datalist));
    if(ci == NULL) semerror(0,"out of memory\n");
    ci->data = (NCConstant**)ecalloc(sizeof(NCConstant*)*initial);
    ci->alloc = initial;
    ci->length = 0;
    return ci;
}
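
/*
 * Sketch: a fresh list always has room for at least DATALISTINIT
 * entries (plus one header slot), whatever the argument.
 *
 *     Datalist* dl = builddatalist(-1); // same effect as builddatalist(0)
 *     capture(dl);  // optionally track it for reclaimalldatalists()
 */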

void
dlappend(Datalist* dl, NCConstant* constant)
{
    if(dl->readonly) abort();
    if(dl->length >= dl->alloc)
        dlextend(dl);
    dl->data[dl->length++] = (constant);
}

void
dlset(Datalist* dl, size_t pos, NCConstant* constant)
{
    ASSERT(pos < dl->length);
    dl->data[pos] = (constant);
}
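
/*
 * Sketch: dlappend grows the list as needed; dlset only overwrites an
 * existing slot (pos must be < length) and does not free the previous
 * occupant, so the caller keeps ownership of the replaced constant.
 *
 *     NCConstant* one = nullconst();  // hypothetical fill-in
 *     dlappend(dl,one);               // length goes from n to n+1
 *     dlset(dl,0,one);                // old dl->data[0] is NOT reclaimed
 */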

NCConstant*
dlremove(Datalist* dl, size_t pos)
{
    int i;
    NCConstant* con = NULL;
    ASSERT(dl->length > 0 && pos < dl->length);
    con = dl->data[pos];
    for(i=pos+1;i<dl->length;i++)
        dl->data[i-1] = dl->data[i];
    dl->length--;
    return con;
}
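
/*
 * Sketch: dlremove shifts the tail down and hands the removed constant
 * back to the caller, who becomes responsible for it.
 *
 *     NCConstant* con = dlremove(dl,0);
 *     reclaimconstant(con);  // or keep it; dlremove itself frees nothing
 */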

void
dlinsert(Datalist* dl, size_t pos, Datalist* insertion)
{
    int i;
    int len1 = datalistlen(dl);
    int len2 = datalistlen(insertion);
    int delta = len1 - pos;
    dlsetalloc(dl,len2+len1+1);

    /* move contents of dl up to make room for insertion */
    if(delta > 0)
        memmove(&dl->data[pos+len2],&dl->data[pos],delta*sizeof(NCConstant*));
    dl->length += len2;
    for(i=0;i<len2;i++) {
        NCConstant* con = insertion->data[i];
        con = cloneconstant(con);
        dl->data[pos+i] = con;
    }
}
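
/*
 * Sketch: dlinsert splices deep copies of the insertion list into dl,
 * so the insertion list still owns its own constants afterwards. The
 * names dl and other are hypothetical.
 *
 *     dlinsert(dl,2,other);    // dl->length grows by datalistlen(other)
 *     reclaimdatalist(other);  // safe: dl holds clones, not aliases
 */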

/* Convert a datalist to a compound constant */
NCConstant*
builddatasublist(Datalist* dl)
{
    NCConstant* d = nullconst();
    d->nctype = NC_COMPOUND;
    d->lineno = (dl->length > 0?dl->data[0]->lineno:0);
    setconstlist(d,dl);
    d->filled = 0;
    return d;
}

/* Convert a subsequence of a datalist to its own datalist */
Datalist*
builddatasubset(Datalist* dl, size_t start, size_t count)
{
    Datalist* subset;

    if(dl == NULL || start >= datalistlen(dl)) return NULL;
    if((start + count) > datalistlen(dl))
        count = (datalistlen(dl) - start);
    subset = (Datalist*)ecalloc(sizeof(Datalist));
    subset->readonly = 1;
    subset->length = count;
    subset->alloc = count;
    subset->data = &dl->data[start];
    return subset;
}
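
/*
 * Sketch: a subset is a read-only window, not a copy; its data pointer
 * aliases the parent's vector, and the readonly flag keeps freedatalist
 * from releasing storage the parent still owns.
 *
 *     Datalist* win = builddatasubset(dl,1,2); // views dl->data[1..2]
 *     freedatalist(win);  // frees only the window struct itself
 */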

/* Deep copy */
Datalist*
clonedatalist(Datalist* dl)
{
    int i;
    size_t len;
    Datalist* newdl;

    if(dl == NULL) return NULL;
    len = datalistlen(dl);
    newdl = builddatalist(len);
    /* initialize */
    for(i=0;i<len;i++) {
        NCConstant* con = datalistith(dl,i);
        con = cloneconstant(con);
        dlappend(newdl,con);
    }
    return newdl;
}
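
/*
 * Sketch: clonedatalist is the deep-copy counterpart of
 * builddatasubset; the clone can be reclaimed independently.
 *
 *     Datalist* copy = clonedatalist(dl);
 *     reclaimdatalist(copy);  // does not disturb dl or its constants
 */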

/* recursive helpers */

void
reclaimconstant(NCConstant* con)
{
    if(con == NULL) return;
    switch (con->nctype) {
    case NC_STRING:
        if(con->value.stringv.stringv != NULL)
            efree(con->value.stringv.stringv);
        break;
    case NC_OPAQUE:
        if(con->value.opaquev.stringv != NULL)
            efree(con->value.opaquev.stringv);
        break;
    case NC_COMPOUND:
#ifdef VERIFY
        {int pos;
        if((pos=verify(alldatalists,con->value.compoundv)) >= 0) {
            dumpdatalist(listget(alldatalists,pos),"XXX");
            abort();
        }
        }
#endif
        reclaimdatalist(con->value.compoundv);
        con->value.compoundv = NULL;
        break;
    default: break;
    }
    efree(con);
}

void
reclaimdatalist(Datalist* list)
{
    int i;
    if(list == NULL) return;
    if(!list->readonly) {
        if(list->data != NULL) {
            for(i=0;i<list->length;i++) {
                NCConstant* con = list->data[i];
                if(con != NULL) reclaimconstant(con);
            }
        }
    }
    freedatalist(list);
}

/* Like reclaimdatalist, but do not try to reclaim contained constants */
void
freedatalist(Datalist* list)
{
    if(list == NULL) return;
    if(!list->readonly) {
        efree(list->data);
        list->data = NULL;
    }
    efree(list);
}
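
/*
 * Ownership note (editorial): reclaimdatalist frees the constants and
 * then the list; freedatalist frees only the list structure. Use the
 * latter when the constants are still owned elsewhere, as with the
 * flatten() result below:
 *
 *     Datalist* flat = flatten(nested,rank); // flat aliases the constants
 *     freedatalist(flat);                    // nested still owns them
 */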

void
reclaimalldatalists(void)
{
    int i;
    for(i=0;i<listlength(alldatalists);i++) {
        Datalist* di = listget(alldatalists,i);
        if(di != NULL)
            reclaimdatalist(di);
    }
    efree(alldatalists);
    alldatalists = NULL;
}

static void
flattenR(Datalist* result, Datalist* data, int rank, int depth)
{
    int i;
    NCConstant* con;

    if(rank == depth) return;
    if(datalistlen(data) == 0) return;
    for(i=0;i<datalistlen(data);i++) {
        con = datalistith(data,i);
        if(depth < rank - 1) {
            /* If this is a char list, then we might have short depth */
            if(islistconst(con))
                flattenR(result,compoundfor(con),rank,depth+1);
            else
                dlappend(result,con);
        } else { /* depth == rank - 1, last dimension */
            dlappend(result,con);
        }
    }
}

/* Produce a new list that is the concat of all the leaf constants */
Datalist*
flatten(Datalist* list, int rank)
{
    Datalist* result = builddatalist(0);
    flattenR(result,list,rank,0);
    return result;
}
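
/*
 * Sketch: flattening a rank-2 list {{1,2},{3,4}} yields {1,2,3,4}.
 * The result shares the leaf constants with the input, so release the
 * flattened list with freedatalist (never reclaimdatalist) to avoid
 * freeing the shared constants twice.
 *
 *     Datalist* flat = flatten(nested,2);  // nested is hypothetical
 *     for(i=0;i<datalistlen(flat);i++) { NCConstant* con = datalistith(flat,i); }
 *     freedatalist(flat);
 */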