2012-02-14 08:25:32 +08:00
|
|
|
/*********************************************************************
|
2018-12-07 06:40:43 +08:00
|
|
|
* Copyright 2018, UCAR/Unidata
|
2012-02-14 08:25:32 +08:00
|
|
|
* See netcdf/COPYRIGHT file for copying and redistribution conditions.
|
|
|
|
*********************************************************************/
|
|
|
|
|
|
|
|
#include "includes.h"
|
2017-03-09 08:01:10 +08:00
|
|
|
#include "ncoffsets.h"
|
2018-11-16 01:00:38 +08:00
|
|
|
#include "netcdf_aux.h"
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
/**************************************************/
|
|
|
|
/* Code for generating data lists*/
|
|
|
|
/**************************************************/
|
|
|
|
/* For datalist constant rules: see the rules on the man page */
|
|
|
|
|
|
|
|
/* Forward*/
|
|
|
|
static void generate_array(Symbol*,Bytebuffer*,Datalist*,Generator*,Writer);
|
2013-07-17 04:22:48 +08:00
|
|
|
static void generate_primdata(Symbol*, NCConstant*, Bytebuffer*, Datalist* fillsrc, Generator*);
|
|
|
|
static void generate_fieldarray(Symbol*, NCConstant*, Dimset*, Bytebuffer*, Datalist* fillsrc, Generator*);
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
/* Mnemonics */
|
|
|
|
#define VLENLIST1
|
|
|
|
#define FIELDARRAY 1
|
|
|
|
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do not use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
#define ITER_BUFSIZE_DEFAULT (2<<20)
|
|
|
|
|
|
|
|
/* Debug helper: print a rank-length vector of sizes to stderr
   in the form "( v0 v1 ... )". No trailing newline is emitted. */
void
pvec(int rank, size_t* vector)
{
    int i;
    fprintf(stderr,"(");
    for(i=0;i<rank;i++)
	/* cast matches %lu exactly; the original cast to (long) was a
	   signed/unsigned format-specifier mismatch (undefined behavior) */
	fprintf(stderr," %lu",(unsigned long)vector[i]);
    fprintf(stderr,")");
}
|
|
|
|
|
2012-02-14 08:25:32 +08:00
|
|
|
/**************************************************/
|
2014-09-19 08:26:06 +08:00
|
|
|
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
int
|
|
|
|
generator_getstate(Generator* generator ,void** statep)
|
|
|
|
{
|
2016-11-18 06:29:32 +08:00
|
|
|
if(statep) *statep = (void*)generator->globalstate;
|
2012-02-14 08:25:32 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Install a new global state pointer into the generator,
   replacing any previous state. Always returns 1. */
int generator_reset(Generator* generator, void* state)
{
    generator->globalstate = state;
    return 1;
}
|
|
|
|
/**************************************************/
|
|
|
|
|
|
|
|
void
|
|
|
|
generate_attrdata(Symbol* asym, Generator* generator, Writer writer, Bytebuffer* codebuf)
|
|
|
|
{
|
|
|
|
Symbol* basetype = asym->typ.basetype;
|
|
|
|
nc_type typecode = basetype->typ.typecode;
|
|
|
|
|
2013-07-11 04:00:48 +08:00
|
|
|
if(typecode == NC_CHAR) {
|
2012-02-14 08:25:32 +08:00
|
|
|
gen_charattr(asym->data,codebuf);
|
|
|
|
} else {
|
|
|
|
int uid;
|
|
|
|
size_t count;
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listbegin(generator,asym,NULL,LISTATTR,asym->data->length,codebuf,&uid);
|
2012-02-14 08:25:32 +08:00
|
|
|
for(count=0;count<asym->data->length;count++) {
|
2014-09-19 08:26:06 +08:00
|
|
|
NCConstant* con = datalistith(asym->data,count);
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->list(generator,asym,NULL,LISTATTR,uid,count,codebuf);
|
2012-02-14 08:25:32 +08:00
|
|
|
generate_basetype(asym->typ.basetype,con,codebuf,NULL,generator);
|
|
|
|
}
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listend(generator,asym,NULL,LISTATTR,uid,count,codebuf);
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
|
|
|
writer(generator,asym,codebuf,0,NULL,NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
generate_vardata(Symbol* vsym, Generator* generator, Writer writer, Bytebuffer* code)
|
|
|
|
{
|
|
|
|
Dimset* dimset = &vsym->typ.dimset;
|
|
|
|
int rank = dimset->ndims;
|
|
|
|
Symbol* basetype = vsym->typ.basetype;
|
2012-05-05 03:22:30 +08:00
|
|
|
Datalist* filler = getfiller(vsym);
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
if(vsym->data == NULL) return;
|
|
|
|
|
|
|
|
if(rank == 0) {/*scalar case*/
|
2013-09-22 06:19:06 +08:00
|
|
|
NCConstant* c0 = datalistith(vsym->data,0);
|
2012-05-05 03:22:30 +08:00
|
|
|
generate_basetype(basetype,c0,code,filler,generator);
|
2012-02-14 08:25:32 +08:00
|
|
|
writer(generator,vsym,code,0,NULL,NULL);
|
|
|
|
} else {/*rank > 0*/
|
2012-05-05 03:22:30 +08:00
|
|
|
generate_array(vsym,code,filler,generator,writer);
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
|
|
|
}
|
2014-08-16 05:42:13 +08:00
|
|
|
|
2018-11-16 01:00:38 +08:00
|
|
|
/* Generate an instance of the basetype using the value of con*/
|
2012-02-14 08:25:32 +08:00
|
|
|
void
|
2013-07-17 04:22:48 +08:00
|
|
|
generate_basetype(Symbol* tsym, NCConstant* con, Bytebuffer* codebuf, Datalist* filler, Generator* generator)
|
2012-02-14 08:25:32 +08:00
|
|
|
{
|
|
|
|
Datalist* data;
|
2016-11-18 06:29:32 +08:00
|
|
|
int offsetbase = 0;
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
switch (tsym->subclass) {
|
|
|
|
|
|
|
|
case NC_ENUM:
|
|
|
|
case NC_OPAQUE:
|
|
|
|
case NC_PRIM:
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
if(con == NULL || isfillconst(con)) {
|
|
|
|
Datalist* fill = (filler==NULL?getfiller(tsym):filler);
|
|
|
|
ASSERT(fill->length == 1);
|
|
|
|
con = datalistith(fill,0);
|
|
|
|
}
|
2014-09-19 08:26:06 +08:00
|
|
|
if(islistconst(con)) {
|
|
|
|
semerror(constline(con),"Expected primitive found {..}");
|
|
|
|
}
|
|
|
|
generate_primdata(tsym,con,codebuf,filler,generator);
|
|
|
|
break;
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
case NC_COMPOUND: {
|
2014-09-19 08:26:06 +08:00
|
|
|
int i,uid, nfields, dllen;
|
|
|
|
if(con == NULL || isfillconst(con)) {
|
|
|
|
Datalist* fill = (filler==NULL?getfiller(tsym):filler);
|
2018-11-16 01:00:38 +08:00
|
|
|
ASSERT(fill->length == 1);
|
|
|
|
con = fill->data[0];
|
2014-10-03 01:28:07 +08:00
|
|
|
if(!islistconst(con)) {
|
|
|
|
if(con)
|
2014-09-19 08:26:06 +08:00
|
|
|
semerror(con->lineno,"Compound data fill value is not enclosed in {..}");
|
2014-10-03 01:28:07 +08:00
|
|
|
else
|
|
|
|
semerror(0,"Compound data fill value not enclosed in {..}, con is NULL.");
|
|
|
|
}
|
2014-09-19 08:26:06 +08:00
|
|
|
}
|
2015-06-19 04:37:31 +08:00
|
|
|
|
|
|
|
if(!con) { /* fail on null compound. */
|
|
|
|
semerror(constline(con),"NULL compound data.");
|
2015-06-19 04:41:09 +08:00
|
|
|
break;
|
2015-06-19 04:37:31 +08:00
|
|
|
}
|
|
|
|
|
2014-09-19 08:26:06 +08:00
|
|
|
if(!islistconst(con)) {/* fail on no compound*/
|
|
|
|
semerror(constline(con),"Compound data must be enclosed in {..}");
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
2015-06-19 04:37:31 +08:00
|
|
|
|
2014-09-19 08:26:06 +08:00
|
|
|
data = con->value.compoundv;
|
2013-06-27 02:55:30 +08:00
|
|
|
nfields = listlength(tsym->subnodes);
|
2014-09-19 08:26:06 +08:00
|
|
|
dllen = datalistlen(data);
|
|
|
|
if(dllen > nfields) {
|
2015-06-19 04:37:31 +08:00
|
|
|
semerror(con->lineno,"Datalist longer than the number of compound fields");
|
2014-09-19 08:26:06 +08:00
|
|
|
break;
|
|
|
|
}
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listbegin(generator,tsym,&offsetbase,LISTCOMPOUND,listlength(tsym->subnodes),codebuf,&uid);
|
2013-06-27 02:55:30 +08:00
|
|
|
for(i=0;i<nfields;i++) {
|
2012-02-14 08:25:32 +08:00
|
|
|
Symbol* field = (Symbol*)listget(tsym->subnodes,i);
|
2014-09-19 08:26:06 +08:00
|
|
|
con = datalistith(data,i);
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->list(generator,field,&offsetbase,LISTCOMPOUND,uid,i,codebuf);
|
2012-02-14 08:25:32 +08:00
|
|
|
generate_basetype(field,con,codebuf,NULL,generator);
|
2014-09-19 08:26:06 +08:00
|
|
|
}
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listend(generator,tsym,&offsetbase,LISTCOMPOUND,uid,i,codebuf);
|
2014-09-19 08:26:06 +08:00
|
|
|
} break;
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
case NC_VLEN: {
|
2014-09-19 08:26:06 +08:00
|
|
|
Bytebuffer* vlenbuf;
|
2012-02-14 08:25:32 +08:00
|
|
|
int uid;
|
2014-09-19 08:26:06 +08:00
|
|
|
size_t count;
|
|
|
|
|
|
|
|
if(con == NULL || isfillconst(con)) {
|
|
|
|
Datalist* fill = (filler==NULL?getfiller(tsym):filler);
|
|
|
|
ASSERT(fill->length == 1);
|
2018-11-16 01:00:38 +08:00
|
|
|
con = fill->data[0];
|
2014-09-19 08:26:06 +08:00
|
|
|
if(con->nctype != NC_COMPOUND) {
|
|
|
|
semerror(con->lineno,"Vlen data fill value is not enclosed in {..}");
|
|
|
|
}
|
|
|
|
}
|
2012-02-14 08:25:32 +08:00
|
|
|
|
2014-09-19 08:26:06 +08:00
|
|
|
if(!islistconst(con)) {
|
|
|
|
semerror(constline(con),"Vlen data must be enclosed in {..}");
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
|
|
|
data = con->value.compoundv;
|
|
|
|
/* generate the nc_vlen_t instance*/
|
2014-09-19 08:26:06 +08:00
|
|
|
vlenbuf = bbNew();
|
|
|
|
if(tsym->typ.basetype->typ.typecode == NC_CHAR) {
|
2018-11-16 01:00:38 +08:00
|
|
|
gen_charseq(data,vlenbuf);
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->vlenstring(generator,tsym,vlenbuf,&uid,&count);
|
2014-09-19 08:26:06 +08:00
|
|
|
} else {
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listbegin(generator,tsym,NULL,LISTVLEN,data->length,codebuf,&uid);
|
2012-02-14 08:25:32 +08:00
|
|
|
for(count=0;count<data->length;count++) {
|
Improve performance of the nc_reclaim_data and nc_copy_data functions.
re: Issue https://github.com/Unidata/netcdf-c/issues/2685
re: PR https://github.com/Unidata/netcdf-c/pull/2179
As noted in PR https://github.com/Unidata/netcdf-c/pull/2179,
the old code did not allow for reclaiming instances of types,
nor for properly copying them. That PR provided new functions
capable of reclaiming/copying instances of arbitrary types.
However, as noted by Issue https://github.com/Unidata/netcdf-c/issues/2685, using these
most general functions resulted in a significant performance
degradation, even for common cases.
This PR attempts to mitigate the cost of using the general
reclaim/copy functions in two ways.
First, the previous functions operating at the top level by
using ncid and typeid arguments. These functions were augmented
with equivalent versions that used the netcdf-c library internal
data structures to allow direct access to needed information.
These new functions are used internally to the library.
The second mitigation involves optimizing the internal functions
by providing early tests for common cases. This avoids
unnecessary recursive function calls.
The overall result is a significant improvement in speed by a
factor of roughly twenty -- your mileage may vary. These
optimized functions are still not as fast as the original (more
limited) functions, but they are getting close. Additional optimizations are
possible. But the cost is a significant "uglification" of the
code that I deemed a step too far, at least for now.
## Misc. Changes
1. Added a test case to check the proper reclamation/copy of complex types.
2. Found and fixed some places where nc_reclaim/copy should have been used.
3. Replaced, in the netcdf-c library, (almost all) occurrences of nc_reclaim_copy with calls to NC_reclaim/copy. This plus the optimizations is the primary speed-up mechanism.
4. In DAP4, the metadata is held in a substrate in-memory file; this required some changes so that the reclaim/copy code accessed that substrate dispatcher rather than the DAP4 dispatcher.
5. Re-factored and isolated the code that computes if a type is (transitively) variable-sized or not.
6. Clean up the reclamation code in ncgen; adding the use of nc_reclaim exposed some memory problems.
2023-05-21 07:11:25 +08:00
|
|
|
NCConstant* con;
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->list(generator,tsym,NULL,LISTVLEN,uid,count,vlenbuf);
|
2014-09-19 08:26:06 +08:00
|
|
|
con = datalistith(data,count);
|
2013-06-27 02:55:30 +08:00
|
|
|
generate_basetype(tsym->typ.basetype,con,vlenbuf,NULL,generator);
|
2014-09-19 08:26:06 +08:00
|
|
|
}
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listend(generator,tsym,NULL,LISTVLEN,uid,count,codebuf,(void*)vlenbuf);
|
2014-09-19 08:26:06 +08:00
|
|
|
}
|
2018-11-16 01:00:38 +08:00
|
|
|
generator->vlendecl(generator,tsym,codebuf,uid,count,vlenbuf); /* Will extract contents of vlenbuf */
|
2014-09-19 08:26:06 +08:00
|
|
|
bbFree(vlenbuf);
|
2012-02-14 08:25:32 +08:00
|
|
|
} break;
|
|
|
|
|
|
|
|
case NC_FIELD:
|
2014-09-19 08:26:06 +08:00
|
|
|
if(tsym->typ.dimset.ndims > 0) {
|
|
|
|
/* Verify that we have a sublist (or fill situation) */
|
|
|
|
if(con != NULL && !isfillconst(con) && !islistconst(con))
|
|
|
|
semerror(constline(con),"Dimensioned fields must be enclose in {...}");
|
2012-02-14 08:25:32 +08:00
|
|
|
generate_fieldarray(tsym->typ.basetype,con,&tsym->typ.dimset,codebuf,filler,generator);
|
2014-09-19 08:26:06 +08:00
|
|
|
} else {
|
|
|
|
generate_basetype(tsym->typ.basetype,con,codebuf,NULL,generator);
|
|
|
|
}
|
|
|
|
break;
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
default: PANIC1("generate_basetype: unexpected subclass %d",tsym->subclass);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Used only for structure field arrays*/
|
|
|
|
static void
|
2013-07-17 04:22:48 +08:00
|
|
|
generate_fieldarray(Symbol* basetype, NCConstant* con, Dimset* dimset,
|
2014-09-19 08:26:06 +08:00
|
|
|
Bytebuffer* codebuf, Datalist* filler, Generator* generator)
|
2012-02-14 08:25:32 +08:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int chartype = (basetype->typ.typecode == NC_CHAR);
|
|
|
|
Datalist* data;
|
2014-09-19 08:26:06 +08:00
|
|
|
int rank = rankfor(dimset);
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
ASSERT(dimset->ndims > 0);
|
|
|
|
|
|
|
|
if(con != NULL && !isfillconst(con))
|
|
|
|
data = con->value.compoundv;
|
|
|
|
else
|
2014-09-19 08:26:06 +08:00
|
|
|
data = NULL;
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
if(chartype) {
|
2014-09-19 08:26:06 +08:00
|
|
|
Bytebuffer* charbuf = bbNew();
|
|
|
|
gen_chararray(dimset,0,data,charbuf,filler);
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->charconstant(generator,basetype,codebuf,charbuf);
|
2014-09-19 08:26:06 +08:00
|
|
|
bbFree(charbuf);
|
2012-02-14 08:25:32 +08:00
|
|
|
} else {
|
2014-09-19 08:26:06 +08:00
|
|
|
int uid;
|
|
|
|
size_t xproduct = crossproduct(dimset,0,rank); /* compute total number of elements */
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listbegin(generator,basetype,NULL,LISTFIELDARRAY,xproduct,codebuf,&uid);
|
2012-02-14 08:25:32 +08:00
|
|
|
for(i=0;i<xproduct;i++) {
|
2014-09-19 08:26:06 +08:00
|
|
|
con = (data == NULL ? NULL : datalistith(data,i));
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->list(generator,basetype,NULL,LISTFIELDARRAY,uid,i,codebuf);
|
2012-02-14 08:25:32 +08:00
|
|
|
generate_basetype(basetype,con,codebuf,NULL,generator);
|
2014-09-19 08:26:06 +08:00
|
|
|
}
|
2016-11-18 06:29:32 +08:00
|
|
|
generator->listend(generator,basetype,NULL,LISTFIELDARRAY,uid,i,codebuf);
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-05 03:22:30 +08:00
|
|
|
|
|
|
|
/* An opaque string value might not conform
|
|
|
|
to the size of the opaque to which it is being
|
|
|
|
assigned. Normalize it to match the required
|
|
|
|
opaque length (in bytes).
|
|
|
|
Note that the string is a sequence of nibbles (4 bits).
|
|
|
|
*/
|
|
|
|
static void
|
2013-07-17 04:22:48 +08:00
|
|
|
normalizeopaquelength(NCConstant* prim, unsigned long nbytes)
|
2012-05-05 03:22:30 +08:00
|
|
|
{
|
|
|
|
int nnibs = 2*nbytes;
|
|
|
|
ASSERT(prim->nctype==NC_OPAQUE);
|
2014-08-16 05:42:13 +08:00
|
|
|
if(prim->value.opaquev.len == nnibs) {
|
2012-05-05 03:22:30 +08:00
|
|
|
/* do nothing*/
|
|
|
|
} else if(prim->value.opaquev.len > nnibs) { /* truncate*/
|
2014-09-19 08:26:06 +08:00
|
|
|
prim->value.opaquev.stringv[nnibs] = '\0';
|
|
|
|
prim->value.opaquev.len = nnibs;
|
2012-05-05 03:22:30 +08:00
|
|
|
} else {/* prim->value.opaquev.len < nnibs => expand*/
|
|
|
|
char* s;
|
2017-10-31 05:52:08 +08:00
|
|
|
s = (char*)ecalloc(nnibs+1);
|
2014-09-19 08:26:06 +08:00
|
|
|
memset(s,'0',nnibs); /* Fill with '0' characters */
|
|
|
|
memcpy(s,prim->value.opaquev.stringv,prim->value.opaquev.len);
|
|
|
|
s[nnibs] = '\0';
|
|
|
|
efree(prim->value.opaquev.stringv);
|
|
|
|
prim->value.opaquev.stringv=s;
|
|
|
|
prim->value.opaquev.len = nnibs;
|
2012-05-05 03:22:30 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-02-14 08:25:32 +08:00
|
|
|
static void
|
2013-07-17 04:22:48 +08:00
|
|
|
generate_primdata(Symbol* basetype, NCConstant* prim, Bytebuffer* codebuf,
|
2014-09-19 08:26:06 +08:00
|
|
|
Datalist* filler, Generator* generator)
|
2012-02-14 08:25:32 +08:00
|
|
|
{
|
2018-11-16 01:00:38 +08:00
|
|
|
NCConstant* target;
|
2013-09-22 06:19:06 +08:00
|
|
|
int match;
|
2012-02-14 08:25:32 +08:00
|
|
|
|
|
|
|
if(prim == NULL || isfillconst(prim)) {
|
2014-09-19 08:26:06 +08:00
|
|
|
Datalist* fill = (filler==NULL?getfiller(basetype):filler);
|
|
|
|
ASSERT(fill->length == 1);
|
|
|
|
prim = datalistith(fill,0);
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
|
|
|
|
2014-09-19 08:26:06 +08:00
|
|
|
ASSERT((prim->nctype != NC_COMPOUND));
|
2012-02-14 08:25:32 +08:00
|
|
|
|
2013-09-22 06:19:06 +08:00
|
|
|
/* Verify that the constant is consistent with the type */
|
|
|
|
match = 1;
|
|
|
|
switch (prim->nctype) {
|
|
|
|
case NC_CHAR:
|
|
|
|
case NC_BYTE:
|
|
|
|
case NC_SHORT:
|
|
|
|
case NC_INT:
|
|
|
|
case NC_FLOAT:
|
|
|
|
case NC_DOUBLE:
|
|
|
|
case NC_UBYTE:
|
|
|
|
case NC_USHORT:
|
|
|
|
case NC_UINT:
|
|
|
|
case NC_INT64:
|
|
|
|
case NC_UINT64:
|
|
|
|
case NC_STRING:
|
2014-09-19 08:26:06 +08:00
|
|
|
match = (basetype->subclass == NC_PRIM ? 1 : 0);
|
|
|
|
break;
|
2013-09-22 06:19:06 +08:00
|
|
|
|
|
|
|
#ifdef USE_NETCDF4
|
|
|
|
case NC_NIL:
|
2014-09-19 08:26:06 +08:00
|
|
|
match = (basetype->subclass == NC_PRIM && basetype->typ.typecode == NC_STRING ? 1 : 0);
|
|
|
|
break;
|
2013-09-22 06:19:06 +08:00
|
|
|
|
|
|
|
case NC_OPAQUE:
|
2014-09-19 08:26:06 +08:00
|
|
|
/* OPAQUE is also consistent with numbers */
|
|
|
|
match = (basetype->subclass == NC_OPAQUE
|
|
|
|
|| basetype->subclass == NC_PRIM ? 1 : 0);
|
|
|
|
break;
|
2013-09-22 06:19:06 +08:00
|
|
|
case NC_ECONST:
|
2014-09-19 08:26:06 +08:00
|
|
|
match = (basetype->subclass == NC_ENUM ? 1 : 0);
|
|
|
|
if(match) {
|
|
|
|
/* Make sure this econst belongs to this enum */
|
|
|
|
Symbol* ec = prim->value.enumv;
|
|
|
|
Symbol* en = ec->container;
|
|
|
|
match = (en == basetype);
|
|
|
|
}
|
|
|
|
break;
|
2013-09-22 06:19:06 +08:00
|
|
|
#endif
|
|
|
|
default:
|
2014-09-19 08:26:06 +08:00
|
|
|
match = 0;
|
2013-09-22 06:19:06 +08:00
|
|
|
}
|
|
|
|
if(!match) {
|
|
|
|
semerror(constline(prim),"Data value is not consistent with the expected type: %s",
|
2014-09-19 08:26:06 +08:00
|
|
|
basetype->name);
|
2013-09-22 06:19:06 +08:00
|
|
|
}
|
|
|
|
|
2018-11-16 01:00:38 +08:00
|
|
|
target = nullconst();
|
|
|
|
target->nctype = basetype->typ.typecode;
|
2012-02-14 08:25:32 +08:00
|
|
|
|
2018-11-16 01:00:38 +08:00
|
|
|
if(target->nctype != NC_ECONST) {
|
|
|
|
convert1(prim,target);
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
|
|
|
|
2018-11-16 01:00:38 +08:00
|
|
|
switch (target->nctype) {
|
2012-02-14 08:25:32 +08:00
|
|
|
case NC_ECONST:
|
|
|
|
if(basetype->subclass != NC_ENUM) {
|
2014-09-19 08:26:06 +08:00
|
|
|
semerror(constline(prim),"Conversion to enum not supported (yet)");
|
|
|
|
} break;
|
2014-08-16 05:42:13 +08:00
|
|
|
case NC_OPAQUE:
|
2018-11-16 01:00:38 +08:00
|
|
|
normalizeopaquelength(target,basetype->typ.size);
|
2014-09-19 08:26:06 +08:00
|
|
|
break;
|
2012-02-14 08:25:32 +08:00
|
|
|
default:
|
2014-09-19 08:26:06 +08:00
|
|
|
break;
|
2012-02-14 08:25:32 +08:00
|
|
|
}
|
2018-11-16 01:00:38 +08:00
|
|
|
generator->constant(generator,basetype,target,codebuf);
|
|
|
|
reclaimconstant(target);
|
|
|
|
target = NULL;
|
2012-02-14 08:25:32 +08:00
|
|
|
return;
|
|
|
|
}
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do not use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
|
|
|
|
/* Avoid long argument lists */
/* Bundle of per-variable state threaded through the recursive
   array-generation routines (see generate_arrayR). */
struct Args {
    Symbol* vsym;          /* variable whose data is being generated */
    Dimset* dimset;        /* dimension set of vsym */
    int typecode;          /* base type code of vsym */
    int storage;           /* storage kind; presumably contiguous vs chunked -- TODO confirm against the code that fills this in */
    int rank;              /* number of dimensions */
    Generator* generator;  /* format-specific generation callbacks */
    Writer writer;         /* callback used to emit completed data */
    Bytebuffer* code;      /* accumulates the generated output */
    Datalist* filler;      /* fill values for missing data */
    size_t dimsizes[NC_MAX_VAR_DIMS];   /* size of each dimension */
    size_t chunksizes[NC_MAX_VAR_DIMS]; /* chunk size per dimension */
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
generate_arrayR(struct Args* args, int dimindex, size_t* index, Datalist* data)
|
|
|
|
{
|
|
|
|
size_t counter,stop;
|
|
|
|
size_t count[NC_MAX_VAR_DIMS];
|
|
|
|
Datalist* actual;
|
|
|
|
Symbol* dim = args->dimset->dimsyms[dimindex];
|
|
|
|
|
|
|
|
stop = args->dimsizes[dimindex];
|
|
|
|
|
|
|
|
/* Four cases: (dimindex==rank-1|dimindex<rank-1) X (unlimited|!unlimited) */
|
|
|
|
if(dimindex == (args->rank - 1)) {/* base case */
|
|
|
|
int uid;
|
|
|
|
if(dimindex > 0 && dim->dim.isunlimited) {
|
|
|
|
/* Get the unlimited list */
|
|
|
|
NCConstant* con = datalistith(data,0);
|
2020-09-10 00:24:33 +08:00
|
|
|
actual = compoundfor(con);
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
} else
|
|
|
|
actual = data;
|
|
|
|
/* For last index, dump all of its elements */
|
|
|
|
args->generator->listbegin(args->generator,args->vsym,NULL,LISTDATA,datalistlen(actual),args->code,&uid);
|
|
|
|
for(counter=0;counter<stop;counter++) {
|
|
|
|
NCConstant* con = datalistith(actual,counter);
|
|
|
|
generate_basetype(args->vsym->typ.basetype,con,args->code,args->filler,args->generator);
|
|
|
|
args->generator->list(args->generator,args->vsym,NULL,LISTDATA,uid,counter,args->code);
|
|
|
|
}
|
|
|
|
args->generator->listend(args->generator,args->vsym,NULL,LISTDATA,uid,counter,args->code);
|
|
|
|
memcpy(count,onesvector,sizeof(size_t)*dimindex);
|
|
|
|
count[dimindex] = stop;
|
Improve performance of the nc_reclaim_data and nc_copy_data functions.
re: Issue https://github.com/Unidata/netcdf-c/issues/2685
re: PR https://github.com/Unidata/netcdf-c/pull/2179
As noted in PR https://github.com/Unidata/netcdf-c/pull/2179,
the old code did not allow for reclaiming instances of types,
nor for properly copying them. That PR provided new functions
capable of reclaiming/copying instances of arbitrary types.
However, as noted by Issue https://github.com/Unidata/netcdf-c/issues/2685, using these
most general functions resulted in a significant performance
degradation, even for common cases.
This PR attempts to mitigate the cost of using the general
reclaim/copy functions in two ways.
First, the previous functions operating at the top level by
using ncid and typeid arguments. These functions were augmented
with equivalent versions that used the netcdf-c library internal
data structures to allow direct access to needed information.
These new functions are used internally to the library.
The second mitigation involves optimizing the internal functions
by providing early tests for common cases. This avoids
unnecessary recursive function calls.
The overall result is a significant improvement in speed by a
factor of roughly twenty -- your mileage may vary. These
optimized functions are still not as fast as the original (more
limited) functions, but they are getting close. Additional optimizations are
possible. But the cost is a significant "uglification" of the
code that I deemed a step too far, at least for now.
## Misc. Changes
1. Added a test case to check the proper reclamation/copy of complex types.
2. Found and fixed some places where nc_reclaim/copy should have been used.
3. Replaced, in the netcdf-c library, (almost all) occurrences of nc_reclaim_copy with calls to NC_reclaim/copy. This plus the optimizations is the primary speed-up mechanism.
4. In DAP4, the metadata is held in a substrate in-memory file; this required some changes so that the reclaim/copy code accessed that substrate dispatcher rather than the DAP4 dispatcher.
5. Re-factored and isolated the code that computes if a type is (transitively) variable-sized or not.
6. Clean up the reclamation code in ncgen; adding the use of nc_reclaim exposed some memory problems.
2023-05-21 07:11:25 +08:00
|
|
|
/* Write the data; also reclaims written data */
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
args->writer(args->generator,args->vsym,args->code,args->rank,index,count);
|
|
|
|
bbClear(args->code);
|
2020-09-10 00:24:33 +08:00
|
|
|
} else {
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
actual = data;
|
|
|
|
/* Iterate over this dimension */
|
|
|
|
for(counter = 0;counter < stop; counter++) {
|
|
|
|
Datalist* subdata = NULL;
|
|
|
|
NCConstant* con = datalistith(actual,counter);
|
|
|
|
if(con == NULL)
|
|
|
|
subdata = filldatalist;
|
|
|
|
else {
|
|
|
|
ASSERT(islistconst(con));
|
|
|
|
if(islistconst(con)) subdata = compoundfor(con);
|
|
|
|
}
|
|
|
|
index[dimindex] = counter;
|
|
|
|
generate_arrayR(args,dimindex+1,index,subdata); /* recurse */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
generate_array(Symbol* vsym, Bytebuffer* code, Datalist* filler, Generator* generator, Writer writer)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
size_t index[NC_MAX_VAR_DIMS];
|
|
|
|
struct Args args;
|
|
|
|
size_t totalsize;
|
|
|
|
int nunlimited = 0;
|
|
|
|
|
|
|
|
assert(vsym->typ.dimset.ndims > 0);
|
|
|
|
|
|
|
|
args.vsym = vsym;
|
|
|
|
args.dimset = &vsym->typ.dimset;
|
|
|
|
args.generator = generator;
|
|
|
|
args.writer = writer;
|
|
|
|
args.filler = filler;
|
|
|
|
args.code = code;
|
|
|
|
args.rank = args.dimset->ndims;
|
|
|
|
args.storage = vsym->var.special._Storage;
|
|
|
|
args.typecode = vsym->typ.basetype->typ.typecode;
|
|
|
|
|
|
|
|
assert(args.rank > 0);
|
2020-09-10 00:24:33 +08:00
|
|
|
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
totalsize = 1; /* total # elements in the array */
|
|
|
|
for(i=0;i<args.rank;i++) {
|
|
|
|
args.dimsizes[i] = args.dimset->dimsyms[i]->dim.declsize;
|
|
|
|
totalsize *= args.dimsizes[i];
|
|
|
|
}
|
|
|
|
nunlimited = countunlimited(args.dimset);
|
|
|
|
|
2021-04-14 06:56:43 +08:00
|
|
|
if(vsym->var.special._Storage == NC_CHUNKED) {
|
|
|
|
if(vsym->var.special._ChunkSizes)
|
|
|
|
memcpy(args.chunksizes,vsym->var.special._ChunkSizes,sizeof(size_t)*args.rank);
|
|
|
|
}
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
|
|
|
|
memset(index,0,sizeof(index));
|
|
|
|
|
|
|
|
/* Special case for NC_CHAR */
|
|
|
|
if(args.typecode == NC_CHAR) {
|
|
|
|
size_t start[NC_MAX_VAR_DIMS];
|
|
|
|
size_t count[NC_MAX_VAR_DIMS];
|
|
|
|
Bytebuffer* charbuf = bbNew();
|
|
|
|
gen_chararray(args.dimset,0,args.vsym->data,charbuf,args.filler);
|
|
|
|
args.generator->charconstant(args.generator,args.vsym,args.code,charbuf);
|
|
|
|
memset(start,0,sizeof(size_t)*args.rank);
|
|
|
|
memcpy(count,args.dimsizes,sizeof(size_t)*args.rank);
|
|
|
|
args.writer(args.generator,args.vsym,args.code,args.rank,start,count);
|
|
|
|
bbFree(charbuf);
|
|
|
|
bbClear(args.code);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If the total no. of elements is less than some max and no unlimited,
|
|
|
|
then generate a single vara that covers the whole array */
|
|
|
|
if(totalsize <= wholevarsize && nunlimited == 0) {
|
|
|
|
Symbol* basetype = args.vsym->typ.basetype;
|
|
|
|
size_t counter;
|
2020-09-10 00:24:33 +08:00
|
|
|
int uid;
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
Datalist* flat = flatten(vsym->data,args.rank);
|
|
|
|
args.generator->listbegin(args.generator,basetype,NULL,LISTDATA,totalsize,args.code,&uid);
|
|
|
|
for(counter=0;counter<totalsize;counter++) {
|
|
|
|
NCConstant* con = datalistith(flat,counter);
|
|
|
|
if(con == NULL)
|
|
|
|
con = &fillconstant;
|
|
|
|
generate_basetype(basetype,con,args.code,args.filler,args.generator);
|
|
|
|
args.generator->list(args.generator,args.vsym,NULL,LISTDATA,uid,counter,args.code);
|
|
|
|
}
|
|
|
|
args.generator->listend(args.generator,args.vsym,NULL,LISTDATA,uid,counter,args.code);
|
|
|
|
args.writer(args.generator,args.vsym,args.code,args.rank,zerosvector,args.dimsizes);
|
|
|
|
freedatalist(flat);
|
|
|
|
} else
|
2020-09-10 00:24:33 +08:00
|
|
|
generate_arrayR(&args, 0, index, vsym->data);
|
This PR adds EXPERIMENTAL support for accessing data in the
cloud using a variant of the Zarr protocol and storage
format. This enhancement is generically referred to as "NCZarr".
The data model supported by NCZarr is netcdf-4 minus the user-defined
types and the String type. In this sense it is similar to the CDF-5
data model.
More detailed information about enabling and using NCZarr is
described in the document NUG/nczarr.md and in a
[Unidata Developer's blog entry](https://www.unidata.ucar.edu/blogs/developer/en/entry/overview-of-zarr-support-in).
WARNING: this code has had limited testing, so do use this version
for production work. Also, performance improvements are ongoing.
Note especially the following platform matrix of successful tests:
Platform | Build System | S3 support
------------------------------------
Linux+gcc | Automake | yes
Linux+gcc | CMake | yes
Visual Studio | CMake | no
Additionally, and as a consequence of the addition of NCZarr,
major changes have been made to the Filter API. NOTE: NCZarr
does not yet support filters, but these changes are enablers for
that support in the future. Note that it is possible
(probable?) that there will be some accidental reversions if the
changes here did not correctly mimic the existing filter testing.
In any case, previously filter ids and parameters were of type
unsigned int. In order to support the more general zarr filter
model, this was all converted to char*. The old HDF5-specific,
unsigned int operations are still supported but they are
wrappers around the new, char* based nc_filterx_XXX functions.
This entailed at least the following changes:
1. Added the files libdispatch/dfilterx.c and include/ncfilter.h
2. Some filterx utilities have been moved to libdispatch/daux.c
3. A new entry, "filter_actions" was added to the NCDispatch table
and the version bumped.
4. An overly complex set of structs was created to support funnelling
all of the filterx operations thru a single dispatch
"filter_actions" entry.
5. Move common code to from libhdf5 to libsrc4 so that it is accessible
to nczarr.
Changes directly related to Zarr:
1. Modified CMakeList.txt and configure.ac to support both C and C++
-- this is in support of S3 support via the awd-sdk libraries.
2. Define a size64_t type to support nczarr.
3. More reworking of libdispatch/dinfermodel.c to
support zarr and to regularize the structure of the fragments
section of a URL.
Changes not directly related to Zarr:
1. Make client-side filter registration be conditional, with default off.
2. Hack include/nc4internal.h to make some flags added by Ed be unique:
e.g. NC_CREAT, NC_INDEF, etc.
3. cleanup include/nchttp.h and libdispatch/dhttp.c.
4. Misc. changes to support compiling under Visual Studio including:
* Better testing under windows for dirent.h and opendir and closedir.
5. Misc. changes to the oc2 code to support various libcurl CURLOPT flags
and to centralize error reporting.
6. By default, suppress the vlen tests that have unfixed memory leaks; add option to enable them.
7. Make part of the nc_test/test_byterange.sh test be contingent on remotetest.unidata.ucar.edu being accessible.
Changes Left TO-DO:
1. fix provenance code, it is too HDF5 specific.
2020-06-29 08:02:47 +08:00
|
|
|
}
|