mirror of
https://github.com/Unidata/netcdf-c.git
synced 2024-12-27 08:49:16 +08:00
df3636b959
This PR started as an attempt to add unlimited dimensions to NCZarr. It did that, but this exposed significant problems with test interference. So this PR is mostly about fixing -- well mitigating anyway -- test interference. The problem of test interference is now documented in the document docs/internal.md. The solutions implemented here are also described in that document. The solution is somewhat fragile but multiple cleanup mechanisms are provided. Note that this feature requires that the AWS command line utility must be installed. ## Unlimited Dimensions. The existing NCZarr extensions to Zarr are modified to support unlimited dimensions. NCZarr extends the Zarr meta-data for the ".zgroup" object to include netcdf-4 model extensions. This information is stored in ".zgroup" as a dictionary named "_nczarr_group". Inside "_nczarr_group", there is a key named "dims" that stores information about netcdf-4 named dimensions. The value of "dims" is a dictionary whose keys are the named dimensions. The value associated with each dimension name has one of two forms. Form 1 is a special case of form 2, and is kept for backward compatibility. Whenever a new file is written, it uses format 1 if possible, otherwise format 2. * Form 1: An integer representing the size of the dimension, which is used for simple named dimensions. * Form 2: A dictionary with the following keys and values: - "size" with an integer value representing the (current) size of the dimension. - "unlimited" with a value of either "1" or "0" to indicate if this dimension is an unlimited dimension. For unlimited dimensions, the size is initially zero, and as variables extend the length of that dimension, the size value for the dimension increases. That dimension size is shared by all arrays referencing that dimension, so if one array extends an unlimited dimension, it is implicitly extended for all other arrays that reference that dimension. This is the standard semantics for unlimited dimensions.
Adding unlimited dimensions required a number of other changes to the NCZarr code-base. These included the following. * Did a partial refactor of the slice handling code in zwalk.c to clean it up. * Added a number of tests for unlimited dimensions derived from the same test in nc_test4. * Added several NCZarr specific unlimited tests; more are needed. * Added a test of endianness. ## Misc. Other Changes * Modify libdispatch/ncs3sdk_aws.cpp to optionally support use of the AWS Transfer Utility mechanism. This is controlled by the ```#define TRANSFER``` macro in that file. It defaults to being disabled. * Parameterize both the standard Unidata S3 bucket (S3TESTBUCKET) and the netcdf-c test data prefix (S3TESTSUBTREE). * Fixed an obscure memory leak in ncdump. * Removed some obsolete unit testing code and test cases. * Uncovered a bug in the netcdf-c handling of big-endian floats and doubles. Have not fixed yet. See tst_h5_endians.c. * Renamed some nczarr_tests testcases to avoid name conflicts with nc_test4. * Modify the semantics of zmap\#ncsmap_write to only allow total rewrite of objects. * Modify the semantics of zodom to properly handle stride > 1. * Add a truncate operation to the libnczarr zmap code.
146 lines
4.1 KiB
C
146 lines
4.1 KiB
C
/* Copyright 2018, UCAR/Unidata.
See the COPYRIGHT file for more information.
*/

#ifndef NCJSON_H
#define NCJSON_H 1

/*
WARNING:
If you modify this file,
then you need to go to
the include/ directory
and do the command:
make makepluginjson
*/

/* Inside libnetcdf and for plugins, export the json symbols */
#ifndef DLLEXPORT
#ifdef _WIN32
#define DLLEXPORT __declspec(dllexport)
#else
#define DLLEXPORT
#endif
#endif

/* Override for plugins: when NETCDF_JSON_H is defined (i.e. this file
   has been inlined into a plugin by "make makepluginjson"), keep every
   function file-local instead of exporting it. */
#ifdef NETCDF_JSON_H
#define OPTEXPORT static
#else
#define OPTEXPORT DLLEXPORT
#endif /*NETCDF_JSON_H*/
|
|
|
|
/**************************************************/
/* Json object sorts (note use of term sort rather than e.g. type or discriminant) */
#define NCJ_UNDEF 0
#define NCJ_STRING 1
#define NCJ_INT 2
#define NCJ_DOUBLE 3
#define NCJ_BOOLEAN 4
#define NCJ_DICT 5
#define NCJ_ARRAY 6
#define NCJ_NULL 7

/* Number of legal sort values above */
#define NCJ_NSORTS 8
|
/* Define a struct to store primitive values as unquoted
strings. The sort will provide more info. Do not bother with
a union since the amount of saved space is minimal.
*/

typedef struct NCjson {
    int sort;     /* NCJ_XXX discriminant of this object */
    char* string; /* sort != DICT|ARRAY; primitive value as unquoted text */
    struct NCjlist {
        int len;                  /* number of entries in contents */
        struct NCjson** contents; /* for DICT, presumably alternating key/value
                                     entries -- confirm against implementation */
    } list;       /* sort == DICT|ARRAY */
} NCjson;
|
|
|
|
/* Structure to hold result of converting one json sort to value of another type;
don't use union so we can know when to reclaim sval
*/
struct NCJconst {int bval; long long ival; double dval; char* sval;};
/* All-zero initializer for struct NCJconst */
#define NCJconst_empty {0,0,0.0,NULL}
|
|
|
|
/**************************************************/
/* Extended API */

/* Unless otherwise noted, the functions below return 0 if ok else -1 */

/* NOTE(review): these declarations use size_t and FILE*, but no #include of
   <stddef.h> or <stdio.h> is visible in this header; presumably the includer
   is expected to provide them -- confirm. */

#if defined(__cplusplus)
extern "C" {
#endif

/* Parse a NUL-terminated string to NCjson*/
OPTEXPORT int NCJparse(const char* text, unsigned flags, NCjson** jsonp);

/* Parse a counted string to NCjson*/
OPTEXPORT int NCJparsen(size_t len, const char* text, unsigned flags, NCjson** jsonp);

/* Reclaim a JSON tree (recursively, including json itself) */
OPTEXPORT void NCJreclaim(NCjson* json);

/* Create a new JSON node of a given sort */
OPTEXPORT int NCJnew(int sort, NCjson** objectp);

/* Create new json object with given string content */
OPTEXPORT int NCJnewstring(int sort, const char* value, NCjson** jsonp);

/* Create new json object with given counted string content */
OPTEXPORT int NCJnewstringn(int sort, size_t len, const char* value, NCjson** jsonp);

/* Get dict key value by name; *valuep is presumably not a copy -- caller must
   not reclaim it separately */
OPTEXPORT int NCJdictget(const NCjson* dict, const char* key, NCjson** valuep);

/* Convert one json sort to value of another type; don't use union so we can know when to reclaim sval */
OPTEXPORT int NCJcvt(const NCjson* value, int outsort, struct NCJconst* output);

/* Insert an atomic value to an array or dict object. */
OPTEXPORT int NCJaddstring(NCjson* json, int sort, const char* s);

/* Append value to an array or dict object; ownership of value transfers to object */
OPTEXPORT int NCJappend(NCjson* object, NCjson* value);

/* Insert key-value pair into a dict object. key will be copied */
OPTEXPORT int NCJinsert(NCjson* object, char* key, NCjson* value);

/* Unparser to convert NCjson object to text in buffer; caller owns *textp */
OPTEXPORT int NCJunparse(const NCjson* json, unsigned flags, char** textp);

/* Deep clone a json object */
OPTEXPORT int NCJclone(const NCjson* json, NCjson** clonep);

/* Debug helpers: only available when not inlined into a plugin */
#ifndef NETCDF_JSON_H
/* dump NCjson* object to output file */
OPTEXPORT void NCJdump(const NCjson* json, unsigned flags, FILE*);
/* convert NCjson* object to output string */
OPTEXPORT const char* NCJtotext(const NCjson* json);
#endif

#if defined(__cplusplus)
}
#endif
|
|
|
|
/* Getters.
   Note: only NCJlength tolerates a NULL argument; the others
   dereference x unconditionally. */
#define NCJsort(x) ((x)->sort)
#define NCJstring(x) ((x)->string)
#define NCJlength(x) ((x)==NULL ? 0 : (x)->list.len)
#define NCJcontents(x) ((x)->list.contents)
#define NCJith(x,i) ((x)->list.contents[i])

/* Setters.
   Each expansion is wrapped in parentheses so the macro behaves as a
   single expression. Previously NCJsetsort(x,s) expanded to
   (x)->sort=(s), so a use such as `NCJsetsort(x,s) + 1` parsed as
   (x)->sort = ((s) + 1) because assignment binds loosest -- assigning
   the wrong value. The parenthesized forms are otherwise identical. */
#define NCJsetsort(x,s) ((x)->sort=(s))
#define NCJsetstring(x,y) ((x)->string=(y))
#define NCJsetcontents(x,c) ((x)->list.contents=(c))
#define NCJsetlength(x,l) ((x)->list.len=(l))

/* Misc */
/* True iff j's sort is a primitive (string/int/double/boolean) */
#define NCJisatomic(j) ((j)->sort != NCJ_ARRAY && (j)->sort != NCJ_DICT && (j)->sort != NCJ_NULL && (j)->sort != NCJ_UNDEF)
|
|
|
|
/**************************************************/

#endif /*NCJSON_H*/