mirror of
https://github.com/Unidata/netcdf-c.git
synced 2024-12-15 08:30:11 +08:00
df3636b959
This PR started as an attempt to add unlimited dimensions to NCZarr. It did that, but this exposed significant problems with test interference. So this PR is mostly about fixing -- well mitigating anyway -- test interference. The problem of test interference is now documented in the document docs/internal.md. The solutions implemented here are also described in that document. The solution is somewhat fragile but multiple cleanup mechanisms are provided. Note that this feature requires that the AWS command line utility must be installed. ## Unlimited Dimensions. The existing NCZarr extensions to Zarr are modified to support unlimited dimensions. NCZarr extends the Zarr meta-data for the ".zgroup" object to include netcdf-4 model extensions. This information is stored in ".zgroup" as a dictionary named "_nczarr_group". Inside "_nczarr_group", there is a key named "dims" that stores information about netcdf-4 named dimensions. The value of "dims" is a dictionary whose keys are the named dimensions. The value associated with each dimension name has one of two forms. Form 1 is a special case of form 2, and is kept for backward compatibility. Whenever a new file is written, it uses format 1 if possible, otherwise format 2. * Form 1: An integer representing the size of the dimension, which is used for simple named dimensions. * Form 2: A dictionary with the following keys and values: - "size" with an integer value representing the (current) size of the dimension. - "unlimited" with a value of either "1" or "0" to indicate if this dimension is an unlimited dimension. For unlimited dimensions, the size is initially zero, and as variables extend the length of that dimension, the size value for the dimension increases. That dimension size is shared by all arrays referencing that dimension, so if one array extends an unlimited dimension, it is implicitly extended for all other arrays that reference that dimension. This is the standard semantics for unlimited dimensions.
Adding unlimited dimensions required a number of other changes to the NCZarr code-base. These included the following. * Did a partial refactor of the slice handling code in zwalk.c to clean it up. * Added a number of tests for unlimited dimensions derived from the same test in nc_test4. * Added several NCZarr-specific unlimited tests; more are needed. * Added a test of endianness. ## Misc. Other Changes * Modify libdispatch/ncs3sdk_aws.cpp to optionally support use of the AWS Transfer Utility mechanism. This is controlled by the `#define TRANSFER` definition in that file. It defaults to being disabled. * Parameterize both the standard Unidata S3 bucket (S3TESTBUCKET) and the netcdf-c test data prefix (S3TESTSUBTREE). * Fixed an obscure memory leak in ncdump. * Removed some obsolete unit testing code and test cases. * Uncovered a bug in the netcdf-c handling of big-endian floats and doubles. Have not fixed yet. See tst_h5_endians.c. * Renamed some nczarr_tests testcases to avoid name conflicts with nc_test4. * Modify the semantics of zmap\#ncsmap_write to only allow total rewrite of objects. * Modify the semantics of zodom to properly handle stride > 1. * Add a truncate operation to the libnczarr zmap code.
112 lines
4.5 KiB
C
112 lines
4.5 KiB
C
/*********************************************************************
 * Copyright 2018, UCAR/Unidata
 * See netcdf/COPYRIGHT file for copying and redistribution conditions.
 *********************************************************************/
/* Public declarations for the NCZarr chunking machinery: types and
   entry points for computing which chunks a hyperslab request touches
   and how the request projects onto each chunk (implemented in
   zchunking.c and zwalk.c). */
#ifndef ZCHUNKING_H
#define ZCHUNKING_H

#include "ncexternl.h"
typedef int (*NCZ_reader)(void* source, size64_t* chunkindices, void** chunkdata);
|
|
struct Reader {void* source; NCZ_reader read;};
|
|
|
|
/* Define the intersecting set of chunks for a slice
|
|
in terms of chunk indices (not absolute positions)
|
|
*/
|
|
typedef struct NCZChunkRange {
|
|
size64_t start; /* index, not absolute */
|
|
size64_t stop;
|
|
} NCZChunkRange;
|
|
|
|
/* A per-dimension slice for the incoming hyperslab */
|
|
typedef struct NCZSlice {
|
|
size64_t start;
|
|
size64_t stop; /* start + (count*stride) */
|
|
size64_t stride;
|
|
size64_t len; /* full dimension length */
|
|
} NCZSlice;
|
|
|
|
/* A projection defines the set of grid points
|
|
for a given set of slices as projected onto
|
|
a single chunk.
|
|
*/
|
|
typedef struct NCProjection {
|
|
int id;
|
|
int skip; /* Should this projection be skipped? */
|
|
size64_t chunkindex; /* which chunk are we projecting */
|
|
size64_t offset; /* Absolute offset of this chunk (== chunklen*chunkindex) */
|
|
size64_t first; /* absolute first position to be touched in this chunk */
|
|
size64_t last; /* absolute position of last value touched */
|
|
size64_t stop; /* absolute position of last value touched */
|
|
size64_t limit; /* Actual limit of chunk WRT start of chunk */
|
|
size64_t iopos; /* start point in the data memory to access the data */
|
|
size64_t iocount; /* no. of I/O items */
|
|
NCZSlice chunkslice; /* slice relative to this chunk */
|
|
NCZSlice memslice; /* slice relative to memory */
|
|
} NCZProjection;
|
|
|
|
/* Set of Projections for a slice */
|
|
typedef struct NCZSliceProjections {
|
|
int r; /* 0<=r<rank */
|
|
NCZChunkRange range; /* Chunk ranges covered by this set of projections */
|
|
size_t count; /* |projections| == (range.stop - range.start) */
|
|
NCZProjection* projections; /* Vector of projections derived from the
|
|
original slice when intersected across
|
|
the chunk */
|
|
} NCZSliceProjections;
|
|
|
|
/* Combine some values to avoid having to pass long argument lists*/
|
|
struct Common {
|
|
NC_FILE_INFO_T* file;
|
|
NC_VAR_INFO_T* var;
|
|
struct NCZChunkCache* cache;
|
|
int reading; /* 1=> read, 0 => write */
|
|
int rank;
|
|
int scalar; /* 1 => scalar variable */
|
|
size64_t dimlens[NC_MAX_VAR_DIMS];
|
|
unsigned char isunlimited[NC_MAX_VAR_DIMS];
|
|
size64_t chunklens[NC_MAX_VAR_DIMS];
|
|
size64_t memshape[NC_MAX_VAR_DIMS];
|
|
void* memory;
|
|
size_t typesize;
|
|
size64_t chunkcount; /* computed product of chunklens; warning indices, not bytes */
|
|
int swap; /* var->format_info_file->native_endianness == var->endianness */
|
|
size64_t shape[NC_MAX_VAR_DIMS]; /* shape of the output hyperslab */
|
|
NCZSliceProjections* allprojections;
|
|
/* Parametric chunk reader */
|
|
struct Reader reader;
|
|
};
|
|
|
|
/**************************************************/
|
|
/* From zchunking.c */
|
|
EXTERNL int NCZ_compute_chunk_ranges(struct Common*, const NCZSlice*, NCZChunkRange* ncr);
|
|
EXTERNL int NCZ_compute_projections(struct Common*, int r, size64_t chunkindex, const NCZSlice* slice, size_t n, NCZProjection* projections);
|
|
EXTERNL int NCZ_compute_per_slice_projections(struct Common*, int rank, const NCZSlice*, const NCZChunkRange*, NCZSliceProjections* slp);
|
|
EXTERNL int NCZ_compute_all_slice_projections(struct Common*, const NCZSlice* slices, const NCZChunkRange*, NCZSliceProjections*);
|
|
|
|
/* From zwalk.c */
|
|
EXTERNL int ncz_chunking_init(void);
|
|
EXTERNL int NCZ_transferslice(NC_VAR_INFO_T* var, int reading,
|
|
size64_t* start, size64_t* count, size64_t* stride,
|
|
void* memory, nc_type typecode);
|
|
EXTERNL int NCZ_transfer(struct Common* common, NCZSlice* slices);
|
|
EXTERNL int NCZ_transferscalar(struct Common* common);
|
|
EXTERNL size64_t NCZ_computelinearoffset(size_t, const size64_t*, const size64_t*);
|
|
|
|
/* Special entry points for unit testing */
|
|
struct Common;
|
|
struct NCZOdometer;
|
|
EXTERNL int NCZ_projectslices(struct Common*, NCZSlice* slices, struct NCZOdometer**);
|
|
EXTERNL int NCZ_chunkindexodom(int rank, const NCZChunkRange* ranges, size64_t*, struct NCZOdometer** odom);
|
|
EXTERNL void NCZ_clearsliceprojections(int count, NCZSliceProjections* slpv);
|
|
EXTERNL void NCZ_clearcommon(struct Common* common);
|
|
|
|
/* Integer helpers used throughout the chunk-projection arithmetic.
   These are classic function-like macros: each argument may be
   evaluated more than once, so do not pass expressions with side
   effects (e.g. ceildiv(i++, n)). */

/* Truncating integer division. */
#define floordiv(x,y) ((x) / (y))

/* Integer division rounding up: ceildiv(7,2) == 4. */
#define ceildiv(x,y) (((x) % (y)) == 0 ? ((x) / (y)) : (((x) / (y)) + 1))

/* Smaller of the two arguments. */
#define minimum(x,y) ((x) > (y) ? (y) : (x))
#endif /*ZCHUNKING_H*/