Mirror of https://github.com/Unidata/netcdf-c.git
Synced 2024-11-21 03:13:42 +08:00

Commit message: update against main
Parent: d1d2808919
Commit: ebf86ac637
.github/workflows/run_tests_osx.yml (vendored; 20 changed lines)
@@ -21,14 +21,14 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
###
# libhdf5
###
- name: Cache libhdf5-${{ runner.os }}-${{ matrix.hdf5 }}
id: cache-hdf5-osx
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -61,7 +61,7 @@ jobs:
use_nczarr: [ nczarr_off, nczarr_on ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
###
# Set Environmental Variables

@@ -93,7 +93,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf-osx
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -167,7 +167,7 @@ jobs:
use_nczarr: [ nczarr_off, nczarr_on ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
###
# Set Environmental Variables

@@ -199,7 +199,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf5-osx
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -260,7 +260,7 @@ jobs:
hdf5: [ 1.12.2 ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
###
# Set Environmental Variables

@@ -277,7 +277,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf-osx
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -342,7 +342,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
###
# Set Environmental Variables

@@ -357,7 +357,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf5-osx
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}
.github/workflows/run_tests_ubuntu.yml (vendored; 50 changed lines)
@@ -18,7 +18,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -29,7 +29,7 @@ jobs:
###
- name: Cache libhdf5-${{ matrix.hdf5 }}
id: cache-hdf5
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -67,7 +67,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -78,7 +78,7 @@ jobs:
###
- name: Cache libhdf5-parallel-${{ matrix.hdf5 }}
id: cache-hdf5
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-parallel-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -128,7 +128,7 @@ jobs:
hdf5: [ 1.12.2 ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -149,7 +149,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -208,7 +208,7 @@ jobs:
hdf5: [ 1.12.2 ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -224,7 +224,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-parallel-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -289,7 +289,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -308,7 +308,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf5
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -369,7 +369,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -388,7 +388,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf5
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-parallel-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -448,7 +448,7 @@ jobs:
use_nczarr: [ nczarr_off, nczarr_on ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -482,7 +482,7 @@ jobs:
- name: Fetch HDF Cache
id: cache-hdf
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}

@@ -501,31 +501,37 @@ jobs:
- name: Configure
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} ./configure ${ENABLE_HDF5} ${ENABLE_DAP} ${ENABLE_NCZARR}
run: |
current_directory="$(pwd)"
mkdir ../build
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} "${current_directory}/configure" ${ENABLE_HDF5} ${ENABLE_DAP} ${ENABLE_NCZARR}
if: ${{ success() }}

- name: Look at config.log if error
shell: bash -l {0}
run: cat config.log
run: cd ../build && cat config.log
if: ${{ failure() }}

- name: Print Summary
shell: bash -l {0}
run: cat libnetcdf.settings
run: cd ../build && cat libnetcdf.settings

- name: Build Library and Utilities
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make -j
run: |
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make -j
if: ${{ success() }}

- name: Build Tests
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check TESTS="" -j
run: |
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check TESTS="" -j
if: ${{ success() }}

- name: Run Tests
shell: bash -l {0}
run: CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
run: |
cd ../build && CFLAGS=${CFLAGS} LDFLAGS=${LDFLAGS} LD_LIBRARY_PATH=${LD_LIBRARY_PATH} make check -j
if: ${{ success() }}

nc-cmake:

@@ -541,7 +547,7 @@ jobs:
use_nczarr: [ nczarr_off, nczarr_on ]
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Install System dependencies
shell: bash -l {0}

@@ -575,7 +581,7 @@
- name: Fetch HDF Cache
id: cache-hdf5
uses: actions/cache@v2
uses: actions/cache@v3
with:
path: ~/environments/${{ matrix.hdf5 }}
key: hdf5-${{ runner.os }}-${{ matrix.hdf5 }}
.github/workflows/run_tests_win_cygwin.yml (vendored; 2 changed lines)
@@ -20,7 +20,7 @@ jobs:
- name: Fix line endings
run: git config --global core.autocrlf input
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: cygwin/cygwin-install-action@v2
with:
.github/workflows/run_tests_win_mingw.yml (vendored; 2 changed lines)
@@ -22,7 +22,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- uses: msys2/setup-msys2@v2
with:
msystem: MINGW64
@@ -95,6 +95,6 @@ obsolete/fan_utils.html bestpractices.md filters.md indexing.md
inmemory.md DAP2.dox FAQ.md
known_problems.md
COPYRIGHT.dox user_defined_formats.md DAP4.md DAP4.dox
testserver.dox byterange.dox filters.md nczarr.md auth.md quantize.md)
testserver.dox byterange.md filters.md nczarr.md auth.md quantize.md)

ADD_EXTRA_DIST("${CUR_EXTRA_DIST}")
@@ -809,13 +809,10 @@ INPUT = @abs_top_srcdir@/docs/mainpage.dox \
@abs_top_srcdir@/RELEASE_NOTES.md \
@abs_top_srcdir@/docs/install-fortran.md \
@abs_top_srcdir@/docs/windows-binaries.md \
@abs_top_srcdir@/docs/attribute_conventions.md \
@abs_top_srcdir@/docs/file_format_specifications.md \
@abs_top_srcdir@/docs/all-error-codes.md \
@abs_top_srcdir@/docs/inmemory.md \
@abs_top_srcdir@/docs/filter_quickstart.md \
@abs_top_srcdir@/docs/filters.md \
@abs_top_srcdir@/docs/byterange.dox \
@abs_top_srcdir@/docs/byterange.md \
@abs_top_srcdir@/docs/nczarr.md \
@abs_top_srcdir@/docs/notes.md \
@abs_top_srcdir@/docs/building-with-cmake.md \

@@ -829,6 +826,9 @@ INPUT = @abs_top_srcdir@/docs/mainpage.dox \
@abs_top_srcdir@/docs/indexing.dox \
@abs_top_srcdir@/docs/testserver.dox \
@abs_top_srcdir@/docs/quantize.md \
@abs_top_srcdir@/docs/attribute_conventions.md \
@abs_top_srcdir@/docs/file_format_specifications.md \
@abs_top_srcdir@/docs/all-error-codes.md \
@abs_top_srcdir@/include/netcdf.h \
@abs_top_srcdir@/include/netcdf_mem.h \
@abs_top_srcdir@/include/netcdf_par.h \
@@ -748,7 +748,7 @@ INPUT = \
./docs/windows-binaries.md \
./docs/attribute_conventions.md \
./docs/file_format_specifications.md \
./docs/byterange.dox \
./docs/byterange.md \
./docs/inmemory.md \
./docs/auth.md \
./docs/filters.md \
@@ -7,15 +7,13 @@
# See netcdf-c/COPYRIGHT file for more info.

# These files will be included with the dist.
EXTRA_DIST = CMakeLists.txt COPYRIGHT.md FAQ.md \
netcdf.m4 DoxygenLayout.xml Doxyfile.in footer.html \
mainpage.dox tutorial.dox architecture.dox \
groups.dox indexing.dox inmeminternal.dox testserver.dox \
byterange.dox \
windows-binaries.md dispatch.md building-with-cmake.md \
EXTRA_DIST = netcdf.m4 DoxygenLayout.xml Doxyfile.in footer.html \
mainpage.dox tutorial.dox architecture.dox internal.dox \
windows-binaries.md dispatch.md building-with-cmake.md CMakeLists.txt groups.dox \
notes.md install-fortran.md credits.md auth.md filters.md \
obsolete/fan_utils.html inmemory.md known_problems.md \
nczarr.md quantize.md all-error-codes.md
obsolete/fan_utils.html indexing.dox inmemory.md FAQ.md \
known_problems.md COPYRIGHT.md inmeminternal.dox testserver.dox \
byterange.md nczarr.md quantize.md all-error-codes.md

# Turn off parallel builds in this directory.
.NOTPARALLEL:
@@ -1,156 +0,0 @@
/**
@if INTERNAL

@page byterange Remote Dataset Access Using HTTP Byte Ranges

\tableofcontents

<!-- Note that this file has the .dox extension, but is mostly markdown -->
<!-- Begin MarkDown -->

# Introduction {#byterange_intro}

Suppose that you have the URL to a remote dataset
which is a normal netcdf-3 or netcdf-4 file.

The netCDF-c library now supports read-only access to such
datasets using the HTTP byte range capability [], assuming that
the remote server supports byte-range access.

Two examples:

1. An Amazon S3 object containing a netcdf classic file.
- location: "https://remotetest.unidata.ucar.edu/thredds/fileServer/testdata/2004050300_eta_211.nc#mode=bytes"
2. A Thredds Server dataset supporting the Thredds HTTPServer protocol.
and containing a netcdf enhanced file.
- location: "http://noaa-goes16.s3.amazonaws.com/ABI-L1b-RadC/2017/059/03/OR_ABI-L1b-RadC-M3C13_G16_s20170590337505_e20170590340289_c20170590340316.nc#mode=bytes"

Other remote servers may also provide byte-range access in a similar form.

It is important to note that this is not intended as a true
production capability because it is believed that this kind of access
can be quite slow. In addition, the byte-range IO drivers do not
currently do any sort of optimization or caching.

# Configuration {#byterange_config}

This capability is enabled using the option *--enable-byterange* option
to the *./configure* command for Automake. For Cmake, the option flag is
*-DENABLE_BYTERANGE=true*.

This capability requires access to *libcurl*, and an error will occur
if byterange is enabled, but no *libcurl* could not be located.
In this, it is similar to the DAP2 and DAP4 capabilities.

Note also that here, the term "http" is often used as a synonym for *byterange*.

# Run-time Usage {#byterange_url}

In order to use this capability at run-time, with *ncdump* for
example, it is necessary to provide a URL pointing to the basic
dataset to be accessed. The URL must be annotated to tell the
netcdf-c library that byte-range access should be used. This is
indicated by appending the phrase ````#mode=bytes````
to the end of the URL.
The two examples above show how this will look.

In order to determine the kind of file being accessed, the
netcdf-c library will read what is called the "magic number"
from the beginning of the remote dataset. This magic number
is a specific set of bytes that indicates the kind of file:
classic, enhanced, cdf5, etc.

# Architecture {#byterange_arch}

Internally, this capability is implemented with three files:

1. libdispatch/dhttp.c -- wrap libcurl operations.
2. libsrc/httpio.c -- provide byte-range reading to the netcdf-3 dispatcher.
3. libhdf5/H5FDhttp.c -- provide byte-range reading to the netcdf-4 dispatcher.

Both *httpio.c* and *H5FDhttp.c* are adapters that use *dhttp.c*
to do the work. Testing for the magic number is also carried out
by using the *dhttp.c* code.

## NetCDF Classic Access

The netcdf-3 code in the directory *libsrc* is built using
a secondary dispatch mechanism called *ncio*. This allows the
netcdf-3 code be independent of the lowest level IO access mechanisms.
This is how in-memory and mmap based access is implemented.
The file *httpio.c* is the dispatcher used to provide byte-range
IO for the netcdf-3 code.

Note that *httpio.c* is mostly just an
adapter between the *ncio* API and the *dhttp.c* code.

## NetCDF Enhanced Access

Similar to the netcdf-3 code, the HDF5 library
provides a secondary dispatch mechanism *H5FD*. This allows the
HDF5 code to be independent of the lowest level IO access mechanisms.
The netcdf-4 code in libhdf5 is built on the HDF5 library, so
it indirectly inherits the H5FD mechanism.

The file *H5FDhttp.c* implements the H5FD dispatcher API
and provides byte-range IO for the netcdf-4 code
(and for the HDF5 library as a side effect).

Note that *H5FDhttp.c* is mostly just an
adapter between the *H5FD* API and the *dhttp.c* code.

# The dhttp.c Code {#byterange_dhttp}

The core of all this is *dhttp.c* (and its header
*include/nchttp.c*). It is a wrapper over *libcurl*
and so exposes the libcurl handles -- albeit as _void*_.

The API for *dhttp.c* consists of the following procedures:
- int nc_http_open(const char* objecturl, void** curlp, fileoffset_t* filelenp);
- int nc_http_read(void* curl, const char* url, fileoffset_t start, fileoffset_t count, NCbytes* buf);
- int nc_http_close(void* curl);
- typedef long long fileoffset_t;

The type *fileoffset_t* is used to avoid use of *off_t* or *off64_t*
which are too volatile. It is intended to be represent file lengths
and offsets.

## nc_http_open
The *nc_http_open* procedure creates a *Curl* handle and returns it
in the *curlp* argument. It also obtains and searches the headers
looking for two headers:

1. "Accept-Ranges: bytes" -- to verify that byte-range access is supported.
2. "Content-Length: ..." -- to obtain the size of the remote dataset.

The dataset length is returned in the *filelenp* argument.

## nc_http_read

The *nc_http_read* procedure reads a specified set of contiguous bytes
as specified by the *start* and *count* arguments. It takes the *Curl*
handle produced by *nc_http_open* to indicate the server from which to read.

The *buf* argument is a pointer to an instance of type *NCbytes*, which
is a dynamically expandable byte vector (see the file *include/ncbytes.h*).

This procedure reads *count* bytes from the remote dataset starting at
the offset *start* position. The bytes are stored in *buf*.

## nc_http_close

The *nc_http_close* function closes the *Curl* handle and does any
necessary cleanup.

# Point of Contact {#byterange_poc}

__Author__: Dennis Heimbigner<br>
__Email__: dmh at ucar dot edu<br>
__Initial Version__: 12/30/2018<br>
__Last Revised__: 12/30/2018

<!-- End MarkDown -->

@endif

*/
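The page deleted above describes byte-range access at the user level; the build files earlier in this commit now reference byterange.md in its place. As a quick illustration of its "Run-time Usage" section, here is a minimal C sketch of opening a remote classic file through the byte-range driver. It assumes a library built with --enable-byterange and reuses the first example URL from the page; the variable names are illustrative only.

#include <stdio.h>
#include <netcdf.h>

int main(void)
{
    /* Appending #mode=bytes tells the library to use the byte-range driver. */
    const char *url =
        "https://remotetest.unidata.ucar.edu/thredds/fileServer/testdata/"
        "2004050300_eta_211.nc#mode=bytes";
    int ncid, ndims, nvars, natts, unlimdimid, stat;

    stat = nc_open(url, NC_NOWRITE, &ncid);
    if (stat) {
        fprintf(stderr, "nc_open: %s\n", nc_strerror(stat));
        return 1;
    }

    /* Read-only metadata access then works as with a local file. */
    stat = nc_inq(ncid, &ndims, &nvars, &natts, &unlimdimid);
    if (stat) {
        fprintf(stderr, "nc_inq: %s\n", nc_strerror(stat));
        return 1;
    }

    printf("dims=%d vars=%d atts=%d\n", ndims, nvars, natts);
    return nc_close(ncid) ? 1 : 0;
}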
@@ -125,8 +125,6 @@ int
nc_def_user_format(int mode_flag, NC_Dispatch *dispatch_table, char *magic_number)
{
/* Check inputs. */
if (mode_flag != NC_UDF0 && mode_flag != NC_UDF1)
return NC_EINVAL;
if (!dispatch_table)
return NC_EINVAL;
if (magic_number && strlen(magic_number) > NC_MAX_MAGIC_NUMBER_LEN)

@@ -135,21 +133,29 @@ nc_def_user_format(int mode_flag, NC_Dispatch *dispatch_table, char *magic_numbe
/* Check the version of the dispatch table provided. */
if (dispatch_table->dispatch_version != NC_DISPATCH_VERSION)
return NC_EINVAL;

/* user defined magic numbers not allowed with netcdf3 modes */
if (magic_number && (fIsSet(mode_flag, NC_64BIT_OFFSET) ||
fIsSet(mode_flag, NC_64BIT_DATA) ||
(fIsSet(mode_flag, NC_CLASSIC_MODEL) &&
!fIsSet(mode_flag, NC_NETCDF4))))
return NC_EINVAL;
/* Retain a pointer to the dispatch_table and a copy of the magic
* number, if one was provided. */
switch(mode_flag)
if (fIsSet(mode_flag,NC_UDF0))
{
case NC_UDF0:
UDF0_dispatch_table = dispatch_table;
if (magic_number)
strncpy(UDF0_magic_number, magic_number, NC_MAX_MAGIC_NUMBER_LEN);
break;
case NC_UDF1:
}
else if(fIsSet(mode_flag, NC_UDF1))
{
UDF1_dispatch_table = dispatch_table;
if (magic_number)
strncpy(UDF1_magic_number, magic_number, NC_MAX_MAGIC_NUMBER_LEN);
break;
}
else
{
return NC_EINVAL;
}

return NC_NOERR;

@@ -175,23 +181,23 @@ int
nc_inq_user_format(int mode_flag, NC_Dispatch **dispatch_table, char *magic_number)
{
/* Check inputs. */
if (mode_flag != NC_UDF0 && mode_flag != NC_UDF1)
return NC_EINVAL;

switch(mode_flag)
if (fIsSet(mode_flag,NC_UDF0))
{
case NC_UDF0:
if (dispatch_table)
*dispatch_table = UDF0_dispatch_table;
if (magic_number)
strncpy(magic_number, UDF0_magic_number, NC_MAX_MAGIC_NUMBER_LEN);
break;
case NC_UDF1:
}
else if(fIsSet(mode_flag,NC_UDF1))
{
if (dispatch_table)
*dispatch_table = UDF1_dispatch_table;
if (magic_number)
strncpy(magic_number, UDF1_magic_number, NC_MAX_MAGIC_NUMBER_LEN);
break;
}
else
{
return NC_EINVAL;
}

return NC_NOERR;
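The two hunks above switch nc_def_user_format() and nc_inq_user_format() from an exact switch on the mode value to fIsSet() tests, so NC_UDF0/NC_UDF1 can be combined with other flags, and they add a check that rejects a user-defined magic number together with a netcdf-3 mode. A minimal registration/inquiry sketch under these rules; the dispatch table and magic string below are placeholders (assumed to be built against the current NC_DISPATCH_VERSION), not part of this commit.

#include <string.h>
#include <netcdf.h>

/* Placeholder: a real UDF supplies a fully populated NC_Dispatch table. */
extern NC_Dispatch my_dispatch_table;

int
register_my_format(void)
{
    int stat;
    NC_Dispatch *found = NULL;
    char magic[NC_MAX_MAGIC_NUMBER_LEN + 1] = "";

    /* Associate the dispatch table and a magic number with the NC_UDF0 slot. */
    if ((stat = nc_def_user_format(NC_UDF0, &my_dispatch_table, "MYFMT")))
        return stat;

    /* Read the registration back; both out-parameters are optional. */
    if ((stat = nc_inq_user_format(NC_UDF0, &found, magic)))
        return stat;

    /* Per the added check, a magic number combined with a netcdf-3 mode
     * flag is now rejected with NC_EINVAL. */
    if (nc_def_user_format(NC_UDF0 | NC_64BIT_OFFSET, &my_dispatch_table,
                           "MYFMT") != NC_EINVAL)
        return NC_EINVAL;

    return (found == &my_dispatch_table && strcmp(magic, "MYFMT") == 0)
               ? NC_NOERR : NC_EINVAL;
}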
@@ -118,8 +118,8 @@ static struct FORMATMODES {
{"classic",NC_FORMATX_NC3,0}, /* ditto */
{"netcdf-4",NC_FORMATX_NC4,NC_FORMAT_NETCDF4},
{"enhanced",NC_FORMATX_NC4,NC_FORMAT_NETCDF4},
{"udf0",NC_FORMATX_UDF0,NC_FORMAT_NETCDF4},
{"udf1",NC_FORMATX_UDF1,NC_FORMAT_NETCDF4},
{"udf0",NC_FORMATX_UDF0,0},
{"udf1",NC_FORMATX_UDF1,0},
{"nczarr",NC_FORMATX_NCZARR,NC_FORMAT_NETCDF4},
{"zarr",NC_FORMATX_NCZARR,NC_FORMAT_NETCDF4},
{"bytes",NC_FORMATX_NC4,NC_FORMAT_NETCDF4}, /* temporary until 3 vs 4 is determined */

@@ -182,8 +182,8 @@ static struct Readable {
{NC_FORMATX_PNETCDF,1},
{NC_FORMATX_DAP2,0},
{NC_FORMATX_DAP4,0},
{NC_FORMATX_UDF0,0},
{NC_FORMATX_UDF1,0},
{NC_FORMATX_UDF0,1},
{NC_FORMATX_UDF1,1},
{NC_FORMATX_NCZARR,0}, /* eventually make readable */
{0,0},
};

@@ -762,13 +762,31 @@ NC_omodeinfer(int useparallel, int cmode, NCmodel* model)
* use some of the other flags, like NC_NETCDF4, so we must first
* check NC_UDF0 and NC_UDF1 before checking for any other
* flag. */
if(fIsSet(cmode,(NC_UDF0|NC_UDF1))) {
model->format = NC_FORMAT_NETCDF4;
if(fIsSet(cmode,NC_UDF0)) {
if(fIsSet(cmode, NC_UDF0) || fIsSet(cmode, NC_UDF1))
{
if(fIsSet(cmode, NC_UDF0))
{
model->impl = NC_FORMATX_UDF0;
} else {
model->impl = NC_FORMATX_UDF1;
}
if(fIsSet(cmode,NC_64BIT_OFFSET))
{
model->format = NC_FORMAT_64BIT_OFFSET;
}
else if(fIsSet(cmode,NC_64BIT_DATA))
{
model->format = NC_FORMAT_64BIT_DATA;
}
else if(fIsSet(cmode,NC_NETCDF4))
{
if(fIsSet(cmode,NC_CLASSIC_MODEL))
model->format = NC_FORMAT_NETCDF4_CLASSIC;
else
model->format = NC_FORMAT_NETCDF4;
}
if(! model->format)
model->format = NC_FORMAT_CLASSIC;
goto done;
}

@@ -981,8 +999,6 @@ NC_infermodel(const char* path, int* omodep, int iscreate, int useparallel, void
case NC_FORMATX_NC4:
case NC_FORMATX_NC_HDF4:
case NC_FORMATX_DAP4:
case NC_FORMATX_UDF0:
case NC_FORMATX_UDF1:
case NC_FORMATX_NCZARR:
omode |= NC_NETCDF4;
if(model->format == NC_FORMAT_NETCDF4_CLASSIC)

@@ -1001,6 +1017,17 @@ NC_infermodel(const char* path, int* omodep, int iscreate, int useparallel, void
case NC_FORMATX_DAP2:
omode &= ~(NC_NETCDF4|NC_64BIT_OFFSET|NC_64BIT_DATA|NC_CLASSIC_MODEL);
break;
case NC_FORMATX_UDF0:
case NC_FORMATX_UDF1:
if(model->format == NC_FORMAT_64BIT_OFFSET)
omode |= NC_64BIT_OFFSET;
else if(model->format == NC_FORMAT_64BIT_DATA)
omode |= NC_64BIT_DATA;
else if(model->format == NC_FORMAT_NETCDF4)
omode |= NC_NETCDF4;
else if(model->format == NC_FORMAT_NETCDF4_CLASSIC)
omode |= NC_NETCDF4|NC_CLASSIC_MODEL;
break;
default:
{stat = NC_ENOTNC; goto done;}
}

@@ -1513,23 +1540,10 @@ static int
NC_interpret_magic_number(char* magic, NCmodel* model)
{
int status = NC_NOERR;
int tmpimpl = 0;
/* Look at the magic number */
#ifdef USE_NETCDF4
if (strlen(UDF0_magic_number) && !strncmp(UDF0_magic_number, magic,
strlen(UDF0_magic_number)))
{
model->impl = NC_FORMATX_UDF0;
model->format = NC_FORMAT_NETCDF4;
goto done;
}
if (strlen(UDF1_magic_number) && !strncmp(UDF1_magic_number, magic,
strlen(UDF1_magic_number)))
{
model->impl = NC_FORMATX_UDF1;
model->format = NC_FORMAT_NETCDF4;
goto done;
}
#endif /* USE_NETCDF4 */
if(model->impl == NC_FORMATX_UDF0 || model->impl == NC_FORMATX_UDF1)
tmpimpl = model->impl;

/* Use the complete magic number string for HDF5 */
if(memcmp(magic,HDF5_SIGNATURE,sizeof(HDF5_SIGNATURE))==0) {

@@ -1561,10 +1575,29 @@ NC_interpret_magic_number(char* magic, NCmodel* model)
}
}
/* No match */
if (!tmpimpl)
status = NC_ENOTNC;

goto done;

done:
/* if model->impl was UDF0 or UDF1 on entry, make it so on exit */
if(tmpimpl)
model->impl = tmpimpl;
/* if this is a UDF magic_number update the model->impl */
if (strlen(UDF0_magic_number) && !strncmp(UDF0_magic_number, magic,
strlen(UDF0_magic_number)))
{
model->impl = NC_FORMATX_UDF0;
status = NC_NOERR;
}
if (strlen(UDF1_magic_number) && !strncmp(UDF1_magic_number, magic,
strlen(UDF1_magic_number)))
{
model->impl = NC_FORMATX_UDF1;
status = NC_NOERR;
}

return check(status);
}
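Taken together, the hunks above stop hard-wiring NC_FORMAT_NETCDF4 for user-defined formats: the "udf0"/"udf1" mode-table entries no longer carry a fixed format, the UDF implementations are marked readable, and NC_omodeinfer/NC_infermodel now honour NC_64BIT_OFFSET, NC_64BIT_DATA, NC_NETCDF4 and NC_CLASSIC_MODEL alongside the UDF flags, with UDF magic numbers consulted after the built-in checks. A hedged sketch of what that enables at the API level; udf0_dispatch is a placeholder for a real user dispatch table and is not part of this commit.

#include <netcdf.h>

/* Placeholder dispatch table for a user-defined format. */
extern NC_Dispatch udf0_dispatch;

int
create_udf_with_classic_format(const char *path, int *ncidp)
{
    int stat;

    /* Register the NC_UDF0 slot; with no magic number the UDF is selected
     * purely by the mode flag. */
    if ((stat = nc_def_user_format(NC_UDF0, &udf0_dispatch, NULL)))
        return stat;

    /* With this change NC_UDF0 no longer forces NC_FORMAT_NETCDF4: the
     * 64-bit-offset flag survives mode inference and is handed to the
     * user-defined dispatcher's create function. */
    return nc_create(path, NC_UDF0 | NC_64BIT_OFFSET, ncidp);
}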
@@ -1303,6 +1303,9 @@ NC_check_nulls(int ncid, int varid, const size_t *start, size_t **count,
pointer back to this function, when you're done with the data, and
it will free the string memory.

WARNING: This does not free the data vector itself, only
the strings to which it points.

@param len The number of character arrays in the array.
@param data The pointer to the data array.
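The added warning above concerns nc_free_string(): it releases the individual strings but not the caller-allocated pointer array. A small sketch of the intended call pattern, using only public API calls; the function and attribute names here are illustrative.

#include <stdlib.h>
#include <netcdf.h>

/* Read an NC_STRING attribute, then release it correctly. */
int
read_string_attribute(int ncid, int varid, const char *attname)
{
    nc_type xtype;
    size_t len;
    int stat;
    char **strings;

    if ((stat = nc_inq_att(ncid, varid, attname, &xtype, &len)))
        return stat;
    if (xtype != NC_STRING || len == 0)
        return NC_EBADTYPE;

    /* The caller owns the vector of char* slots... */
    if (!(strings = malloc(len * sizeof(char *))))
        return NC_ENOMEM;

    if ((stat = nc_get_att_string(ncid, varid, attname, strings))) {
        free(strings);
        return stat;
    }

    /* ...the library owns the strings the slots point to. */
    stat = nc_free_string(len, strings); /* frees the strings only */
    free(strings);                       /* the vector itself must still be freed */
    return stat;
}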
@@ -307,7 +307,7 @@ main(int argc, char **argv)
* priority. If NC_NETCDF4 flag were given priority, then
* nc_abort() will not return TEST_VAL_42, but instead will
* return 0. */
if (nc_open(FILE_NAME, mode[i]|NC_NETCDF4, &ncid)) ERR;
if (nc_open(FILE_NAME, mode[i], &ncid)) ERR;
if (nc_inq_format(ncid, NULL) != TEST_VAL_42) ERR;
if (nc_inq_format_extended(ncid, NULL, NULL) != TEST_VAL_42) ERR;
if (nc_abort(ncid) != TEST_VAL_42) ERR;

@@ -336,6 +336,7 @@ main(int argc, char **argv)
for (i = 0; i < NUM_UDFS; i++)
{
/* Add our test user defined format. */
mode[i] = mode[i]|NC_NETCDF4;
if (nc_def_user_format(mode[i], &tst_dispatcher, magic_number)) ERR;

/* Check that our user-defined format has been added. */

@@ -360,6 +361,7 @@ main(int argc, char **argv)
printf("*** testing bad version causes dispatch table to be rejected...");
{
int i;
char magic_number[5] = "1111";

/* Test all available user-defined format slots. */
for (i = 0; i < NUM_UDFS; i++)

@@ -367,6 +369,9 @@ main(int argc, char **argv)
/* Make sure our bad version format is rejected. */
if (nc_def_user_format(mode[i], &tst_dispatcher_bad_version,
NULL) != NC_EINVAL) ERR;
/* Make sure defining a magic number with netcdf3 is rejected. */
if (nc_def_user_format(NC_CLASSIC_MODEL, &tst_dispatcher,
magic_number) != NC_EINVAL) ERR;
}
}
SUMMARIZE_ERR;
@@ -263,6 +263,7 @@ endif()
add_sh_test(ncdump tst_ncgen4)
add_sh_test(ncdump tst_netcdf4_4)
add_sh_test(ncdump tst_nccopy4)
add_sh_test(ncdump tst_calendars_nc4)

SET_TESTS_PROPERTIES(ncdump_tst_nccopy4 PROPERTIES DEPENDS "ncdump_run_ncgen_tests;ncdump_tst_output;ncdump_tst_ncgen4;ncdump_sh_tst_fillbug;ncdump_tst_netcdf4_4;ncdump_tst_h_scalar;tst_comp;tst_comp2;tst_nans;tst_opaque_data;tst_create_files;tst_special_atts")
SET_TESTS_PROPERTIES(ncdump_tst_nccopy5 PROPERTIES DEPENDS "ncdump_tst_nccopy4")
@@ -151,7 +151,7 @@ TESTS += tst_output.sh
TESTS += tst_nccopy3.sh
if USE_HDF5
TESTS += run_back_comp_tests.sh tst_netcdf4_4.sh
TESTS += tst_nccopy4.sh tst_nccopy5.sh
TESTS += tst_nccopy4.sh tst_nccopy5.sh tst_calendars_nc4.sh
endif
endif
endif

@@ -177,7 +177,7 @@ ref_tst_noncoord.cdl ref_tst_compounds2.nc ref_tst_compounds2.cdl \
ref_tst_compounds3.nc ref_tst_compounds3.cdl ref_tst_compounds4.nc \
ref_tst_compounds4.cdl ref_tst_group_data_v23.cdl tst_mslp.cdl \
tst_bug321.cdl ref_tst_format_att.cdl ref_tst_format_att_64.cdl \
tst_nccopy3.sh tst_nccopy4.sh tst_nccopy5.sh \
tst_nccopy3.sh tst_nccopy4.sh tst_nccopy5.sh tst_calendars_nc4.sh \
ref_nc_test_netcdf4_4_0.nc run_back_comp_tests.sh \
ref_nc_test_netcdf4.cdl ref_tst_special_atts3.cdl tst_brecs.cdl \
ref_tst_grp_spec0.cdl ref_tst_grp_spec.cdl tst_grp_spec.sh \

@@ -205,7 +205,7 @@ test_keywords.sh ref_keyword1.cdl ref_keyword2.cdl ref_keyword3.cdl ref_keyword4
ref_tst_nofilters.cdl test_scope.sh \
test_rcmerge.sh ref_rcmerge1.txt ref_rcmerge2.txt ref_rcmerge3.txt \
scope_ancestor_only.cdl scope_ancestor_subgroup.cdl scope_group_only.cdl scope_preorder.cdl \
ref_rcapi.txt ref_tst_enum_undef.cdl
ref_rcapi.txt ref_tst_enum_undef.cdl tst_calendars_nc4.cdl ref_times_nc4.cdl

# The L512.bin file is file containing exactly 512 bytes each of value 0.
# It is used for creating hdf5 files with varying offsets for testing.

@@ -247,7 +247,7 @@ tst_roman_szip_unlim.cdl tst_perdimpspecs.nc tmppds.* \
keyword1.nc keyword2.nc keyword3.nc keyword4.nc \
tmp_keyword1.cdl tmp_keyword2.cdl tmp_keyword3.cdl tmp_keyword4.cdl \
type_*.nc copy_type_*.cdl \
scope_*.nc copy_scope_*.cdl keyword5.nc tst_enum_undef.cdl
scope_*.nc copy_scope_*.cdl keyword5.nc tst_enum_undef.cdl tst_times_nc4.cdl

# Remove directories
clean-local:
@@ -79,13 +79,18 @@ calendar_type(int ncid, int varid) {
int ncals = (sizeof calmap)/(sizeof calmap[0]);
ctype = cdMixed; /* default mixed Gregorian/Julian ala udunits */
stat = nc_inq_att(ncid, varid, CF_CAL_ATT_NAME, &catt.type, &catt.len);
if(stat == NC_NOERR && catt.type == NC_CHAR && catt.len > 0) {
char *calstr = (char *)emalloc(catt.len + 1);
if(stat == NC_NOERR && (catt.type == NC_CHAR || catt.type == NC_STRING) && catt.len > 0) {
char *calstr;
size_t cf_cal_att_name_len = strlen(CF_CAL_ATT_NAME);
strncpy(catt.name, CF_CAL_ATT_NAME, cf_cal_att_name_len);
catt.name[cf_cal_att_name_len] = '\0';
catt.tinfo = get_typeinfo(catt.type);
nc_get_att_single_string(ncid, varid, &catt, &calstr);

int itype;
NC_CHECK(nc_get_att(ncid, varid, CF_CAL_ATT_NAME, calstr));
calstr[catt.len] = '\0';
int calstr_len = strlen(calstr);
for(itype = 0; itype < ncals; itype++) {
if(strncasecmp(calstr, calmap[itype].attname, catt.len) == 0) {
if(strncasecmp(calstr, calmap[itype].attname, calstr_len) == 0) {
ctype = calmap[itype].type;
break;
}

@@ -204,10 +209,11 @@ get_timeinfo(int ncid1, int varid1, ncvar_t *vp) {

/* time variables must have appropriate units attribute or be a bounds variable */
nc_status = nc_inq_att(ncid, varid, "units", &uatt.type, &uatt.len);
if(nc_status == NC_NOERR && uatt.type == NC_CHAR) { /* TODO: NC_STRING? */
units = emalloc(uatt.len + 1);
NC_CHECK(nc_get_att(ncid, varid, "units", units));
units[uatt.len] = '\0';
if(nc_status == NC_NOERR && (uatt.type == NC_CHAR || uatt.type == NC_STRING)) {
strncpy(uatt.name, "units", 5);
uatt.name[5] = '\0';
uatt.tinfo = get_typeinfo(uatt.type);
nc_get_att_single_string(ncid, varid, &uatt, &units);
if(!is_valid_time_unit(units)) {
free(units);
return;
@@ -13,6 +13,7 @@
#include <assert.h>
#include <ctype.h>
#include "utils.h"
#include "nccomps.h"
#ifndef isascii
EXTERNL int isascii(int c);
#endif

@@ -959,3 +960,39 @@ done:
}
#endif

/*********************************************************************************/
void nc_get_att_single_string(const int ncid, const int varid,
const struct ncatt_t *att, char **str_out) {
if (att->type == NC_CHAR) {
// NC_CHAR type attribute
// Use a call to nc_get_att_text which expects to output the attribute value
// into a char * pointing to allocated memory. The number of bytes to allocate
// is the attribute length (which is the number of elements in a vector, 1 for
// scalar) times the size of each element in bytes. The attribute length is
// held in att->len, and the attribute element size is in att->tinfo->size.
*str_out = emalloc((att->len + 1) * att->tinfo->size);
(*str_out)[att->len] = '\0';
NC_CHECK(nc_get_att_text(ncid, varid, att->name, *str_out));
} else if (att->type == NC_STRING) {
// NC_STRING type attribute
// Use a call to nc_get_att_string which expects to output the attribute value
// into a vector of char pointers, where each entry points to allocated memory.
// The vector of char pointers needs to be allocated to the length (number of strings)
// times the size of each entry (size of a char *).
char **att_strings = emalloc((att->len + 1) * att->tinfo->size);
NC_CHECK(nc_get_att_string(ncid, varid, att->name, att_strings));
// str_out needs to be allocated to a size large enough to hold the string that
// the first pointer in att_strings is pointing to.
size_t att_str_len = strlen(att_strings[0]);
*str_out = emalloc((att_str_len + 1) * att->tinfo->size);
(*str_out)[att_str_len] = '\0';
strncpy(*str_out, att_strings[0], att_str_len);
nc_free_string(att->len, att_strings); /* Warning: does not free att_strings */
free(att_strings);
} else {
fprintf(stderr,"nc_get_att_single_string: unknown attribute type: %d\n", att->type);
fprintf(stderr,"                          must use one of: NC_CHAR, NC_STRING\n");
fflush(stderr); fflush(stdout);
exit(2);
}
}
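nc_get_att_single_string() above is an ncdump-internal helper built around the public attribute API. Outside ncdump, the same NC_CHAR/NC_STRING handling can be sketched with public calls only; the function and variable names below are illustrative and not part of the library.

#include <stdlib.h>
#include <string.h>
#include <netcdf.h>

/* Return a freshly malloc'ed copy of a single-string attribute value,
 * whether stored as NC_CHAR or NC_STRING, or NULL on error. */
char *
get_string_att(int ncid, int varid, const char *name)
{
    nc_type xtype;
    size_t len;
    char *result = NULL;

    if (nc_inq_att(ncid, varid, name, &xtype, &len)) return NULL;

    if (xtype == NC_CHAR) {
        /* len counts characters; add room for the terminating NUL. */
        if (!(result = malloc(len + 1))) return NULL;
        if (nc_get_att_text(ncid, varid, name, result)) { free(result); return NULL; }
        result[len] = '\0';
    } else if (xtype == NC_STRING && len > 0) {
        /* len counts strings; keep only the first, as the helper above does. */
        char **strings = malloc(len * sizeof(char *));
        if (!strings) return NULL;
        if (nc_get_att_string(ncid, varid, name, strings)) { free(strings); return NULL; }
        result = strdup(strings[0]);
        nc_free_string(len, strings); /* frees the strings, not the vector */
        free(strings);
    }
    return result;
}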
@@ -10,6 +10,8 @@

#include "config.h"

struct ncatt_t;

#ifndef NCSTREQ
#define NCSTREQ(a, b) (*(a) == *(b) && strcmp((a), (b)) == 0)
#endif

@@ -181,6 +183,16 @@ extern int nc_next_giter(ncgiter_t *iterp, int *grpid);
extern void nc_free_giter(ncgiter_t *iterp);
extern int getrootid(int grpid);

/*
 * Get attribute value for a single string value from either of NC_CHAR or NC_STRING types.
 * This routine assumes that the attribute holds a single string value. If there are more
 * than one string, subequent strings after the first one will be ignored.
 *
 * The caller is responsible for allocating and freeing memory for the str_out parameter.
 */
extern void nc_get_att_single_string(const int ncid, const int varid,
const struct ncatt_t *att, char **str_out);

#ifdef __cplusplus
}
#endif