Merge branch 'master' into enable-cdf5

This commit is contained in:
Ward Fisher 2018-07-17 11:26:02 -06:00 committed by GitHub
commit b570d10e01
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 443 additions and 173 deletions

View File

@ -37,4 +37,4 @@ before_install:
script:
- docker run --rm -it -h "$CURHOST" -e USEDASH=FALSE -e RUNF=OFF -e RUNCXX=OFF -e RUNP=OFF -e RUNNCO=OFF -e USECMAKE=$USECMAKE -e USEAC=$USEAC -e DISTCHECK=$DISTCHECK -e COPTS="$COPTS" -e AC_OPTS="$AC_OPTS" -e CTEST_OUTPUT_ON_FAILURE=1 -v $(pwd):/netcdf-c -e USE_LOCAL_CP=$USECP -e TESTPROC=100 $DOCKIMG
- docker run --rm -it -h "$CURHOST" -e USEDASH=FALSE -e RUNF=TRUE -e RUNCXX=OFF -e RUNP=OFF -e RUNNCO=OFF -e USECMAKE=$USECMAKE -e USEAC=$USEAC -e DISTCHECK=$DISTCHECK -e COPTS="$COPTS" -e AC_OPTS="$AC_OPTS" -e CTEST_OUTPUT_ON_FAILURE=1 -v $(pwd):/netcdf-c -e USE_LOCAL_CP=$USECP -e TESTPROC=100 $DOCKIMG

View File

@ -943,6 +943,11 @@ IF(ENABLE_LARGE_FILE_TESTS)
SET(LARGE_FILE_TESTS ON)
ENDIF()
OPTION(ENABLE_METADATA_PERF_TESTS "Enable test of metadata performance." OFF)
IF(ENABLE_METADATA_PERF_TESTS)
SET(ENABLE_METADATA_PERF ON)
ENDIF()
# Location for large file tests.
SET(TEMP_LARGE "." CACHE STRING "Location to store large file tests.")
@ -1814,7 +1819,11 @@ configure_file(
${netCDF_SOURCE_DIR}/netcdf.pc.in
${netCDF_BINARY_DIR}/netcdf.pc @ONLY)
FILE(MAKE_DIRECTORY ${netCDF_BINARY_DIR}/tmp)
IF(NOT IS_DIRECTORY ${netCDF_BINARY_DIR}/tmp)
FILE(MAKE_DIRECTORY ${netCDF_BINARY_DIR}/tmp)
ENDIF()
configure_file("${netCDF_SOURCE_DIR}/nc-config.cmake.in"
"${netCDF_BINARY_DIR}/tmp/nc-config" @ONLY
NEWLINE_STYLE LF)

3
cf
View File

@ -108,7 +108,8 @@ FLAGS="$FLAGS --enable-extreme-numbers"
#FLAGS="$FLAGS --disable-testsets"
#FLAGS="$FLAGS --disable-dap-remote-tests"
#FLAGS="$FLAGS --enable-dap-auth-tests" -- requires a new remotetest server
#FLAGS="$FLAGS --enable-doxygen --enable-internal-docs"
#FLAGS="$FLAGS --enable-doxygen"
#FLAGS="$FLAGS --enable-internal-docs"
FLAGS="$FLAGS --enable-logging"
FLAGS="$FLAGS --disable-diskless"
#FLAGS="$FLAGS --enable-mmap"

View File

@ -3,7 +3,7 @@
# Is netcdf-4 and/or DAP enabled?
NC4=1
DAP=1
CDF5=1
#CDF5=1
#HDF4=1
for arg in "$@" ; do

View File

@ -149,6 +149,16 @@ AC_ARG_ENABLE([internal-docs],
test "x$enable_internal_docs" = xyes || enable_internal_docs=no
AC_SUBST([BUILD_INTERNAL_DOCS], [$enable_internal_docs])
# Doxygen is apparently buggy when trying to combine a markdown
# file with @internal. The equivalent can be faked using
# the Doxygen ENABLED_SECTIONS mechanism. See docs/testserver.dox
# to see how this is done.
sections=
if test "x$enable_internal_docs" = xyes ; then
sections="$sections INTERNAL"
fi
AC_SUBST([ENABLED_DOC_SECTIONS], [$sections])
AC_MSG_CHECKING([if fsync support is enabled])
AC_ARG_ENABLE([fsync],
[AS_HELP_STRING([--enable-fsync],

View File

@ -606,7 +606,7 @@ GENERATE_DEPRECATEDLIST= YES
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
ENABLED_SECTIONS = @ENABLED_DOC_SECTIONS@
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
@ -749,21 +749,17 @@ INPUT = \
@abs_top_srcdir@/docs/install.md \
@abs_top_srcdir@/docs/install-fortran.md \
@abs_top_srcdir@/docs/types.dox \
@abs_top_srcdir@/docs/internal.dox \
@abs_top_srcdir@/docs/indexing.dox \
@abs_top_srcdir@/docs/windows-binaries.md \
@abs_top_srcdir@/docs/guide.dox \
@abs_top_srcdir@/docs/OPeNDAP.dox \
@abs_top_srcdir@/docs/DAP4.dox \
@abs_top_srcdir@/docs/attribute_conventions.md \
@abs_top_srcdir@/docs/file_format_specifications.md \
@abs_top_srcdir@/docs/OPeNDAP.dox \
@abs_top_srcdir@/docs/DAP4.dox \
@abs_top_srcdir@/docs/user_defined_formats.md \
@abs_top_srcdir@/docs/auth.md \
@abs_top_srcdir@/docs/filters.md \
@abs_top_srcdir@/docs/inmemory.md \
@abs_top_srcdir@/docs/notes.md \
@abs_top_srcdir@/docs/auth.md \
@abs_top_srcdir@/docs/filters.md \
@abs_top_srcdir@/docs/notes.md \
@abs_top_srcdir@/docs/all-error-codes.md \
@abs_top_srcdir@/docs/FAQ.md \
@abs_top_srcdir@/docs/known_problems.md \
@ -771,6 +767,9 @@ INPUT = \
@abs_top_srcdir@/docs/credits.md \
@abs_top_srcdir@/docs/bestpractices.md \
@abs_top_srcdir@/docs/tutorial.dox \
@abs_top_srcdir@/docs/internal.dox \
@abs_top_srcdir@/docs/indexing.dox \
@abs_top_srcdir@/docs/testserver.dox \
@abs_top_srcdir@/include/netcdf.h \
@abs_top_srcdir@/include/netcdf_mem.h \
@abs_top_srcdir@/include/netcdf_par.h \

View File

@ -1,12 +1,14 @@
netCDF Authorization Support
NetCDF Authorization Support
======================================
<!-- double header is needed to workaround doxygen bug -->
# netCDF Authorization Support {#Header}
NetCDF Authorization Support {#auth}
====================================
[TOC]
## Introduction {#Introduction}
## Introduction {#auth_intro}
netCDF can support user authorization using the facilities provided by the curl
library. This includes basic password authentication as well as
@ -23,7 +25,7 @@ The libcurl authorization mechanisms can be accessed in two ways
2. Accessing information from a so-called _rc_ file named either
`.daprc` or `.dodsrc`
## URL-Based Authentication {#URLAUTH}
## URL-Based Authentication {#auth_url}
For simple password based authentication, it is possible to
directly insert the username and the password into a url in this form.
@ -43,7 +45,7 @@ Note also that the `user:password` form may contain characters that must be
escaped. See the <a href="#USERPWDESCAPE">password escaping</a> section to see
how to properly escape the user and password.
## RC File Authentication {#DODSRC}
## RC File Authentication {#auth_dodsrc}
The netcdf library supports an _rc_ file mechanism to allow the passing
of a number of parameters to libnetcdf and libcurl.
Locating the _rc_ file is a multi-step process.
@ -118,7 +120,7 @@ Similarly,
will have HTTP.VERBOSE set to 0 because its host+port matches the example above.
## Authorization-Related Keys {#AUTHKEYS}
## Authorization-Related Keys {#auth_keys}
The currently defined set of authorization-related keys are as follows.
The second column is the affected curl_easy_setopt option(s), if any
@ -207,7 +209,7 @@ specifies the absolute path of the .netrc file.
See [redirection authorization](#REDIR)
for information about using .netrc.
## Password Escaping {#USERPWDESCAPE}
## Password Escaping {#auth_userpwdescape}
With current password rules, it is not unlikely that the password
will contain characters that need to be escaped. Similarly, the user
@ -231,7 +233,7 @@ The relevant escape codes are as follows.
</table>
Additional characters can be escaped if desired.
## Redirection-Based Authentication {#REDIR}
## Redirection-Based Authentication {#auth_redir}
Some sites provide authentication by using a third party site
to do the authentication. Examples include ESG, URS, RDA, and most oauth2-based
@ -282,7 +284,7 @@ to specify a real file in the file system to act as the
cookie jar file (HTTP.COOKIEJAR) so that the
redirect site can properly pass back authorization information.
## Client-Side Certificates {#CLIENTCERTS}
## Client-Side Certificates {#auth_clientcerts}
Some systems, notably ESG (Earth System Grid), requires
the use of client-side certificates, as well as being
@ -303,7 +305,7 @@ Note that the first two are there to support re-direction based authentication.
1. https://curl.haxx.se/libcurl/c/curl_easy_setopt.html
2. https://curl.haxx.se/docs/ssl-compared.html
## Appendix A. All RC-File Keys {#allkeys}
## Appendix A. All RC-File Keys {#auth_allkeys}
For completeness, this is the list of all rc-file keys.
If this documentation is out of date with respect to the actual code,
@ -330,7 +332,7 @@ the code is definitive.
<tr><td>HTTP.NETRC</td><td>CURLOPT_NETRC,CURLOPT_NETRC_FILE</td>
</table>
## Appendix B. URS Access in Detail {#URSDETAIL}
## Appendix B. URS Access in Detail {#auth_ursdetail}
It is possible to use the NASA Earthdata Login System (URS)
with netcdf by using the process specified in the
@ -340,7 +342,7 @@ register as a user with NASA at this website (subject to change):
https://uat.urs.earthdata.nasa.gov/
## Appendix C. ESG Access in Detail {#ESGDETAIL}
## Appendix C. ESG Access in Detail {#auth_esgdetail}
It is possible to access Earth Systems Grid (ESG) datasets
from ESG servers through the netCDF API using the techniques

View File

@ -8,11 +8,11 @@
The original internal representations of metadata in memory
relied on linear searching of lists to locate various objects
by name or by numeric id: e.g. _varid_ or _grpid_.
by name or by numeric id: by _varid_ or by _grpid_ for example.
In recent years, the flaws in that approach have become obvious
as users create files with extremely large numbers of objects:
group, variables, attributes, and dimensions. One case
groups, variables, attributes, and dimensions. One case
has 14 megabytes of metadata. Creating and (especially) later
opening such files was exceedingly slow.
@ -171,6 +171,14 @@ Note that currently, NCindex is only used in libsrc4 and libhdf4.
But if performance issues warrant, it will also be used in
libsrc.
Note also that alternative implementations are feasible that do not
use a hash table for name indexing, but rather keep a list sorted by name
and use binary search to do name-based lookup. If this alternative were
implemented, then it is probable that we could get rid of using the NC_hashmap
structure altogether for netcdf-4. There is a performance cost since binary
search is O(log n). In practice, it is probable that this is of negligible
effect. The advantage is that rename operations become considerably simpler.
\section Sglobal_object_access Global Object Access
As mentioned, dimension, group, and type external id's (dimid,
@ -179,11 +187,11 @@ convenient to store in memory a per-file vector for each object
type such that the external id of the object is the same as the
position of that object in the corresponding per-file
vector. This makes lookup by external id very efficient.
Note that this is was already the case for netcdf-3 (libsrc) so
Note that this was already the case for netcdf-3 (libsrc) so
this is a change for libsrc4 only.
The global set of dimensions, types, and groups is maintained by
three instances of NClist in the NC_HDF5_FILE_INFO structure:
three instances of NClist in the NC_FILE_INFO structure:
namely _alldims_, _alltypes_, and _allgroups_.
The position of the object within the corresponding list determines
the object's external id. Thus, a position of a dimension object within the
@ -194,7 +202,7 @@ for types and groups.
Each group object (NC_GRP_INFO_T) contains five instances of
NCindex. One is for dimensions, one is for types, one is for
subgroups, one is for variables, and one is for attributes. A
subgroups, one is for variables, and one is for attributes. An
index is used for two reasons. First, it allows name-based lookup
for these items. Second, the declaration order is maintained by
the list within the index's vector. Note that the position of
@ -265,7 +273,7 @@ if(g != NULL)
... code to process matching grp by name
}
\endcode
In this case, the iteration uses a hashtable.
In this case, the iteration is replaced by a hashtable lookup.
\subsection Slookupid Lookup an Object by id
@ -291,7 +299,7 @@ dimension.
In the new code, this iteration cliche is replaced by something
that will look like this.
\code
NC_HDF5_FILE_INFO_T* h5 = ...;
NC_FILE_INFO_T* h5 = ...;
NC_DIM_INFO_T* d;;
...
d = nclistget(h5->alldims,id);
@ -301,7 +309,7 @@ if(d != NULL)
\endcode
This shows how the alldims vector is used to map from a
dimid directly to the matching dimension object.
In this example, h5 is the NC_HDF5_FILE_INFO_T file object.
In this example, h5 is the NC_FILE_INFO_T file object.
This approach works for dimension ids, group ids, and type ids
because they are globally unique.
@ -364,6 +372,7 @@ no opportunity for lazy loading to be used.
The remedies of which I can conceive are these.
1. Modify the netcdf-c library to also do lazy loading
(work on this is under way).
2. Store a single metadata object into the file so it can
be read at one shot. This object would then be processed
in-memory to construct the internal metadata. The costs for
@ -373,7 +382,7 @@ The remedys of which I can conceive are these.
It should be noted that there is an effect from this change.
Using gprof, one can see that in the original code the obj_list_add
function was the dominate function called by a large percentage (about 20%).
Whereas with the new code, the function call distribute is more much more
Whereas with the new code, the function call distribution is much more
even with no function taking more than 4-5%.
Some other observations:
@ -400,7 +409,7 @@ Some other observations:
4. Renaming is still a costly operation because it causes
the whole containing index to be rebuilt.
5. As in the original code, object ids (dimid, etc) are assigned
explicitly using counters within the NC_HDF5_FILE_INFO_T object.
explicitly using counters within the NC_FILE_INFO_T object.
When stored into, for example, "alldims", the position of the
object is forcibly made to match the value of the assigned id.
6. The file nchashmap.c has a constant, SMALLTABLE, that controls

View File

@ -78,6 +78,8 @@ Requirements {#netcdf_requirements}
* HDF5 1.10.1 or later.
* zlib 1.2.5 or later (for netCDF-4 compression)
* curl 7.18.0 or later (for DAP remote access client support)
* For parallel I/O support on classic netCDF files
* PnetCDF 1.6.0 or later
> **Important Note**: When building netCDF-C library versions older than 4.4.1, use only HDF5 1.8.x versions. Combining older netCDF-C versions with newer HDF5 1.10 versions will create superblock 3 files that are not readable by lots of older software. See <a href="http://www.unidata.ucar.edu/blogs/news/entry/netcdf-4-4-1">this announcement</a> for more details.
@ -210,7 +212,7 @@ to prevent it from building an HDF4 version of the netCDF-2 library
that conflicts with the netCDF-2 functions that are built into the Unidata
netCDF library.
Then, when building netCDF-4, use the `--enable-hdf4`.
Then, when building netCDF-4, use the `--enable-hdf4`
option to configure. The location for the HDF4 header files and
library must be specified in the CPPFLAGS and LDFLAGS environment variables
or configure options.
@ -245,7 +247,7 @@ Building with Parallel I/O Support {#build_parallel}
--------------
For parallel I/O to work, HDF5 must be installed with
enable-parallel, and an MPI library (and related libraries) must be
`--enable-parallel`, and an MPI library (and related libraries) must be
made available to the HDF5 configure. This can be accomplished with
an mpicc wrapper script.
@ -271,19 +273,33 @@ From the top-level netCDF-4 source directory, the following builds netCDF-4 with
$ make install
~~~~
If parallel I/O access to netCDF classic and 64-bit offset files is
needed, an alternate
[parallel-netcdf library](https://trac.mcs.anl.gov/projects/parallel-netcdf/wiki/WikiStart),
referred to as "PnetCDF", must also be installed. Assume it was
installed in the directory named by the PNDIR shell variable.
Then, from the top-level netCDF-4 source directory, configure netCDF
with the "--enable-pnetcdf" option:
### Building PnetCDF from source {#build_pnetcdf_from_source}
To enable parallel I/O support for classic netCDF files, i.e. CDF-1, 2 and 5
formats, [PnetCDF library](https://parallel-netcdf.github.io) must also be
installed. First specify where you want to install PnetCDF in a shell
variable, for example PNDIR, and build it from the PnetCDF top-level source
directory. If you would like to build the shared library, include
`--enable-shared` option at the configure command line. By default, only a
static library is built.
~~~~{.py}
$ # Build and install PnetCDF
$ PNDIR=/usr/local
$ ./configure --prefix=${PNDIR} --with-mpi=/path/to/MPI/compilers
$ make check
$ make install # or sudo make install, if root permissions required
~~~~
To build netCDF-4 with PnetCDF support, from the top-level netCDF-4 source
directory, configure netCDF with the "--enable-pnetcdf" option. If PnetCDF
is built with static library only, add "--disable-shared" option.
~~~~{.py}
$ # Build, test, and install netCDF-4 with pnetcdf support
$ CC=mpicc CPPFLAGS="-I${H5DIR}/include -I${PNDIR}/include" \
LDFLAGS="-L${H5DIR}/lib -L${PNDIR}/lib" ./configure \
--disable-shared --enable-pnetcdf --enable-parallel-tests \
--enable-pnetcdf --enable-parallel-tests \
--prefix=${NCDIR}
$ make check
$ make install
@ -292,18 +308,18 @@ with the "--enable-pnetcdf" option:
Linking to netCDF-C {#linking}
-------------------
For static builds of applications that use netCDF-4 you must link to all the libraries,
netCDF, HDF5, zlib, szip (if used with HDF5 build), and curl (if the
remote access client has not been disabled). This will require -L options
to your build for the locations of the libraries, and -l (lower-case
L) for the names of the libraries.
For static builds of applications that use netCDF-4 you must link to all the
libraries, netCDF, HDF5, zlib, szip (if used with HDF5 build), pnetcdf (if used
with PnetCDF build), and curl (if the remote access client has not been
disabled). This will require -L options to your build for the locations of the
libraries, and -l (lower-case L) for the names of the libraries.
For example, you might build other applications with netCDF-4 by
setting the LIBS environment variable, assuming NCDIR, H5DIR, and ZDIR
indicate where netCDF, HDF5, and zlib are installed:
setting the LIBS environment variable, assuming NCDIR, H5DIR, PNDIR, and ZDIR
indicate where netCDF, HDF5, PnetCDF, and zlib are installed:
~~~~{.py}
LIBS="-L${NCDIR}/lib -lnetcdf -L${H5DIR}/lib -lhdf5_hl -lhdf5 -L${ZDIR}/lib -lz -lm"
LIBS="-L${NCDIR}/lib -lnetcdf -L${H5DIR}/lib -lhdf5_hl -lhdf5 -L${PNDIR}/lib -lpnetcdf -L${ZDIR}/lib -lz -lm"
~~~~
For shared builds, only `-L${NCDIR}/lib -lnetcdf` is
@ -336,16 +352,16 @@ Note: `--disable` prefix indicates that the option is normally enabled.
<tr><th>Option<th>Description<th>Dependencies
<tr><td>--disable-doxygen<td>Disable generation of documentation.<td>doxygen
<tr><td>--disable-fsync<td>disable fsync support<td>kernel fsync support
<tr><td>--disable-netcdf-4<td>build netcdf-3 without HDF5 and zlib<td>
<tr><td>--disable-netcdf4<td>synonym for disable-netcdf-4
<tr><td>--disable-netcdf4<td>synonym for disable-netcdf-4<td>
<tr><td>--enable-hdf4<td>build netcdf-4 with HDF4 read capability<td>HDF4, HDF5 and zlib
<tr><td>--enable-hdf4-file-tests<td>test ability to read HDF4 files<td>selected HDF4 files from Unidata ftp site
<tr><td>--enable-pnetcdf<td>build netcdf-4 with parallel I/O for classic and
64-bit offset files using parallel-netcdf
<tr><td>--disable-parallel4<td>build netcdf-4 without parallel I/O support<td>
<tr><td>--disable-cdf5<td>build netcdf-4 without support of classic CDF-5 file format<td>
<tr><td>--enable-pnetcdf<td>build netcdf-4 with parallel I/O for classic files (CDF-1, 2, and 5 formats) using PnetCDF<td>PnetCDF
<tr><td>--enable-extra-example-tests<td>Run extra example tests<td>--enable-netcdf-4,GNU sed
<tr><td>--disable-filter-testing<td>Run filter example<td>--enable-shared --enable-netcdf-4
<tr><td>--enable-parallel-tests <td>run extra parallel IO tests<td>--enable-netcdf-4, parallel IO support
<tr><td>--enable-parallel-tests <td>run extra parallel IO tests<td>--enable-netcdf-4 or --enable-pnetcdf, parallel IO support
<tr><td>--enable-logging<td>enable logging capability<td>--enable-netcdf-4
<tr><td>--disable-dap<td>build without DAP client support.<td>libcurl
<tr><td>--disable-dap-remote-tests<td>disable dap remote tests<td>--enable-dap
@ -391,6 +407,7 @@ The following packages are required to build netCDF-C using CMake.
* Optional Requirements:
* HDF5 Libraries for netCDF4/HDF5 support.
* libcurl for DAP support.
* PnetCDF libraries for parallel I/O support to classic netCDF files
<center>
<img src="deptree.jpg" height="250px" />
@ -425,7 +442,11 @@ Enable/Disable netCDF-4 | --enable-netcdf-4<br>--disable-netcdf-4 | -D"ENABLE_NE
Enable/Disable DAP | --enable-dap <br> --disable-dap | -D"ENABLE_DAP=ON" <br> -D"ENABLE_DAP=OFF"
Enable/Disable Utilities | --enable-utilities <br> --disable-utilities | -D"BUILD_UTILITIES=ON" <br> -D"BUILD_UTILITIES=OFF"
Specify shared/Static Libraries | --enable-shared <br> --enable-static | -D"BUILD_SHARED_LIBS=ON" <br> -D"BUILD_SHARED_LIBS=OFF"
Enable/Disable Parallel netCDF-4 | --enable-parallel4 <br> --disable-parallel4 | -D"ENABLE_PARALLEL4=ON" <br> -D"ENABLE_PARALLEL4=OFF"
Enable/Disable PnetCDF | --enable-pnetcdf<br>--disable-pnetcdf | -D"ENABLE_PNETCDF=ON" <br> -D"ENABLE_PNETCDF=OFF"
Enable/Disable CDF5 | --enable-cdf5 <br> --disable-cdf5 | -D"ENABLE_CDF5=ON" <br> -D"ENABLE_CDF5=OFF"
Enable/Disable Tests | --enable-testsets <br> --disable-testsets | -D"ENABLE_TESTS=ON" <br> -D"ENABLE_TESTS=OFF"
Enable/Disable Parallel Tests | --enable-parallel-tests<br> --disable-parallel-tests | -D"ENABLE_PARALLEL_TESTS=ON" <br> -D"ENABLE_PARALLEL_TESTS=OFF"
Specify a custom library location | Use *CFLAGS* and *LDFLAGS* | -D"CMAKE_PREFIX_PATH=/usr/custom_libs/"
A full list of *basic* options can be found by invoking `cmake [Source Directory] -L`. To enable a list of *basic* and *advanced* options, one would invoke `cmake [Source Directory] -LA`.

View File

@ -4,6 +4,8 @@
\page nc_dispatch Internal Dispatch Table Architecture
\tableofcontents
This document describes the architecture and details of the netCDF
internal dispatch mechanism. The idea is that when a user opens or
creates a netcdf file, a specific dispatch table is chosen.
@ -28,97 +30,85 @@ To date, at least the following dispatch tables are supported.
- pnetcdf (parallel cdf5)
- HDF4 SD files
Internal Dispatch Tables
- \subpage adding_dispatch
- \subpage put_vara_dispatch
- \subpage put_attr_dispatch
The dispatch table represents a distillation of the netcdf API down to
a minimal set of internal operations. The format of the dispatch table
is defined in the file libdispatch/ncdispatch.h. Every new dispatch
table must define this minimal set of operations.
\page adding_dispatch Adding a New Dispatch Table
\tableofcontents
\section adding_dispatch Adding a New Dispatch Table
In order to make this process concrete, let us assume we plan to add
an in-memory implementation of netcdf-3.
\section dispatch_configure_ac Defining configure.ac flags
\subsection dispatch_configure_ac Defining configure.ac flags
Define a -enable flag and an AM_CONFIGURE flag in configure.ac.
Define a _-enable_ flag and an _AM_CONFIGURE_ flag in _configure.ac_.
For our example, we assume the option "--enable-ncm" and the AM_CONFIGURE
flag "ENABLE_NCM". If you examine the existing configure.ac and see how,
for example, dap2 is defined, then it should be clear how to do it for
flag "ENABLE_NCM". If you examine the existing _configure.ac_ and see how,
for example, _DAP2_ is defined, then it should be clear how to do it for
your code.
\section dispatch_namespace Defining a "name space"
\subsection dispatch_namespace Defining a "name space"
Choose some prefix of characters to identify the new dispatch
system. In effect we are defining a name-space. For our in-memory
system, we will choose "NCM" and "ncm". NCM is used for non-static
procedures to be entered into the dispatch table and ncm for all other
non-static procedures.
non-static procedures. Note that the chosen prefix should probably start
with "nc" in order to avoid name conflicts outside the netcdf-c library.
\section dispatch_netcdf_h Extend include/netcdf.h
\subsection dispatch_netcdf_h Extend include/netcdf.h
Modify file include/netcdf.h to add an NC_FORMATX_XXX flag
Modify the file _include/netcdf.h_ to add an NC_FORMATX_XXX flag
by adding a flag for this dispatch format at the appropriate places.
\code
#define NC_FORMATX_NCM 7
\c \#define NC_FORMATX_NCM 7
\endcode
Add any format specific new error codes.
\code
#define NC_ENCM (?)
\c \#define NC_ENCM (?)
\endcode
\section dispatch_ncdispatch Extend include/ncdispatch.h
Modify file include/ncdispatch.h as follows.
Add format specific data and functions; note the of our NCM namespace.
\subsection dispatch_ncdispatch Extend include/ncdispatch.h
Modify the file _include/ncdispatch.h_ to
add format specific data and functions; note the use of our NCM namespace.
\code
#ifdef ENABLE_NCM
extern NC_Dispatch* NCM_dispatch_table;
extern int NCM_initialize(void);
#endif
#ifdef ENABLE_NCM
extern NC_Dispatch* NCM_dispatch_table;
extern int NCM_initialize(void);
#endif
\endcode
\section dispatch_define_code Define the dispatch table functions
\subsection dispatch_define_code Define the dispatch table functions
Define the functions necessary to fill in the dispatch table. As a
rule, we assume that a new directory is defined, libsrcm, say. Within
this directory, we need to define Makefile.am and CMakeLists.txt.
rule, we assume that a new directory is defined, _libsrcm_, say. Within
this directory, we need to define _Makefile.am_ and _CMakeLists.txt_.
We also need to define the source files
containing the dispatch table and the functions to be placed in the
dispatch table call them ncmdispatch.c and ncmdispatch.h. Look at
libsrc/nc3dispatch.[ch] or libdap4/ncd4dispatch.[ch] for examples.
dispatch table - call them _ncmdispatch.c_ and _ncmdispatch.h_. Look at
_libsrc/nc3dispatch.[ch]_ or _libdap4/ncd4dispatch.[ch]_ for examples.
Similarly, it is best to take existing Makefile.am and CMakeLists.txt
files (from libdap4 for example) and modify them.
Similarly, it is best to take existing _Makefile.am_ and _CMakeLists.txt_
files (from _libdap4_ for example) and modify them.
\section dispatch_lib Adding the dispatch code to libnetcdf
\subsection dispatch_lib Adding the dispatch code to libnetcdf
Provide for the inclusion of this library in the final libnetcdf
library. This is accomplished by modifying liblib/Makefile.am by
library. This is accomplished by modifying _liblib/Makefile.am_ by
adding something like the following.
\code
if ENABLE_NCM
libnetcdf_la_LIBADD += $(top_builddir)/libsrcm/libnetcdfm.la
endif
\endcode
\section dispatch_init Extend library initialization
\subsection dispatch_init Extend library initialization
Modify the NC_initialize function in liblib/nc_initialize.c by adding
Modify the _NC_initialize_ function in _liblib/nc_initialize.c_ by adding
appropriate references to the NCM dispatch function.
\code
#ifdef ENABLE_NCM
extern int NCM_initialize(void);
@ -127,7 +117,7 @@ appropriate references to the NCM dispatch function.
int NC_initialize(void)
{
...
#ifdef USE_DAP
#ifdef ENABLE_NCM
if((stat = NCM_initialize())) return stat;
#endif
...
@ -136,9 +126,8 @@ appropriate references to the NCM dispatch function.
\section dispatch_tests Testing the new dispatch table
Add a directory of tests; ncm_test, say. The file ncm_test/Makefile.am
Add a directory of tests: _ncm_test_, say. The file _ncm_test/Makefile.am_
will look something like this.
\code
# These files are created by the tests.
CLEANFILES = ...
@ -155,8 +144,8 @@ will look something like this.
\section dispatch_toplevel Top-Level build of the dispatch code
Provide for libnetcdfm to be constructed by adding the following to
the top-level Makefile.am.
Provide for _libnetcdfm_ to be constructed by adding the following to
the top-level _Makefile.am_.
\code
if ENABLE_NCM
@ -170,40 +159,40 @@ the top-level Makefile.am.
\section choosing_dispatch_table Choosing a Dispatch Table
The dispatch table is chosen in the NC_create and the NC_open
procedures in libdispatch/netcdf.c.
procedures in _libdispatch/netcdf.c_.
This can be, unfortunately, a complex process.
The choice is made in _NC_create_ and _NC_open_ in _libdispatch/dfile.c_.
The decision is currently based on the following pieces of information.
Using a mode flag is the most common mechanism, in which case
netcdf.h needs to be modified to define the relevant mode flag.
In any case, the choice of dispatch table is currently based on the following
pieces of information.
1. The mode argument - this can be used to detect, for example, what kind
of file to create: netcdf-3, netcdf-4, 64-bit netcdf-3, etc. For
nc_open and when the file path references a real file, the contents of
the file can also be used to determine the dispatch table. Although
currently not used, this code could be modified to also use other
pieces of information such as environment variables.
of file to create: netcdf-3, netcdf-4, 64-bit netcdf-3, etc.
Using a mode flag is the most common mechanism, in which case
_netcdf.h_ needs to be modified to define the relevant mode flag.
2. The file path - this can be used to detect, for example, a DAP url
versus a normal file system file.
versus a normal file system file. If the path looks like a URL, then
the choice is determined using the function _NC_urlmodel_.
When adding a new dispatcher, it is necessary to modify NC_create and
NC_open in libdispatch/dfile.c to detect when it is appropriate to
use the NCM dispatcher. Some possibilities are as follows.
- Add a new mode flag: say NC_NETCDFM.
- Define a special file path format that indicates the need to use a
special dispatch table.
3. The file contents - when the contents of a real file are available,
the contents of the file can be used to determine the dispatch table.
As a rule, this is likely to be useful only for _nc_open_.
4. Environment variables - this option is currently not used,
but information such as environment variables could be used to determine
the choice of dispatch table.
\section special_dispatch Special Dispatch Table Signatures.
Several of the entries in the dispatch table are significantly
different than those of the external API.
The entries in the dispatch table do not necessarily correspond
to the external API. In many cases, multiple related API functions
are merged into a single dispatch table entry.
\subsection create_open_dispatch Create/Open
The create table entry and the open table entry in the dispatch table
have the following signatures respectively.
\code
int (*create)(const char *path, int cmode,
size_t initialsz, int basepe, size_t *chunksizehintp,
@ -223,13 +212,13 @@ create/open signatures from the include/netcdfXXX.h files. Note especially the l
three parameters. The parameters argument is a pointer to arbitrary data
to provide extra info to the dispatcher.
The table argument is included in case the create
function (e.g. NCM_create) needs to invoke other dispatch
function (e.g. _NCM_create_) needs to invoke other dispatch
functions. The very last argument, ncp, is a pointer to an NC
instance. The raw NC instance will have been created by libdispatch/dfile.c
instance. The raw NC instance will have been created by _libdispatch/dfile.c_
and is passed to e.g. open with the expectation that it will be filled in
by the dispatch open function.
\page put_vara_dispatch Accessing Data with put_vara() and get_vara()
\subsection put_vara_dispatch Accessing Data with put_vara() and get_vara()
\code
int (*put_vara)(int ncid, int varid, const size_t *start, const size_t *count,
@ -248,7 +237,7 @@ memtype will be either ::NC_INT or ::NC_INT64, depending on the value
of sizeof(long). This means that even netcdf-3 code must be prepared
to encounter the ::NC_INT64 type.
\page put_attr_dispatch Accessing Attributes with put_attr() and get_attr()
\subsection put_attr_dispatch Accessing Attributes with put_attr() and get_attr()
\code
int (*get_att)(int ncid, int varid, const char *name,
@ -263,16 +252,16 @@ to encounter the ::NC_INT64 type.
Again, the key difference is the memtype parameter. As with
put/get_vara, it uses ::NC_INT64 to encode the long case.
\subsection Pre-defined Dispatch Functions
\subsection pre_def_dispatch Pre-defined Dispatch Functions
It is usually not necessary to implement all the functions in the
It is sometimes not necessary to implement all the functions in the
dispatch table. Some pre-defined functions are available which may be
used in many cases.
\subsubsection Inquiry Functions
\subsubsection inquiry_functions Inquiry Functions
The netCDF inquiry functions operate from an in-memory model of
metadata. Once a file has been opened, or as a file is created, this
metadata. Once a file is opened, or a file is created, this
in-memory metadata model is kept up to date. Consequently the inquiry
functions do not depend on the dispatch layer code. These functions
can be used by all dispatch layers which use the internal netCDF
@ -304,25 +293,32 @@ enhanced data model.
- NC4_inq_user_type
- NC4_inq_typeid
\subsubsection NCDEFAULT get/put Functions
\subsubsection ncdefault_functions NCDEFAULT get/put Functions
The strided (vars) and mapped (varm) get/put functions have been
The mapped (varm) get/put functions have been
implemented in terms of the array (vara) functions. So dispatch layers
need only implement the vara functions, and can use the following
functions to get the vars and varm functions:
functions to get the varm functions:
- NCDEFAULT_get_vars
- NCDEFAULT_put_vars
- NCDEFAULT_get_varm
- NCDEFAULT_put_varm
\subsubsection Read-Only Functions
For the netcdf-3 format, the strided functions (nc_get/put_vars)
are similarly implemented in terms of the vara functions. So the following
convenience functions are available.
- NCDEFAULT_get_vars
- NCDEFAULT_put_vars
For the netcdf-4 format, the vars functions actually exist, so
the default vars functions are not used.
\subsubsection read_only_functions Read-Only Functions
Some dispatch layers are read-only (ex. HDF4). Any function which
writes to a file, including nc_create(), needs to return error code
::NC_EPERM. The following read-only functions are available so that
these don't have to be re-implemented in each read-only dispatch
layer:
these don't have to be re-implemented in each read-only dispatch layer:
- NC_RO_create
- NC_RO_redef
@ -339,7 +335,7 @@ layer:
- NC_RO_put_vara
- NC_RO_def_var_fill
\subsubsection Classic NetCDF Only Functions
\subsubsection classic_functions Classic NetCDF Only Functions
There are two functions that are only used in the classic code. All
other dispatch layers (except pnetcdf) return error ::NC_ENOTNC3 for
@ -349,7 +345,7 @@ purpose:
- NOTNC3_inq_base_pe
- NOTNC3_set_base_pe
\section HDF4 Dispatch Layer as a Simple Example
\section dispatch_layer HDF4 Dispatch Layer as a Simple Example
The HDF4 dispatch layer is about the simplest possible dispatch
layer. It is read-only, classic model. It will serve as a nice, simple
@ -358,21 +354,28 @@ example of a dispatch layer.
Note that the HDF4 layer is optional in the netCDF build. Not all
users will have HDF4 installed, and those users will not build with
the HDF4 dispatch layer enabled. For this reason HDF4 code is guarded
with #ifdef(USE_HDF4). Code in libhdf4 is only compiled if HDF4 is
as follows.
\code
\c \#ifdef USE_HDF4
...
\c \#endif /*USE_HDF4*/
\endcode
Code in libhdf4 is only compiled if HDF4 is
turned on in the build.
\subsection Header File Changes in include Directory
\subsection header_changes Header File Changes in include Directory
\subsubsection The netcdf.h File
\subsubsection netcdf_h_file The netcdf.h File
In the main netcdf.h file, we have the following:
\code
#define NC_FORMATX_NC_HDF4 (3) /**< netCDF-4 subset of HDF4 */
#define NC_FORMAT_NC_HDF4 NC_FORMATX_NC_HDF4 /**< \deprecated As of 4.4.0, use NC_FORMATX_NC_HDF4 */
\c \#define NC_FORMATX_NC_HDF4 (3)
\c \#define NC_FORMAT_NC_HDF4 NC_FORMATX_NC_HDF4
\endcode
\subsubsection The ncdispatch.h File
\subsubsection ncdispatch_h_file The ncdispatch.h File
In ncdispatch.h we have the following:
@ -384,25 +387,24 @@ extern int HDF4_finalize(void);
#endif
\endcode
\subsubsection The netcdf_meta.h File
\subsubsection netcdf_meta_h_file The netcdf_meta.h File
The netcdf_meta.h file allows for easy determination of what features
are in use. It contains the following, set by configure:
\code
#define NC_HAS_HDF4 1 /*!< hdf4 support. */
\c \#define NC_HAS_HDF4 1 /*!< hdf4 support. */
\endcode
\subsubsection The hdf4dispatch.h File
\subsubsection hdf4dispatch_h_file The hdf4dispatch.h File
We also have the file hdf4dispatch.h, which contains prototypes and
The file _hdf4dispatch.h_ contains prototypes and
macro definitions used within the HDF4 code in libhdf4. This include
file should not be used anywhere except in libhdf4.
\subsection Initialization Code Changes in liblib Directory
In the file nc_initialize.c we have the following:
\subsection liblib_init Initialization Code Changes in liblib Directory
The file _nc_initialize.c_ is modified to include the following:
\code
#ifdef USE_HDF4
extern int HDF4_initialize(void);
@ -410,18 +412,18 @@ extern int HDF4_finalize(void);
#endif
\endcode
\subsection Dispatch Code Changes in libdispatch Directory
\subsection libdispatch_changes Dispatch Code Changes in libdispatch Directory
\subsubsection Changes to dfile.c
\subsubsection dfile_c_changes Changes to dfile.c
In order for a dispatch layer to be used, it must be correctly
determined in functions NC_open() or NC_create() in dfile.c.
determined in functions _NC_open()_ or _NC_create()_ in _libdispatch/dfile.c_.
HDF4 has a magic number that is detected in
NC_interpret_magic_number(), which allows NC_open to automatically
detect a HDF4 file. Other dispatch layers may need to set a mode flag.
_NC_interpret_magic_number()_, which allows _NC_open_ to automatically
detect an HDF4 file.
Once HDF4 is detected, model variable is set to NC_FORMATX_NC_HDF4,
Once HDF4 is detected, the _model_ variable is set to _NC_FORMATX_NC_HDF4_,
and later this is used in a case statement:
\code
@ -433,13 +435,12 @@ and later this is used in a case statement:
This sets the dispatcher to the HDF4 dispatcher, which is defined in
the libhdf4 directory.
\subsection Dispatch Code in libhdf4
\subsection libhdf4_dispatch_code Dispatch Code in libhdf4
\subsubsection Dispatch Table in hdf4dispatch.c
\subsubsection hdf4dispatch_c_table Dispatch Table in hdf4dispatch.c
The file hdf4dispatch.c contains the definition of the HDF4 dispatch
The file _hdf4dispatch.c_ contains the definition of the HDF4 dispatch
table. It looks like this:
\code
/* This is the dispatch object that holds pointers to all the
* functions that make up the HDF4 dispatch interface. */
@ -472,13 +473,13 @@ the HDF4 dispatch layer. There are 6 such functions:
- NC_HDF4_inq_format_extended
- NC_HDF4_get_vara
\subsubsection HDF4 Reading Code
\subsubsection hdf4_reading_code HDF4 Reading Code
The code in hdf4file.c opens the HDF4 SD dataset, and reads the
The code in _hdf4file.c_ opens the HDF4 SD dataset, and reads the
metadata. This metadata is stored in the netCDF internal metadata
model, allowing the inq functions to work.
The code in hdf4var.c does an nc_get_vara() on the HDF4 SD
The code in _hdf4var.c_ does an _nc_get_vara()_ on the HDF4 SD
dataset. This is all that is needed for all the nc_get_* functions to
work.

189
docs/testserver.dox Executable file
View File

@ -0,0 +1,189 @@
/**
@if INTERNAL
@page remotesvc Remote Test Server Setup
\tableofcontents
<!-- Note that this file has the .dox extension, but is mostly markdown -->
<!-- Begin MarkDown -->
# Introduction {#remotesvc_intro}
The test suite for the DAP2 and DAP4 (aka DAP) protocols optionally
uses a continuously running test server to verify that the
DAP protocols can successfully be used against a remote server such
as a Unidata Thredds server or an OPeNDAP Hyrax server.
The way that this is tested is to establish two servlets running
under a Tomcat server on some machine with a known IP address
or DNS name. For example, accessing this URL.
````
http://149.165.169.123:8080/dts/
````
will bring up a page with a variety of DAP2 accessible files.
These files are accessed by the netcdf-c test case
_netcdf-c/ncdap_test/tst_remote3.sh_
to verify that the whole
transmission and conversion process worked correctly.
Similarly, the DAP4 test case is
_netcdf-c/ncdap_test/test_remote.sh_
and it accesses the test server web page with this URL.
````
http://149.165.169.123:8080/d4ts/
````
Setting up and Maintaining the test server involves a somewhat complex
set of tasks. This document shows how to do those tasks.
# Tomcat Setup {#remotesvc_tomcat}
The initial task is to setup a Tomcat server on some machine.
Currently the remote server is operating on Jetstream on an Ubuntu Linux
environment.
Assuming you do not already have Tomcat setup, the necessary steps are
as follows.
1. Login to your machine - let us call it "testserver.unidata.edu".
2. Use the package manager to install two packages using commands like this.
````
sudo apt-get install tomcat7
sudo apt-get install tomcat7-admin
````
Note that this installs Tomcat version 7. Later versions should also work.
3. Go to the tomcat7 installation, probably ''/usr/share/tomcat7'',
and modify the configuration as described in the next section.
4. Startup tomcat using the following commands.
````
cd /usr/share/tomcat7/bin
./startup.sh
````
It is possible that the startup command may need to be executed with ''sudo''.
5. Verify that tomcat is running by trying this URL in your web browser.
````
http://testserver.unidata.edu:8080
````
The port -- 8080 -- is the default for an installed tomcat. It may be that
you will need to change it as part of the configuration process.
6. Try the administration web page
````
http://testserver.unidata.edu:8080/manager
````
This should pop up a user+password dialog box. You should fill it in
with the values you chose during configuration.
The resulting final page will be the one you use to get the dts and d4ts
servlets operating.
7. Use the commands
````
cd /usr/share/tomcat7/bin
./shutdown.sh
````
to stop the Tomcat server.
# Configuring Tomcat {#remotesvc_cfg}
In the primary Tomcat directory -- probably /usr/share/tomcat7 --
there should be a ''conf'' directory, and in that directory,
you will need to edit the file ''tomcat-users.xml''.
## Configuring tomcat-users.xml {#remotesvc_tomcat_users}
The goal here is primarily to setup a user that can login to the
Tomcat admin page to upload the dts and d4ts servlets.
Let us assume that the username we want to use is ''dapadmin''
with password ''dap''.
Edit the ''tomcat-users.xml'' file.
You will probably have to use ''sudo'' to access the file.
Insert the following near the end of the file.
````
<role rolename="manager-gui"/>
<user username="dapadmin" password="dap" roles="admin,manager-gui"/>
````
## Configuring server.xml {#remotesvc_config_server_xml}
As a rule, the default settings in ''server.xml'' will work
and provide insecure access to the Tomcat server via port 8080.
This is ok if the test server is running nothing but the d4ts and dts
test servlets. Otherwise, you should investigate how to
get the Tomcat server to use <em>https</em>.
# Loading dts.war and d4ts.war {#remotesvc_load_war}
The next step is to actually upload the servlet files for the DAP
test servlets. We assume that you have on your local disk two files:
''dts.war'' and ''d4ts.war''. Building those servlet war files is
described in a separate section.
The specific tasks are as follows:
1. Open the web page
````
http://testserver.unidata.edu:8080/manager
````
Scroll down to the block labelled ''WAR file to deploy'',
which should have two buttons: ''Browse'' and ''Deploy''.
2. Clicking on the browse button should bring up a file dialog box.
Use the dialog box to choose ''dts.war''.
3. After choosing that file, click the ''Deploy'' button.
After some delay, this should add
a new entry in the first column near the top of the page.
That entry should be named ''/dts''.
4. If you click the ''/dts'' in the first column, then it should take you
to the DAP2 test server main page.
5. Repeat to load ''d4ts.war'' as ''/d4ts''.
# Testing the Test Server {#remotesvc_testing}
You can now test the test server by configuring and building
the netcdf-c library. In order to force the use
of the newly started test server you can either:
1. Add the option
````
--with-testservers=testserver.unidata.edu
````
This will override the defaults.
2. Edit configure.ac and find the string "svclist".
Replace the list with "testserver.unidata.edu".
This will make the new test server be the default.
After building the library, go into the directory ''ncdap_test''
and do a ''make check'' command. This should succeed.
Similarly enter the directory ''dap4_test'' and do a make check
to verify that the d4ts test server is working.
# Building d4ts.war and dts.war {#remotesvc_buildservlets}
In order to build the servlet (.war) files, you will need to
clone the Thredds directory on github: https://github.com/Unidata/thredds.
Once you have a clone, you will need to enter the thredds directory
and build by invoking gradle using this command.
````
./gradlew --info --no-daemon clean assemble
````
If successful, the following two files should exist
(the names may vary slightly).
1. ./dap4/d4ts/build/libs/d4ts-5.0.0-SNAPSHOT.war
2. ./opendap/dtswar/build/libs/dtswar-5.0.0-SNAPSHOT.war
Copy the two files to some more accessible place and rename them
to ''d4ts.war'' and ''dts.war'' respectively.
Use those files to upload the servlets to your Tomcat server
as described above.
# Point of Contact {#remotesvc_poc}
__Author__: Dennis Heimbigner<br>
__Email__: dmh at ucar dot edu<br>
__Initial Version__: 6/26/2018<br>
__Last Revised__: 6/28/2018
<!-- End MarkDown -->
@endif
*/

View File

@ -12,6 +12,11 @@ User-defined formats allow users to write their own adaptors for the
netCDF C library, so that it can read and (optionally) write a
proprietary format through the netCDF API.
This capability is currently experimental. It involves the exposing of internal
netcdf interfaces and data structures that were previously invisible to users.
This means that it is unstable and the exposed interfaces are subject to change.
Use with caution.
User-defined format code is packaged into a separate library, the
user-defined format dispatch library. This library, when linked with
the netCDF library, will allow user programs to read their proprietary
@ -71,7 +76,7 @@ add the location of the user-defined format dispatch library include
file to the CPPFLAGS, and the location of the user-defined format
dispatch library in LDFLAGS.
Configure netcdf-c with the option --with-udf0=<udf_lib_name>.
Configure netcdf-c with the option ````--with-udf0=<udf_lib_name>````.
If a magic number is associated with the user-defined format, it can
be specified with the --with-udf0-magic-number= argument.

View File

@ -512,10 +512,10 @@ EXTERNL const char *
nc_strerror(int ncerr);
/* Set up user-defined format. */
typedef struct NC_Dispatch NC_Dispatch;
typedef struct NC_Dispatch NC_Dispatch;
EXTERNL int
nc_def_user_format(int mode_flag, NC_Dispatch *dispatch_table, char *magic_number);
EXTERNL int
nc_inq_user_format(int mode_flag, NC_Dispatch **dispatch_table, char *magic_number);
@ -1960,12 +1960,26 @@ ncrecget(int ncid, long recnum, void **datap);
EXTERNL int
ncrecput(int ncid, long recnum, void *const *datap);
EXTERNL int nc_finalize();
EXTERNL int nc_finalize(void);
#if defined(__cplusplus)
}
#endif
/* Define two hard-coded functionality-related
(as requested by community developers) macros.
This is not going to be standard practice.
Don't remove without an in-place replacement of some sort,
the are now (for better or worse) used by downstream
software external to Unidata. */
#ifndef NC_HAVE_RENAME_GRP
#define NC_HAVE_RENAME_GRP /*!< rename_grp() support. */
#endif
#ifndef NC_HAVE_INQ_FORMAT_EXTENDED
#define NC_HAVE_INQ_FORMAT_EXTENDED /*!< inq_format_extended() support. */
#endif
#define NC_HAVE_META_H
#endif /* _NETCDF_ */

View File

@ -2042,8 +2042,8 @@ NC_create(const char *path0, int cmode, size_t initialsz,
if ((stat = check_create_mode(cmode)))
return stat;
/* Initialize the dispatch table. The function pointers in the
* dispatch table will depend on how netCDF was built
/* Initialize the library. The available dispatch tables
* will depend on how netCDF was built
* (with/without netCDF-4, DAP, CDMREMOTE). */
if(!NC_initialized)
{

View File

@ -2501,6 +2501,7 @@ exit:
* classic model is in use.
*
* @param root_grp pointer to the group info for the root group of the
* file.
* @param is_classic store 1 if this is a classic file.
*
* @return NC_NOERR No error.

View File

@ -1603,6 +1603,8 @@ nc4_normalize_name(const char *name, char *norm_name)
* show only errors, and to higher numbers to show more and more
* logging details. If logging is not enabled with --enable-logging at
* configure when building netCDF, this function will do nothing.
* Note that it is possible to set the log level using the environment
* variable named _NETCDF_LOG_LEVEL_ (e.g. _export NETCDF_LOG_LEVEL=4_).
*
* @param new_level The new logging level.
*

View File

@ -94,4 +94,11 @@ IF(TEST_PARALLEL4)
add_sh_test(nc_test4 run_par_test)
ENDIF()
IF(ENABLE_METADATA_PERF)
build_bin_test(bigmeta)
build_bin_test(openbigmeta)
build_bin_test(tst_attsperf)
add_sh_test(nc_test4 perftest)
ENDIF()
ADD_EXTRA_DIST(findplugin.in)