Merge remote-tracking branch 'upstream/master'

This commit is contained in:
Quincey Koziol 2014-11-07 08:17:35 -06:00
commit c6716f3f82
365 changed files with 7522 additions and 10672 deletions

3
.gitignore vendored
View File

@ -1,3 +1,6 @@
### 'Normal' gitignore files.
ctest.c
ctest64.c
debug.txt
CTestConfig.cmake
Vagrantfile

File diff suppressed because it is too large Load Diff

View File

@ -9,7 +9,7 @@
set(DART_TESTING_TIMEOUT "4800")
set(CTEST_PROJECT_NAME "@NC_CTEST_PROJECT_NAME@")
set(CTEST_NIGHTLY_START_TIME "00:00:00 EST")
set(CTEST_NIGHTLY_START_TIME "02:00:00 EDT")
set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "@NC_CTEST_DROP_SITE@")

1934
Doxyfile

File diff suppressed because it is too large Load Diff

216
man4/Doxyfile.guide.in → Doxyfile.developer Normal file → Executable file
View File

@ -1,4 +1,4 @@
# Doxyfile 1.8.3, mixed with 1.8.6
# Doxyfile 1.8.6
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@ -32,26 +32,26 @@ DOXYFILE_ENCODING = UTF-8
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "The NetCDF User's Guide"
PROJECT_NAME = netCDF-C
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER = @PACKAGE_VERSION@
PROJECT_NUMBER = 4.3.3-rc2
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
PROJECT_BRIEF = "The netCDF C libraries"
# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
# the documentation. The maximum height of the logo should not exceed 55 pixels
# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
# to the output directory.
PROJECT_LOGO = ../../man4/netcdf-50x50.png
PROJECT_LOGO = ./docs/unidata_logo_cmyk.png
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
@ -110,14 +110,24 @@ REPEAT_BRIEF = YES
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF =
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
"The $name file" \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
# description.
# The default value is: NO.
ALWAYS_DETAILED_SEC = NO
ALWAYS_DETAILED_SEC = YES
# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
# inherited members of a class in the documentation of that class as if those
@ -125,7 +135,7 @@ ALWAYS_DETAILED_SEC = NO
# operators of the base classes will not be shown.
# The default value is: NO.
INLINE_INHERITED_MEMB = NO
INLINE_INHERITED_MEMB = YES
# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
# before files name in the file list and in the header files. If set to NO the
@ -189,7 +199,7 @@ QT_AUTOBRIEF = YES
# not recognized any more.
# The default value is: NO.
MULTILINE_CPP_IS_BRIEF = NO
MULTILINE_CPP_IS_BRIEF = YES
# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
# documentation from any documented member that it re-implements.
@ -298,7 +308,7 @@ AUTOLINK_SUPPORT = YES
# diagrams that involve STL classes more complete and accurate.
# The default value is: NO.
BUILTIN_STL_SUPPORT = NO
BUILTIN_STL_SUPPORT = YES
# If you use Microsoft's C++/CLI language, you should set this option to YES to
# enable parsing support.
@ -330,7 +340,7 @@ IDL_PROPERTY_SUPPORT = YES
# all members of a group must be documented explicitly.
# The default value is: NO.
DISTRIBUTE_GROUP_DOC = YES
DISTRIBUTE_GROUP_DOC = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
@ -398,13 +408,13 @@ LOOKUP_CACHE_SIZE = 0
# normally produced when WARNINGS is set to YES.
# The default value is: NO.
EXTRACT_ALL = NO
EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
EXTRACT_PRIVATE = YES
# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
# scope will be included in the documentation.
@ -416,7 +426,7 @@ EXTRACT_PACKAGE = NO
# included in the documentation.
# The default value is: NO.
EXTRACT_STATIC = NO
EXTRACT_STATIC = YES
# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
# locally in source files will be included in the documentation. If set to NO
@ -441,7 +451,7 @@ EXTRACT_LOCAL_METHODS = NO
# are hidden.
# The default value is: NO.
EXTRACT_ANON_NSPACES = NO
EXTRACT_ANON_NSPACES = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
# undocumented members inside documented classes or files. If set to NO these
@ -478,7 +488,7 @@ HIDE_IN_BODY_DOCS = NO
# will be excluded. Set it to YES to include the internal documentation.
# The default value is: NO.
INTERNAL_DOCS = @BUILD_INTERNAL_DOCS@
INTERNAL_DOCS = YES
# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
# names in lower-case letters. If set to YES upper-case letters are also
@ -487,7 +497,7 @@ INTERNAL_DOCS = @BUILD_INTERNAL_DOCS@
# and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = YES
CASE_SENSE_NAMES = NO
# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
# their full class and namespace scopes in the documentation. If set to YES the
@ -513,7 +523,7 @@ SHOW_GROUPED_MEMB_INC = NO
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
FORCE_LOCAL_INCLUDES = NO
FORCE_LOCAL_INCLUDES = YES
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
@ -534,7 +544,7 @@ SORT_MEMBER_DOCS = YES
# this will also influence the order of the classes in the class list.
# The default value is: NO.
SORT_BRIEF_DOCS = NO
SORT_BRIEF_DOCS = YES
# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
# (brief and detailed) documentation of class members so that constructors and
@ -546,14 +556,14 @@ SORT_BRIEF_DOCS = NO
# detailed member documentation.
# The default value is: NO.
SORT_MEMBERS_CTORS_1ST = NO
SORT_MEMBERS_CTORS_1ST = YES
# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
# of group names into alphabetical order. If set to NO the group names will
# appear in their defined order.
# The default value is: NO.
SORT_GROUP_NAMES = NO
SORT_GROUP_NAMES = YES
# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
# fully-qualified names, including namespaces. If set to NO, the class list will
@ -580,7 +590,7 @@ STRICT_PROTO_MATCHING = NO
# documentation.
# The default value is: YES.
GENERATE_TODOLIST = @SHOW_DOXYGEN_TODO_LIST@
GENERATE_TODOLIST = YES
# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
# test list. This list is created by putting \test commands in the
@ -617,7 +627,7 @@ ENABLED_SECTIONS =
# documentation regardless of this setting.
# Minimum value: 0, maximum value: 10000, default value: 30.
MAX_INITIALIZER_LINES = 30
MAX_INITIALIZER_LINES = 28
# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
# the bottom of the documentation of classes and structs. If set to YES the list
@ -661,7 +671,7 @@ FILE_VERSION_FILTER =
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE = @abs_top_srcdir@/man4/DoxygenLayout.xml
LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
@ -683,7 +693,7 @@ CITE_BIB_FILES =
# messages are off.
# The default value is: NO.
QUIET = YES
QUIET = NO
# The WARNINGS tag can be used to turn on/off the warning messages that are
# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
@ -699,7 +709,7 @@ WARNINGS = YES
# will automatically be disabled.
# The default value is: YES.
WARN_IF_UNDOCUMENTED = NO
WARN_IF_UNDOCUMENTED = YES
# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
# potential errors in the documentation, such as not documenting some parameters
@ -715,7 +725,7 @@ WARN_IF_DOC_ERROR = YES
# documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = YES
WARN_NO_PARAMDOC = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
@ -743,7 +753,7 @@ WARN_LOGFILE =
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = @abs_top_srcdir@/man4/guide.dox
INPUT =
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@ -763,13 +773,45 @@ INPUT_ENCODING = UTF-8
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS =
FILE_PATTERNS = *.c \
*.cc \
*.cxx \
*.cpp \
*.c++ \
*.d \
*.java \
*.ii \
*.ixx \
*.ipp \
*.i++ \
*.inl \
*.h \
*.hh \
*.hxx \
*.hpp \
*.h++ \
*.idl \
*.odl \
*.cs \
*.php \
*.php3 \
*.inc \
*.m \
*.mm \
*.dox \
*.py \
*.f90 \
*.f \
*.vhd \
*.vhdl \
*.md \
*.doc
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = NO
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
@ -785,7 +827,7 @@ EXCLUDE =
# from the input.
# The default value is: NO.
EXCLUDE_SYMLINKS = NO
EXCLUDE_SYMLINKS = YES
# If the value of the INPUT tag contains directories, you can use the
# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
@ -811,14 +853,14 @@ EXCLUDE_SYMBOLS =
# that contain example code fragments that are included (see the \include
# command).
EXAMPLE_PATH =
EXAMPLE_PATH =
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
EXAMPLE_PATTERNS = *
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
@ -831,19 +873,7 @@ EXAMPLE_RECURSIVE = NO
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH = @abs_top_srcdir@/man4/images/chunking2.png \
@abs_top_srcdir@/man4/images/compatibility3.png \
@abs_top_srcdir@/man4/images/compression.png \
@abs_top_srcdir@/man4/images/groups.png \
@abs_top_srcdir@/man4/images/nc4-model.png \
@abs_top_srcdir@/man4/images/ncatts.png \
@abs_top_srcdir@/man4/images/nc-classic-uml.png \
@abs_top_srcdir@/man4/images/nccoords.png \
@abs_top_srcdir@/man4/images/ncfile.png \
@abs_top_srcdir@/man4/images/netcdf_architecture.png \
@abs_top_srcdir@/man4/images/pnetcdf.png \
@abs_top_srcdir@/man4/images/deptree.jpg \
@abs_top_srcdir@/man4/images/InstallTreeWindows.jpg
IMAGE_PATH =
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
@ -910,26 +940,26 @@ SOURCE_BROWSER = YES
# classes and enums directly into the documentation.
# The default value is: NO.
INLINE_SOURCES = NO
INLINE_SOURCES = YES
# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
# special comment blocks from generated source code fragments. Normal C, C++ and
# Fortran comments will always remain visible.
# The default value is: YES.
STRIP_CODE_COMMENTS = YES
STRIP_CODE_COMMENTS = NO
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# function all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
REFERENCED_BY_RELATION = YES
# If the REFERENCES_RELATION tag is set to YES then for each documented function
# all documented entities called/used by that function will be listed.
# The default value is: NO.
REFERENCES_RELATION = NO
REFERENCES_RELATION = YES
# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
@ -988,7 +1018,7 @@ VERBATIM_HEADERS = YES
# compiled with the --with-libclang option.
# The default value is: NO.
# CLANG_ASSISTED_PARSING = NO
CLANG_ASSISTED_PARSING = NO
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
@ -996,7 +1026,7 @@ VERBATIM_HEADERS = YES
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
# CLANG_OPTIONS =
CLANG_OPTIONS =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
@ -1007,7 +1037,7 @@ VERBATIM_HEADERS = YES
# classes, structs, unions or interfaces.
# The default value is: YES.
ALPHABETICAL_INDEX = NO
ALPHABETICAL_INDEX = YES
# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
# which the alphabetical index list will be split.
@ -1039,7 +1069,7 @@ GENERATE_HTML = YES
# The default directory is: html.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_OUTPUT = html/html_guide
HTML_OUTPUT = html
# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
# generated HTML page (for example: .htm, .php, .asp).
@ -1076,7 +1106,7 @@ HTML_HEADER =
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER = @abs_top_srcdir@/man4/footer.html
HTML_FOOTER =
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
@ -1101,12 +1131,6 @@ HTML_STYLESHEET =
HTML_EXTRA_STYLESHEET =
# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
# HTML_ALIGN_MEMBERS = YES
# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the HTML output directory. Note
# that these files will be copied to the base HTML output directory. Use the
@ -1126,7 +1150,7 @@ HTML_EXTRA_FILES =
# Minimum value: 0, maximum value: 359, default value: 220.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_HUE = 220
HTML_COLORSTYLE_HUE = 349
# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
# in the HTML output. For a value of 0 the output will use grayscales only. A
@ -1134,7 +1158,7 @@ HTML_COLORSTYLE_HUE = 220
# Minimum value: 0, maximum value: 255, default value: 100.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_SAT = 91
# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
# luminance component of the colors in the HTML output. Values below 100
@ -1153,7 +1177,7 @@ HTML_COLORSTYLE_GAMMA = 80
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = NO
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
@ -1161,7 +1185,7 @@ HTML_TIMESTAMP = NO
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_SECTIONS = NO
HTML_DYNAMIC_SECTIONS = YES
# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
# shown in the various tree structured indices initially; the user can expand
@ -1375,12 +1399,19 @@ ECLIPSE_DOC_ID = org.doxygen.Project
DISABLE_INDEX = NO
# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
# structure should be generated to display hierarchical information.
# If the tag value is set to YES, a side panel will be generated
# containing a tree-like index structure (just like the one that
# is generated for HTML Help). For this to work a browser that supports
# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
# Windows users are probably better off using the HTML help feature.
# structure should be generated to display hierarchical information. If the tag
# value is set to YES, a side panel will be generated containing a tree-like
# index structure (just like the one that is generated for HTML Help). For this
# to work a browser that supports JavaScript, DHTML, CSS and frames is required
# (i.e. any modern browser). Windows users are probably better off using the
# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
# further fine-tune the look of the index. As an example, the default style
# sheet generated by doxygen has an example that shows how to put an image at
# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
# the same information as the tab index, you could consider setting
# DISABLE_INDEX to YES when enabling this option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
GENERATE_TREEVIEW = YES
@ -1698,7 +1729,7 @@ LATEX_HIDE_INDICES = NO
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_SOURCE_CODE = NO
LATEX_SOURCE_CODE = YES
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
@ -1772,7 +1803,7 @@ RTF_EXTENSIONS_FILE =
# classes and files.
# The default value is: NO.
GENERATE_MAN = YES
GENERATE_MAN = NO
# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
@ -1799,7 +1830,7 @@ MAN_EXTENSION = .3
# The default value is: NO.
# This tag requires that the tag GENERATE_MAN is set to YES.
MAN_LINKS = YES
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
@ -1965,7 +1996,7 @@ INCLUDE_FILE_PATTERNS =
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED = USE_NETCDF4
PREDEFINED =
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
@ -2003,14 +2034,13 @@ SKIP_FUNCTION_MACROS = YES
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES = @CMAKE_CURRENT_BINARY_DIR@/main.tag=../ "@CMAKE_CURRENT_BINARY_DIR@/tutorial.tag = ../html_tutorial"
TAGFILES =
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE = @CMAKE_CURRENT_BINARY_DIR@/guide.tag
GENERATE_TAGFILE =
# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
# class index. If set to NO only the inherited external classes will be listed.
@ -2023,14 +2053,14 @@ ALLEXTERNALS = NO
# listed.
# The default value is: YES.
EXTERNAL_GROUPS = NO
EXTERNAL_GROUPS = YES
# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in
# the related pages index. If set to NO, only the current project's pages will
# be listed.
# The default value is: YES.
EXTERNAL_PAGES = NO
EXTERNAL_PAGES = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of 'which perl').
@ -2071,7 +2101,7 @@ DIA_PATH =
# and usage relations if the target is undocumented or is not a class.
# The default value is: YES.
HIDE_UNDOC_RELATIONS = YES
HIDE_UNDOC_RELATIONS = NO
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz (see:
@ -2080,7 +2110,7 @@ HIDE_UNDOC_RELATIONS = YES
# set to NO
# The default value is: NO.
HAVE_DOT = @HAVE_DOT@
HAVE_DOT = YES
# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
# to run in parallel. When set to 0 doxygen will base this on the number of
@ -2090,7 +2120,7 @@ HAVE_DOT = @HAVE_DOT@
# Minimum value: 0, maximum value: 32, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_NUM_THREADS = 0
DOT_NUM_THREADS = 4
# When you want a differently looking font n the dot files that doxygen
# generates you can specify the font name using DOT_FONTNAME. You need to make
@ -2100,14 +2130,14 @@ DOT_NUM_THREADS = 0
# The default value is: Helvetica.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTNAME = Helvetica
DOT_FONTNAME = FreeSans.ttf
# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
# dot graphs.
# Minimum value: 4, maximum value: 24, default value: 10.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTSIZE = 10
DOT_FONTSIZE = 8
# By default doxygen will tell dot to use the default font as specified with
# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
@ -2146,7 +2176,7 @@ GROUP_GRAPHS = YES
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
UML_LOOK = NO
UML_LOOK = YES
# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
# class node. If there are many fields or methods and many nodes the graph may
@ -2167,7 +2197,7 @@ UML_LIMIT_NUM_FIELDS = 10
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
TEMPLATE_RELATIONS = YES
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
@ -2207,7 +2237,7 @@ CALL_GRAPH = YES
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
CALLER_GRAPH = NO
CALLER_GRAPH = YES
# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
# hierarchy of all classes instead of a textual one.
@ -2234,7 +2264,7 @@ DIRECTORY_GRAPH = YES
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_IMAGE_FORMAT = png
DOT_IMAGE_FORMAT = svg
# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
# enable generation of interactive SVG images that allow zooming and panning.
@ -2246,7 +2276,7 @@ DOT_IMAGE_FORMAT = png
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
INTERACTIVE_SVG = NO
INTERACTIVE_SVG = YES
# The DOT_PATH tag can be used to specify the path where the dot tool can be
# found. If left blank, it is assumed the dot tool can be found in the path.
@ -2295,7 +2325,7 @@ DOT_GRAPH_MAX_NODES = 50
# Minimum value: 0, maximum value: 1000, default value: 0.
# This tag requires that the tag HAVE_DOT is set to YES.
MAX_DOT_GRAPH_DEPTH = 0
MAX_DOT_GRAPH_DEPTH = 5
# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
# background. This is disabled by default, because dot on Windows does not seem
@ -2307,7 +2337,7 @@ MAX_DOT_GRAPH_DEPTH = 0
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_TRANSPARENT = NO
DOT_TRANSPARENT = YES
# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
# files in one run (i.e. multiple -o and -T options on the command line). This
@ -2316,7 +2346,7 @@ DOT_TRANSPARENT = NO
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
DOT_MULTI_TARGETS = YES
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated

View File

@ -10,12 +10,11 @@ ACLOCAL_AMFLAGS = -I m4
# These files get added to the distribution.
EXTRA_DIST = README.md COPYRIGHT INSTALL INSTALL.cmake test_prog.c \
lib_flags.am cmake CMakeLists.txt COMPILE.cmake.txt config.h.in.cmake \
config.h.in.cmake cmake_uninstall.cmake.in \
netcdf-config-version.cmake.in \
netcdf-config.cmake.in FixBundle.cmake.in \
nc-config.in.cmake RELEASE_NOTES.md CTestCustom.cmake \
CTestConfig.cmake.in
lib_flags.am cmake CMakeLists.txt COMPILE.cmake.txt \
config.h.cmake.in cmake_uninstall.cmake.in \
FixBundle.cmake.in \
nc-config.cmake.in RELEASE_NOTES.md CTestCustom.cmake \
CTestConfig.cmake.in libnetcdf.settings.in netCDFConfig.cmake.in
# Doxygen doesn't build nicely in vpath builds.
# Don't do this; it wipes out any exported values
@ -25,9 +24,9 @@ pkgconfigdir=$(libdir)/pkgconfig
pkgconfig_DATA = netcdf.pc
# We need a way to automatically generate INSTALL from
# man4/install.doc, now that we've switched from texinfo to doxygen.
# docs/install.doc, now that we've switched from texinfo to doxygen.
# INSTALL:
# non-existent-doxygen-to-text-utility < man4/install.doc > INSTALL
# non-existent-doxygen-to-text-utility < docs/install.doc > INSTALL
# Does the user want to build the V2 API?
if BUILD_V2
@ -84,16 +83,16 @@ endif
# This is the list of subdirs for which Makefiles will be constructed
# and run. ncgen must come before ncdump, because their tests
# depend on it.
# depend on it.
SUBDIRS = include $(OCLIB) $(H5_TEST_DIR) libdispatch libsrc \
$(LIBSRC4_DIR) $(DAP2) $(LIBCDMR) $(LIBSRC5) liblib \
$(NCGEN3) $(NCGEN) $(NCDUMP) \
$(TESTDIRS) \
man4 $(EXAMPLES) \
docs $(EXAMPLES) \
$(UDUNITS) $(LIBCF)
# Remove these generated files, for a distclean.
DISTCLEANFILES = VERSION comps.txt test_prog
DISTCLEANFILES = VERSION comps.txt test_prog libnetcdf.settings
# The nc-config script helps the user build programs with netCDF.
bin_SCRIPTS = nc-config
@ -101,6 +100,7 @@ bin_SCRIPTS = nc-config
# What needs to go in the binrary dist?
BINFILES = README_BINARIES.txt
BINFILES += include/netcdf.h share/man/man3/netcdf.3 lib/libnetcdf.a
BINFILES += libnetcdf.settings
ZIPBINFILES = ${prefix}/include/netcdf.h ${prefix}/share/man/man3/netcdf.3 ${prefix}/lib/libnetcdf.a
if BUILD_UTILITIES
@ -117,6 +117,10 @@ ZIPBINFILES += ${prefix}/bin/libnetcdf-7.dll ${prefix}/lib/libnetcdf.dll.a ${pre
${prefix}/lib/libnetcdf.la ${prefix}/lib/netcdfdll.def
endif # BUILD_DLL
# install libnetcdf.settings in lib directory.
settingsdir=$(libdir)
settings_DATA=libnetcdf.settings
# At Unidata, package up binaries.
ftpbin: install
echo "Getting binaries from ${prefix}"
@ -137,7 +141,7 @@ check_nc_config:
install-data-hook:
if BUILD_DLL
cp liblib/netcdfdll.def ${prefix}/lib
cp liblib/netcdfdll.def $(DESTDIR)${prefix}/lib
endif # BUILD_DLL
@echo ''
@echo '+-------------------------------------------------------------+'
@ -165,7 +169,7 @@ endif # BUILD_DLL
@echo '| http://www.unidata.ucar.edu/software/netcdf/ |'
@echo '| |'
@echo '| NetCDF is developed and maintained at the Unidata Program |'
@echo '| Center. Unidata provides a broad array of data and software |'
@echo '| Center. Unidata provides a broad array of data and software |'
@echo '| tools for use in geoscience education and research. |'
@echo '| http://www.unidata.ucar.edu |'
@echo '+-------------------------------------------------------------+'

View File

@ -1,4 +1,10 @@
# Unidata NetCDF
Unidata NetCDF
==============
<a href="https://scan.coverity.com/projects/157">
<img alt="Coverity Scan Build Status"
src="https://scan.coverity.com/projects/157/badge.svg"/>
</a>
The Unidata network Common Data Form (netCDF) is an interface for
scientific data access and a freely-distributed software library that

View File

@ -1,13 +1,84 @@
Release Notes {#release_notes}
Release Notes {#RELEASE_NOTES}
===============================
\brief Release notes file for the netcdf-c package.
This file contains a high-level description of this package's evolution. Releases are in reverse chronological order (most recent first). Note that, as of netcdf 4.2, the netcdf-c++ and netcdf-fortran libraries have been separated into their own libraries.
This file contains a high-level description of this package's evolution. Releases are in reverse chronological order (most recent first). Note that, as of netcdf 4.2, the `netcdf-c++` and `netcdf-fortran` libraries have been separated into their own libraries.
## 4.3.3 Released TBD
### 4.3.3-rc1 Released TBD
### 4.3.3-rc3 Released ?
* The pnetcdf support was not properly being used to provide
mpi parallel io for netcdf-3 classic files. The wrong
dispatch table was being used.
[NCF-319](https://bugtracking.unidata.ucar.edu/browse/NCF-319)
* Fixed bug in ncgen. When classic format was in force (k=1 or k=4),
the "long" datatype should be treated as int32. Was returning an error.
[NCF-318](https://bugtracking.unidata.ucar.edu/browse/NCF-318)
* Fixed bug where if the netCDF-C library is built with the
HDF5 library but without the HDF4 library and one attempts
to open an HDF4 file, an abort occurs rather than returning
a proper error code (NC_ENOTNC). [NCF-317](https://bugtracking.unidata.ucar.edu/browse/NCF-317)
* Added a new option, `NC_EXTRA_DEPS`, for cmake-based builds. This is analogous to `LIBS` in autotools-based builds. Example usage:
$ cmake .. -NC_EXTRA_DEPS="-lcustom_lib"
More details may be found at the Unidata JIRA Dashboard. [NCF-316](https://bugtracking.unidata.ucar.edu/browse/NCF-316)
### 4.3.3-rc2 Released 2014-09-24
* Fixed the code for handling character constants
in datalists in ncgen. Two of the problems were:
1. It failed on large constants
2. It did not handle e.g. var = 'a', 'b', ...
in the same way that ncgen3 did.
See [NCF-309](https://bugtracking.unidata.ucar.edu/browse/NCF-309).
* Added a new file, `netcdf_meta.h`. This file is generated automatically at configure time and contains information related to the capabilities of the netcdf library. This file may be used by projects dependent upon `netcdf` to make decisions during configuration, based on how the `netcdf` library was built. The macro `NC_HAVE_META_H` is defined in `netcdf.h`. Paired with judicious use of `#ifdef`'s, this macro will indicate to developers whether or not the meta-header file is present. See [NCF-313](https://bugtracking.unidata.ucar.edu/browse/NCF-313).
> Determining the presence of `netcdf_meta.h` can also be accomplished by methods common to autotools and cmake-based build systems.
* Changed `Doxygen`-generated documentation hosted by Unidata to use more robust server-based searching.
* Corrected embedded URLs in release notes.
* Corrected an issue where building with HDF4 support with Visual Studio would fail.
### 4.3.3-rc1 Released 2014-08-25
* Added `CMake`-based export files, contributed by Nico Schlömer. See https://github.com/Unidata/netcdf-c/pull/74.
* Fixed ncdump bug for char variables with multiple unlimited dimensions and added an associated test. Now the output CDL properly disambiguates dimension groupings, so that ncgen can generate the original file from the CDL. [NCF-310](https://bugtracking.unidata.ucar.edu/browse/NCF-310)
* Converted the [Manually-maintained FAQ page](http://www.unidata.ucar.edu/software/netcdf/docs/faq.html) into markdown and added it to the `docs/` directory. This way the html version will be generated when the rest of the documentation is built, the FAQ will be under version control, and it will be in a more visible location, hopefully making it easier to maintain.
* Bumped minimum required version of `cmake` to `2.8.12`. This was necessitated by the adoption of the new `CMAKE_MACOSX_RPATH` property, for use on OSX.
* Jennifer Adams has requested a reversion in behavior so that all dap requests include a constraint. Problem is caused by change in prefetch where if all variables are requested, then no constraint is generated. Fix is to always generate a constraint in prefetch.
[NCF-308](https://bugtracking.unidata.ucar.edu/browse/NCF-308)
* Added a new option for cmake-based builds, `ENABLE_DOXYGEN_LATEX_OUTPUT`. On those systems with `make` and `pdflatex`, setting this option **ON** will result in pdf versions of the documentation being built. This feature is experimental.
* Bumped minimum CMake version to `2.8.9` from `2.8.8` as part of a larger pull request contributed by Nico Schlömer. [Pull Request #64](https://github.com/Unidata/netcdf-c/pull/64)
* Replaced the `NetCDF Library Architecture` image with an updated version from the 2012 NetCDF Workshop slides.
* Fix HDF4 files to support chunking.
[NCF-272](https://bugtracking.unidata.ucar.edu/browse/NCF-272)
* NetCDF creates a `libnetcdf.settings` file after configuration now, similar to those generated by `HDF4` and `HDF5`. It is installed into the same directory as the libraries. [NCF-303](https://bugtracking.unidata.ucar.edu/browse/NCF-303).
* Renamed `man4/` directory to `docs/` to make the purpose and contents clearer. See [man4 vs. docs #60](https://github.com/Unidata/netcdf-c/issues/60).
* Removed redundant variable `BUILD_DOCS` from the CMake configuration file. See the issue at github: [#59](https://github.com/Unidata/netcdf-c/issues/59).
* Added missing documentation templates to `man4/Makefile.am`, to correct an issue when trying to build the local `Doxygen`-generated documentation. This issue was reported by Nico Schlömer and may be viewed on github. [Releases miss Doxygen files #56](https://github.com/Unidata/netcdf-c/issues/56)
* When the NC_MPIPOSIX flag is given for parallel I/O access and the HDF5 library does not have the MPI-POSIX VFD configured in, the NC_MPIPOSIX flag is transparently aliased to the NC_MPIIO flag within the netCDF-4 library.
## 4.3.2 Released 2014-04-23
@ -20,7 +91,7 @@ This file contains a high-level description of this package's evolution. Release
* `NC_TEST_DROP_SITE` - Specify an alternative Dashboard by URL or IP address.
* `NC_CTEST_DROP_LOC_PREFIX` - Specify a prefix on the remote webserver relative to the root directory. This lets CTest accommodate dashboards that do not live at the top level of the web server.
* Return an error code on open instead of an assertion violation for truncated file.
### 4.3.2-rc2 Released 2014-04-15
@ -36,7 +107,7 @@ This file contains a high-level description of this package's evolution. Release
* `hdf5: 1.8.12`
* `zlib: 1.2.8`
* `libcurl: 7.35.0`
* Added a separate flag to enable DAP AUTH tests. These tests are disabled by default. The flags for autotools and CMAKE-based builds are (respectively):
* --enable-dap-auth-tests
* -DENABLE\_DAP\_AUTH\_TESTS
@ -61,6 +132,12 @@ This file contains a high-level description of this package's evolution. Release
* Addressed an issue related to old DAP servers. [NCF-287](https://bugtracking.unidata.ucar.edu/browse/NCF-287)
* Modified nc_{get/put}_vars to no longer use
nc_get/put_varm. They now directly use nc_get/put_vara
directly. This means that nc_get/put_vars now work
properly for user defined types as well as atomic types.
[NCF-228] (https://bugtracking.unidata.ucar.edu/browse/NCF-228)
## 4.3.1.1 Released 2014-02-05
This is a bug-fix-only release for version 4.3.1.
@ -85,7 +162,7 @@ This is a bug-fix-only release for version 4.3.1.
* Addressed an issue reported by Jeff Whitaker regarding `nc_inq_nvars` returning an incorrect number of dimensions (this issue was introduced in 4.3.1-rc5). Integrated a test contributed by Jeff Whitaker.
* A number of previously-disabled unit tests were reviewed and made active.
* A number of previously-disabled unit tests were reviewed and made active.
### 4.3.1-rc5 Released 2013-12-06
@ -149,7 +226,7 @@ This is a bug-fix-only release for version 4.3.1.
* Modify ncgen to support disambiguating references to
an enum constant in a data list. [NCF-265]
[NCF-265]:https://bugtracking.unidata.ucar.edu/browse/NCF-265
* Corrected bug in netCDF-4 dimension ID ordering assumptions, resulting in access that works locally but fails through DAP server. [NCF-166]
@ -195,8 +272,8 @@ This is a bug-fix-only release for version 4.3.1.
$ git clone https://github.com/Unidata/netCDF-C.git
* Note: in this release, it is necessary to generate the `configure` script and makefile templates using `autoreconf` in the root netCDF-C directory.:
$ autoreconf -i -f
$ autoreconf -i -f
* Added `nc_rename_grp` to allow for group renaming in netCDF-4 files. [NCF-204]
@ -211,7 +288,7 @@ This is a bug-fix-only release for version 4.3.1.
* Added support for dynamic loading, to compliment the dynamic loading support introduced in hdf 1.8.11. Dynamic loading support depends on libdl, and is enabled as follows: [NCF-258]
* autotools-based builds: --enable-dynamic-loading
* cmake-based builds: -DENABLE\_DYNAMIC\_LOADING=ON
[NCF-258]: https://www.unidata.ucar.edu/jira/browse/NCF-258
* Fix issue of netCDF-4 parallel independent access with unlimited dimension hanging. Extending the size of an unlimited dimension in HDF5 must be a collective operation, so now an error is returned if trying to extend in independent access mode. [NCF-250]
@ -678,7 +755,7 @@ contiguous blocks of memory. [NCF-69]
* Make changes necessary for upgrading to HDF5 1.8.7 [NCF-66]
### 4.1.3-rc1 2011-05-06
### 4.1.3-rc1 2011-05-06
* Stop looking for xdr if --disable-dap is used.
@ -985,10 +1062,10 @@ output.
Turkal).
* Fixed bug in C++ API creating 64-bit offset files. (See
http://www.unidata.ucar.edu/software/netcdf/docs/known\_problems.html\#cxx\_64-bit.)
http://www.unidata.ucar.edu/software/netcdf/docs/known_problems.html#cxx_64-bit).
* Fixed bug for variables larger than 4 GB. (See
http://www.unidata.ucar.edu/software/netcdf/docs/known\_problems.html\#large\_vars\_362.)
http://www.unidata.ucar.edu/software/netcdf/docs/known_problems.html#large_vars_362).
* Changed the configure.ac to build either 3.6.x or 4.x build from the
same configure.ac.
@ -1139,7 +1216,7 @@ configure.
* Switched to new build system, with automake and libtool. Now shared
libraries are built (as well as static ones) on platforms which support
it. For more information about shared libraries, see
http://www.unidata.ucar.edu/software/netcdf/docs/faq.html\#shared\_intro
http://www.unidata.ucar.edu/software/netcdf/docs/faq.html#shared_intro
* Fixed ncdump crash that happened when no arguments were used.
@ -1302,7 +1379,7 @@ dimension sizes between 2\^31 and 2\^32 (for byte variables).
* Fixed ncgen to properly handle dimensions between 2\^31 and 2\^32.
### 3.6.0-beta2
### 3.6.0-beta2
* Added -v2 (version 2 format with 64-bit offsets) option to
ncgen, to specify that generated files or generated C/Fortran code
@ -1320,7 +1397,7 @@ part of the build process. VC++ with managed extensions is required
* Added windows installer files to build windows binary installs.
### 3.6.0-beta1
### 3.6.0-beta1
* By incorporating Greg Sjaardema's patch, added support for
64-bit offset files, which remove many of the restrictions relating to
@ -1336,7 +1413,7 @@ format testing, and once for 64-bit offset format testing.
* The implementation of the Fortran-77 interface has been adapted to
version 4.3 of Burkhard Burow's "cfortran.h".
### 3.6.0-alpha
### 3.6.0-alpha
* Added NEC SX specific optimization for NFILL tunable
parameter in libsrc/putget.c

46
cf
View File

@ -1,6 +1,7 @@
#!/bin/bash
#X="-x"
#NB=1
#DB=1
if test $# != 0 ; then
cmds=$@
@ -11,6 +12,12 @@ DAP=1
#HDF4=1
#PNETCDF=1
#PAR=1
if test "x$PNETCDF" = x1 ; then
PAR=1
fi
#RPC=1
#PGI=1
#M32=1
@ -21,16 +28,17 @@ CFLAGS=""
#CFLAGS="-Wall -Wno-unused-variable -Wno-unused-parameter ${CFLAGS}"
#CFLAGS="-Wconversion"
PREFIX=/usr/local
stddir="/usr/local"
PREFIX=/usr/local
if test "x$cmds" = x ; then
cmds=""
#cmds="all"
#cmds="all check"
#cmds="all dist"
#cmds="all distcheck"
#cmds="$cmds install"
if test "x${cmds}" = x ; then
cmds=""
else
for f in $cmds ; do
if test "x$f" = "xdistcheck" ; then
PREFIX=/tmp/$HOST
fi
done
fi
# HDF4=>HDF5
@ -101,12 +109,6 @@ fi
CXXFLAGS="$CPPFLAGS $CXXFLAGS"
if test -z "$NB" ; then
${MAKE} maintainer-clean >/dev/null 2>&1
if autoreconf -i --force ; then ok=1; else exit ; fi
fi
FLAGS="--prefix ${PREFIX}"
#FLAGS="$FLAGS --disable-f77 --disable-f90"
#FLAGS="$FLAGS --disable-cxx"
@ -120,7 +122,7 @@ FLAGS="$FLAGS --disable-examples"
#FLAGS="$FLAGS --enable-large-file-tests"
#FLAGS="$FLAGS --disable-testsets"
#FLAGS="$FLAGS --disable-dap-remote-tests"
FLAGS="$FLAGS --enable-dap-auth-tests"
#FLAGS="$FLAGS --enable-dap-auth-tests"
#FLAGS="$FLAGS --enable-doxygen"
#FLAGS="$FLAGS --enable-logging"
#FLAGS="$FLAGS --disable-diskless"
@ -131,8 +133,15 @@ FLAGS="$FLAGS --enable-dap-auth-tests"
#FLAGS="$FLAGS --enable-valgrind-tests"
FLAGS="$FLAGS --enable-jna"
#FLAGS="$FLAGS --disable-shared"
if test "x${DB}" = x1 ; then
FLAGS="$FLAGS --disable-shared"
else
FLAGS="$FLAGS --enable-shared"
fi
if test "x${PAR}" = x ; then
FLAGS="$FLAGS --disable-parallel"
fi
if test "x$HDF5" = "x" ; then
FLAGS="$FLAGS --disable-netcdf-4"
@ -181,6 +190,11 @@ export CXXFLAGS
DISTCHECK_CONFIGURE_FLAGS="$FLAGS"
export DISTCHECK_CONFIGURE_FLAGS
if test -z "$NB" ; then
${MAKE} maintainer-clean >/dev/null 2>&1
if autoreconf -i --force ; then ok=1; else exit ; fi
fi
if test -f Makefile ; then ${MAKE} distclean >/dev/null 2>&1 ; fi
sh $X ./configure ${FLAGS}
for c in $cmds; do

9
cf.cmake Normal file
View File

@ -0,0 +1,9 @@
rm -fr build
mkdir build
cd build
UL=/usr/local
PPATH="$UL"
HDF5="-DHDF5_LIB=${UL}/lib/libhdf5.so -DHDF5_HL_LIB=${UL}/lib/libhdf5_hl.so -DHDF5_INCLUDE_DIR=${UL}/include"
cmake -DCMAKE_INSTALL_PREFIX=${UL} -DCMAKE_PREFIX_PATH="$PPATH" ${HDF5} ..
cmake --build .
#cmake --build . --target RUN_TESTS

View File

@ -237,6 +237,7 @@ are set when opening a binary file on Windows. */
/* Set if we have strdup */
#cmakedefine HAVE_STRDUP
#cmakedefine HAVE_STRNDUP
#cmakedefine HAVE_STRLCAT
#cmakedefine HAVE_STRERROR
#cmakedefine HAVE_SNPRINTF

View File

@ -5,21 +5,31 @@
# the COPYRIGHT file for more information.
# Recall that ${VAR-exp} expands to $VAR if var is set (even to null),
# and to exp otherwise.
# and to exp otherwise.
## This puts the cvs ID tag in the output configure script.
AC_REVISION([$Id: configure.ac,v 1.450 2010/05/28 19:42:47 dmh Exp $])
# Running autoconf on this file will trigger a warning if
# Running autoconf on this file will trigger a warning if
# autoconf is not at least the specified version.
AC_PREREQ([2.59])
# Initialize with name, version, and support email address.
AC_INIT([netCDF], [4.3.3-rc1], [support-netcdf@unidata.ucar.edu])
# Initialize with name, version, and support email address.
AC_INIT([netCDF], [4.3.3-rc2], [support-netcdf@unidata.ucar.edu])
AC_SUBST([NC_VERSION_MAJOR]) NC_VERSION_MAJOR=4
AC_SUBST([NC_VERSION_MINOR]) NC_VERSION_MINOR=3
AC_SUBST([NC_VERSION_PATCH]) NC_VERSION_PATCH=3
AC_SUBST([NC_VERSION_NOTE]) NC_VERSION_NOTE="-rc2"
#####
# Set some variables used to generate a libnetcdf.settings file,
# pattered after the files generated by libhdf4, libhdf5.
#####
# Create the VERSION file, which contains the package version from
# AC_INIT.
echo -n AC_PACKAGE_VERSION>VERSION
echo AC_PACKAGE_VERSION>VERSION
AC_SUBST(PACKAGE_VERSION)
AC_MSG_NOTICE([netCDF AC_PACKAGE_VERSION])
@ -27,6 +37,9 @@ AC_MSG_NOTICE([netCDF AC_PACKAGE_VERSION])
# Keep libtool macros in an m4 directory.
AC_CONFIG_MACRO_DIR([m4])
# Configuration Date
AC_SUBST([CONFIG_DATE]) CONFIG_DATE="`date`"
# Find out about the host we're building on.
AC_CANONICAL_HOST
@ -70,7 +83,7 @@ AC_ARG_ENABLE([doxygen],
[AS_HELP_STRING([--enable-doxygen],
[Enable generation of documentation.])])
test "x$enable_doxygen" = xyes || enable_doxygen=no
AM_CONDITIONAL([BUILD_DOCS], [test "x$enable_doxygen" = xyes])
AM_CONDITIONAL([BUILD_DOCS], [test "x$enable_doxygen" = xyes])
AC_ARG_ENABLE([dot],
[AS_HELP_STRING([--enable-dot],
@ -81,7 +94,7 @@ AC_ARG_ENABLE([internal-docs],
[AS_HELP_STRING([--enable-internal-docs],
[Include documentation of library internals. This is of interest only to those developing the netCDF library.])])
test "x$enable_internal_docs" = xyes || enable_internal_docs=no
AC_SUBST([BUILD_INTERNAL_DOCS], [$enable_internal_docs])
AC_SUBST([BUILD_INTERNAL_DOCS], [$enable_internal_docs])
AC_MSG_CHECKING([if fsync support is enabled])
AC_ARG_ENABLE([fsync],
@ -110,7 +123,7 @@ AC_DEFINE([JNA], [1], [if true, include jna bug workaround code])
fi
# Does the user want to run extra tests with valgrind?
AC_MSG_CHECKING([whether extra valgrind tests should be run])
AC_MSG_CHECKING([whether extra valgrind tests should be run])
AC_ARG_ENABLE([valgrind-tests],
[AS_HELP_STRING([--enable-valgrind-tests],
[build with valgrind-tests (valgrind is required, static builds only)])])
@ -137,7 +150,7 @@ fi
AC_MSG_CHECKING([do we require hdf5 dynamic-loading support])
AC_ARG_ENABLE([dynamic-loading], [AS_HELP_STRING([--enable-dynamic-loading],
[enable dynamic loading for use with supported hdf5 installs (libdl, HDF5 required)])])
test "x$enable_dynamic_loading" = xyes || enable_dynamic_loading=xno
test "x$enable_dynamic_loading" = xno || enable_dynamic_loading=yes
AC_MSG_RESULT([$enable_dynamic_loading])
# Does the user want to turn on HDF4 read ability?
@ -243,7 +256,7 @@ AC_ARG_ENABLE([logging],
[AS_HELP_STRING([--enable-logging],
[enable logging capability (only applies when netCDF-4 is built). \
This debugging features is only of interest to netCDF developers. \
Ignored if netCDF-4 is not enabled.])])
Ignored if netCDF-4 is not enabled.])])
test "x$enable_logging" = xyes || enable_logging=no
AC_MSG_RESULT([$enable_logging])
@ -408,8 +421,8 @@ AM_CONDITIONAL(USE_FFIO, [test x$enable_ffio = xyes])
dnl AC_MSG_CHECKING([whether netCDF NEC-SX vectorization patch is enabled])
dnl AC_ARG_ENABLE([sx-vectorization],
dnl [AS_HELP_STRING([--enable-sx-vectorization],
dnl [enable a user-provided performance patch to allow \
dnl vectorization of type conversions on NEC SX machines.])])
dnl [enable a user-provided performance patch to allow \
dnl vectorization of type conversions on NEC SX machines.])])
dnl test "x$enable_sx_vectorization" = xyes || enable_sx_vectorization=no
dnl AC_MSG_RESULT([$enable_sx_vectorization])
dnl if test "x$enable_sx_vectorization" = xyes; then
@ -442,6 +455,8 @@ AC_MSG_RESULT($nc_build_v2)
AM_CONDITIONAL(BUILD_V2, [test x$nc_build_v2 = xyes])
if test "x$nc_build_v2" = xno; then
AC_DEFINE_UNQUOTED(NO_NETCDF_2, 1, [do not build the netCDF version 2 API])
else
AC_DEFINE_UNQUOTED(USE_NETCDF_2, 1, [build the netCDF version 2 API])
fi
# Does the user want to disable ncgen/ncdump/nccopy?
@ -482,8 +497,8 @@ fi
AC_MSG_CHECKING([whether benchmaks should be run (experimental)])
AC_ARG_ENABLE([benchmarks],
[AS_HELP_STRING([--enable-benchmarks],
[Run benchmarks. This is an experimental feature. You must fetch
sample data files from the Unidata ftp site to use these benchmarks.
[Run benchmarks. This is an experimental feature. You must fetch
sample data files from the Unidata ftp site to use these benchmarks.
The benchmarks are a bunch of extra tests, which are timed. We use these
tests to check netCDF performance.])])
test "x$enable_benchmarks" = xyes || enable_benchmarks=no
@ -502,7 +517,7 @@ case "$host_cpu $host_os" in
*)
test "x$enable_extreme_numbers" = xno || enable_extreme_numbers=yes
;;
esac
esac
AC_MSG_RESULT($enable_extreme_numbers)
if test "x$enable_extreme_numbers" = xyes; then
@ -532,6 +547,29 @@ AC_DEFINE_UNQUOTED([TEMP_LARGE], ["$TEMP_LARGE"], [Place to put very large netCD
# Find the C compiler.
AC_MSG_NOTICE([finding C compiler])
## Compiler with version information. This consists of the full path
## name of the compiler and the reported version number.
AC_SUBST([CC_VERSION])
## Strip anything that looks like a flag off of $CC
CC_NOFLAGS=`echo $CC | sed 's/ -.*//'`
if `echo $CC_NOFLAGS | grep ^/ >/dev/null 2>&1`; then
CC_VERSION="$CC"
else
CC_VERSION="$CC";
for x in `echo $PATH | sed -e 's/:/ /g'`; do
if test -x $x/$CC_NOFLAGS; then
CC_VERSION="$x/$CC"
break
fi
done
fi
if test -n "$cc_version_info"; then
CC_VERSION="$CC_VERSION ( $cc_version_info)"
fi
AC_PROG_CC
AM_PROG_CC_C_O
AC_C_CONST
@ -608,11 +646,11 @@ AC_MSG_NOTICE([finding other utilities])
# Is doxygen installed? If so, have configure construct the Doxyfile.
AC_CHECK_PROGS([DOXYGEN], [doxygen])
if test -z "$DOXYGEN"; then
if test -z "$DOXYGEN"; then
AC_MSG_WARN([Doxygen not found - documentation will not be built])
fi
# Is graphviz/dot installed? If so, we'll use dot to create
# Is graphviz/dot installed? If so, we'll use dot to create
# graphs in the documentation.
AC_CHECK_PROGS([DOT], [dot])
if test -z "$DOT"; then
@ -620,20 +658,20 @@ if test -z "$DOT"; then
HAVE_DOT=NO
elif test "x$enable_dot" = xno; then
HAVE_DOT=NO
else
else
HAVE_DOT=YES
fi
fi
# If we have doxygen, and it's enabled, then process the file.
if test "x$enable_doxygen" != xno; then
if test -n "$DOXYGEN"; then
AC_SUBST(HAVE_DOT)
AC_CONFIG_FILES([man4/Doxyfile])
AC_SUBST(HAVE_DOT)
AC_CONFIG_FILES([docs/Doxyfile])
fi
# Note: the list of files to input to doxygen
# has been moved to man4/Doxyfile.in so
# has been moved to docs/Doxyfile.in so
# that make distcheck works correctly.
# Any new inputs should be inserted into
# man4/Doxyfile.in and possibley man4/Makefile.am
# docs/Doxyfile.in and possibley docs/Makefile.am
fi
# Find the install program.
@ -747,7 +785,7 @@ if test "x$enable_netcdf_4" = xyes || test "x$enable_dap" = xyes; then
fi
# We need the math library
AC_CHECK_LIB([m], [floor], [],
AC_CHECK_LIB([m], [floor], [],
[AC_MSG_ERROR([Can't find or link to the math library.])])
if test "x$enable_netcdf_4" = xyes; then
@ -764,14 +802,14 @@ if test "x$enable_netcdf_4" = xyes; then
AC_CHECK_LIB([dl],[dlopen], [], [AC_MSG_ERROR([Can't find or link against libdf. See config.log for errors.])])
AC_DEFINE([USE_LIBDL],[1], [if true, enable dynamic loading support])
fi
# Check for the main hdf5 and hdf5_hl library.
AC_SEARCH_LIBS([H5Fflush], [hdf5dll hdf5], [],
AC_SEARCH_LIBS([H5Fflush], [hdf5dll hdf5], [],
[AC_MSG_ERROR([Can't find or link to the hdf5 library. Use --disable-netcdf-4, or see config.log for errors.])])
AC_SEARCH_LIBS([H5DSis_scale], [hdf5_hldll hdf5_hl], [],
AC_SEARCH_LIBS([H5DSis_scale], [hdf5_hldll hdf5_hl], [],
[AC_MSG_ERROR([Can't find or link to the hdf5 high-level. Use --disable-netcdf-4, or see config.log for errors.])])
AC_CHECK_HEADERS([hdf5.h], [], [AC_MSG_ERROR([Compiling a test with HDF5 failed. Either hdf5.h cannot be found, or config.log should be checked for other reason.])])
AC_CHECK_FUNCS([H5Pget_fapl_mpiposix H5Pget_fapl_mpio H5Pset_deflate H5Z_SZIP])
@ -779,7 +817,7 @@ if test "x$enable_netcdf_4" = xyes; then
if test "x$ac_cv_func_H5Pget_fapl_mpiposix" = xyes; then
AC_DEFINE([USE_PARALLEL_POSIX], [1], [if true, compile in parallel netCDF-4 based on MPI/POSIX])
fi
# The user may have parallel HDF5 based on MPI mumble mumble.
if test "x$ac_cv_func_H5Pget_fapl_mpio" = xyes; then
AC_DEFINE([USE_PARALLEL_MPIO], [1], [if true, compile in parallel netCDF-4 based on MPI/IO])
@ -791,14 +829,14 @@ if test "x$enable_netcdf_4" = xyes; then
enable_parallel=yes
AC_DEFINE([USE_PARALLEL], [1], [if true, parallel netCDF-4 is in use])
fi
AC_MSG_CHECKING([whether parallel I/O features are to be included])
AC_MSG_RESULT([$enable_parallel])
AC_MSG_RESULT([$enable_parallel])
# The user must have built HDF5 with the ZLIB library.
if test "x$ac_cv_func_H5Pset_deflate" = xyes; then
AC_DEFINE([USE_ZLIB], [1], [if true, compile in zlib compression in netCDF-4 variables])
else
else
AC_MSG_ERROR([HDF5 must be built with zlib for netCDF-4])
fi
@ -810,13 +848,13 @@ if test "x$enable_netcdf_4" = xyes; then
# If the user wants hdf4 built in, check it out.
if test "x$enable_hdf4" = xyes; then
AC_CHECK_HEADERS([mfhdf.h], [], [nc_mfhdf_h_missing=yes])
AC_CHECK_HEADERS([mfhdf.h], [], [nc_mfhdf_h_missing=yes])
if test "x$nc_mfhdf_h_missing" = xyes; then
AC_MSG_ERROR([Cannot find mfhdf.h, yet --enable-hdf4 was used.])
fi
fi
AC_CHECK_LIB([df], [Hclose], [], [AC_MSG_ERROR([Can't find or link to the hdf4 df library. See config.log for errors.])])
AC_CHECK_LIB([mfhdf], [SDcreate], [], [AC_MSG_ERROR([Can't find or link to the hdf4 mfhdf library. See config.log for errors.])])
AC_CHECK_LIB([jpeg], [jpeg_set_quality], [], [AC_MSG_ERROR([Can't find or link to the jpeg library (required by hdf4). See config.log for errors.])])
AC_DEFINE([USE_HDF4], [1], [if true, use HDF4 too])
@ -833,7 +871,7 @@ fi
# Using pnetcdf for classic parallel I/O?
if test "x$enable_pnetcdf" = xyes; then
AC_CHECK_LIB([pnetcdf], [ncmpi_create], [],
AC_CHECK_LIB([pnetcdf], [ncmpi_create], [],
[AC_MSG_ERROR([Cannot link to pnetcdf library, yet --enable-pnetcdf was used.])])
AC_DEFINE([USE_PNETCDF], [1], [if true, parallel netCDF is used])
@ -893,7 +931,7 @@ AM_CONDITIONAL(USE_PNETCDF, [test x$enable_pnetcdf = xyes])
AM_CONDITIONAL(USE_DISPATCH, [test x$enable_dispatch = xyes])
AM_CONDITIONAL(BUILD_CDMREMOTE, [test "x$enable_cdmremote" = xyes]) # Alias
AM_CONDITIONAL(BUILD_RPC, [test "x$enable_rpc" = xyes])
AM_CONDITIONAL(BUILD_DISKLESS, [test x$enable_diskless = xyes])
AM_CONDITIONAL(BUILD_DISKLESS, [test x$enable_diskless = xyes])
AM_CONDITIONAL(BUILD_MMAP, [test x$enable_mmap = xyes])
# If the machine doesn't have a long long, and we want netCDF-4, then
@ -928,17 +966,17 @@ AC_MSG_RESULT([$LIBS])
# Flags for nc-config script; by design $prefix, $includir, $libdir,
# etc. are left as shell variables in the script so as to facilitate
# relocation
if test "x$with_netcdf_c_lib" = x ; then
if test "x$with_netcdf_c_lib" = x ; then
NC_LIBS="-lnetcdf"
else
NC_LIBS="$with_netcdf_c_lib"
fi
if test "x$enable_shared" != xyes; then
NC_LIBS="$LDFLAGS $NC_LIBS $LIBS"
fi
fi
case "x$target_os" in
xsolaris*)
xsolaris*)
NEWNCLIBS=""
for x in $NC_LIBS ; do
case "$x" in
@ -955,15 +993,6 @@ esac
NC_FLIBS="-lnetcdff $NC_LIBS"
AC_SUBST(NC_LIBS,[$NC_LIBS])
AC_SUBST(HAS_DAP,[$enable_dap])
AC_SUBST(HAS_NC2,[$nc_build_v2])
AC_SUBST(HAS_NC4,[$enable_netcdf_4])
AC_SUBST(HAS_HDF4,[$enable_hdf4])
AC_SUBST(HAS_PNETCDF,[$enable_pnetcdf])
AC_SUBST(HAS_HDF5,[$enable_netcdf_4])
AC_SUBST(HAS_SZLIB,[$nc_has_szlib])
# temporary to deal with a JNA problem
AC_MSG_CHECKING([If compilation is for use with JNA])
AC_ARG_ENABLE([jna],
@ -977,6 +1006,20 @@ if test "x$enable_jna" = xyes ; then
AC_DEFINE([JNA], [1], [if true, include JNA bug fix])
fi
AC_SUBST(NC_LIBS,[$NC_LIBS])
AC_SUBST(HAS_DAP,[$enable_dap])
AC_SUBST(HAS_NC2,[$nc_build_v2])
AC_SUBST(HAS_NC4,[$enable_netcdf_4])
AC_SUBST(HAS_HDF4,[$enable_hdf4])
AC_SUBST(HAS_PNETCDF,[$enable_pnetcdf])
AC_SUBST(HAS_HDF5,[$enable_netcdf_4])
AC_SUBST(HAS_SZLIB,[$nc_has_szlib])
AC_SUBST(HAS_PARALLEL,[$enable_parallel])
AC_SUBST(HAS_DISKLESS,[$enable_diskless])
AC_SUBST(HAS_MMAP,[$enable_mmap])
AC_SUBST(HAS_JNA,[$enable_jna])
# Include some specifics for netcdf on windows.
#AH_VERBATIM([_WIN32_STRICMP],
AH_BOTTOM(
@ -994,15 +1037,65 @@ AH_BOTTOM([#include "ncconfigure.h"])
# debugging purposes.
# cp confdefs.h my_config.h
#####
# Create output variables from various
# shell variables, for use in generating
# libnetcdf.settings.
#####
AC_SUBST([enable_shared])
AC_SUBST([enable_static])
AC_SUBST([CFLAGS])
AC_SUBST([CPPFLAGS])
AC_SUBST([LDFLAGS])
AC_SUBST([AM_CFLAGS])
AC_SUBST([AM_CPPFLAGS])
AC_SUBST([AM_LDFLAGS])
# Args:
# 1. netcdf_meta.h variable
# 2. conditional variable that is yes or no.
# 3. default condition
#
# example: AX_SET_META([NC_HAS_NC2],[$nc_build_v2],[]) # Because it checks for no.
# AX_SET_META([NC_HAS_HDF4],[$enable_hdf4],[yes])
AC_DEFUN([AX_SET_META],[
if [ test "x$2" = x$3 ]; then
AC_SUBST([$1]) $1=1
else
AC_SUBST([$1]) $1=0
fi
])
#####
# Define values used in include/netcdf_meta.h
#####
AC_SUBST([NC_VERSION]) NC_VERSION=$VERSION
AX_SET_META([NC_HAS_NC2],[$nc_build_v2],[yes])
AX_SET_META([NC_HAS_NC4],[$enable_netcdf_4],[yes])
AX_SET_META([NC_HAS_HDF4],[$enable_hdf4],[yes])
AX_SET_META([NC_HAS_HDF5],[$enable_netcdf_4],[yes])
AX_SET_META([NC_HAS_SZIP],[$ac_cv_func_H5Z_SZIP],[yes])
AX_SET_META([NC_HAS_DAP],[$enable_dap],[yes])
AX_SET_META([NC_HAS_DISKLESS],[$enable_diskless],[yes])
AX_SET_META([NC_HAS_MMAP],[$enable_mmap],[yes])
AX_SET_META([NC_HAS_JNA],[$enable_jna],[yes])
AX_SET_META([NC_HAS_PNETCDF],[$enable_pnetcdf],[yes])
AX_SET_META([NC_HAS_PARALLEL],[$enable_parallel],[yes])
#####
# End netcdf_meta.h definitions.
#####
AC_MSG_NOTICE([generating header files and makefiles])
AC_CONFIG_FILES([Makefile
nc-config
netcdf.pc
libnetcdf.settings
include/netcdf_meta.h
include/Makefile
h5_test/Makefile
man4/Makefile
man4/images/Makefile
docs/Makefile
docs/images/Makefile
libsrc/Makefile
libsrc4/Makefile
libsrc5/Makefile
@ -1019,8 +1112,8 @@ AC_CONFIG_FILES([Makefile
libdap2/Makefile
libdispatch/Makefile
liblib/Makefile
ncdump/cdl4/Makefile
ncdump/expected4/Makefile
ncdump/cdl/Makefile
ncdump/expected/Makefile
ncdap_test/Makefile
ncdap_test/testdata3/Makefile
ncdap_test/expected3/Makefile
@ -1030,3 +1123,5 @@ AC_CONFIG_FILES([Makefile
],
[test -f nc-config && chmod 755 nc-config])
AC_OUTPUT()
cat libnetcdf.settings

82
docs/CMakeLists.txt Normal file
View File

@ -0,0 +1,82 @@
#####
# Build doxygen documentation, if need be.
#####
IF(ENABLE_DOXYGEN)
# The following is redundant but does not hurt anything.
FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.html ${CMAKE_CURRENT_SOURCE_DIR}/images ${CMAKE_CURRENT_SOURCE_DIR}/*.doc ${CMAKE_CURRENT_SOURCE_DIR}/*.xml ${CMAKE_CURRENT_SOURCE_DIR}/*.m4 ${CMAKE_CURRENT_SOURCE_DIR}/*.texi)
FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
# Set abs_top_srcdir to work with the autotools
# doxyfile template.
SET(abs_top_srcdir ${CMAKE_SOURCE_DIR})
SET(abs_builddir ${CMAKE_CURRENT_BINARY_DIR})
# Create general and guide doxyfiles.
CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY)
# Generate User Documentation
ADD_CUSTOM_TARGET(doc_all ALL
${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API Documentation" VERBATIM)
# If ENABLE_DOXYGEN_LATEX_OUTPUT is true, automatically build
# the PDF files.
IF(ENABLE_DOXYGEN_PDF_OUTPUT)
# Process 'main' netcdf documentation.
FIND_PROGRAM(NC_MAKE NAMES make)
FIND_PROGRAM(NC_PDFLATEX NAMES pdflatex)
IF(NOT NC_MAKE OR NOT NC_PDFLATEX)
MESSAGE(WARNING "Unable to locate 'make' and/or 'pdflatex' program. Unable to build pdf documentation.")
ELSE()
ADD_CUSTOM_TARGET(doc_all_pdf ALL
make
COMMAND mv refman.pdf ../netcdf.pdf
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/latex_main"
COMMENT "Building NetCDF PDF Documentation" VERBATIM
)
# Process 'guide' netcdf documentation.
ADD_CUSTOM_TARGET(doc_guide_pdf ALL
make
COMMAND mv refman.pdf ../netcdf-guide.pdf
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/latex_guide"
COMMENT "Building NetCDF-Guide PDF Documentation" VERBATIM
)
# Process 'tutorial' netcdf documentation.
ADD_CUSTOM_TARGET(doc_tutorial_pdf ALL
make
COMMAND mv refman.pdf ../netcdf-tutorial.pdf
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/latex_tutorial"
COMMENT "Building NetCDF-Tutorial PDF Documentation" VERBATIM
)
INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}/netcdf.pdf"
"${CMAKE_CURRENT_BINARY_DIR}/netcdf-guide.pdf"
"${CMAKE_CURRENT_BINARY_DIR}/netcdf-tutorial.pdf"
DESTINATION "${CMAKE_INSTALL_DOCDIR}/pdf"
COMPONENT documentation)
ENDIF()
ENDIF()
# Copy the image files used by markdown documentation
# manually.
FILE(GLOB IMG_FILES ${CMAKE_CURRENT_BINARY_DIR}/images/*.jpg
${CMAKE_CURRENT_BINARY_DIR}/images/*.png)
FILE(COPY ${IMG_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/html)
INSTALL(DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/html/"
DESTINATION "${CMAKE_INSTALL_DOCDIR}/html"
COMPONENT documentation)
ENDIF(ENABLE_DOXYGEN)
SET(CUR_EXTRA_DIST ${CUR_EXTRA_DIST} CMakeLists.txt Makefile.am netcdf.m4 DoxygenLayout.xml Doxyfile.in Doxyfile.guide.in footer.html mainpage.dox tutorial.dox install.dox dispatch.dox guide.dox types.dox notes.dox cdl.dox architecture.dox internal.dox install-fortran.dox Doxyfile.in.cmake windows-binaries.md building-with-cmake.md)
ADD_EXTRA_DIST("${CUR_EXTRA_DIST}")

View File

@ -44,21 +44,21 @@ PROJECT_NUMBER = @PACKAGE_VERSION@
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF =
PROJECT_BRIEF =
# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
# the documentation. The maximum height of the logo should not exceed 55 pixels
# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
# to the output directory.
PROJECT_LOGO = ../../man4/netcdf-50x50.png
PROJECT_LOGO = @abs_top_srcdir@/docs/netcdf-50x50.png
# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
# into which the generated documentation will be written. If a relative path is
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
OUTPUT_DIRECTORY =
OUTPUT_DIRECTORY =
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
@ -110,7 +110,7 @@ REPEAT_BRIEF = YES
# the entity):The $name class, The $name widget, The $name file, is, provides,
# specifies, contains, represents, a, an and the.
ABBREVIATE_BRIEF =
ABBREVIATE_BRIEF =
# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
# doxygen will generate a detailed section even if there is only a brief
@ -144,7 +144,7 @@ FULL_PATH_NAMES = NO
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH =
STRIP_FROM_PATH =
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
@ -153,7 +153,7 @@ STRIP_FROM_PATH =
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
STRIP_FROM_INC_PATH =
STRIP_FROM_INC_PATH =
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful is your file systems doesn't
@ -208,7 +208,7 @@ SEPARATE_MEMBER_PAGES = NO
# uses this value to replace tabs by spaces in code fragments.
# Minimum value: 1, maximum value: 16, default value: 4.
TAB_SIZE = 8
TAB_SIZE = 4
# This tag can be used to specify a number of aliases that act as commands in
# the documentation. An alias has the form:
@ -220,13 +220,13 @@ TAB_SIZE = 8
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines.
ALIASES =
ALIASES =
# This tag can be used to specify a number of word-keyword mappings (TCL only).
# A mapping has the form "name=value". For example adding "class=itcl::class"
# will allow you to use the command class in the itcl::class meaning.
TCL_SUBST =
TCL_SUBST =
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
# only. Doxygen will then generate output that is more tailored for C. For
@ -270,7 +270,7 @@ OPTIMIZE_OUTPUT_VHDL = NO
# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
# the files are not read by doxygen.
EXTENSION_MAPPING =
EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
@ -606,7 +606,7 @@ GENERATE_DEPRECATEDLIST= YES
# sections, marked by \if <section_label> ... \endif and \cond <section_label>
# ... \endcond blocks.
ENABLED_SECTIONS =
ENABLED_SECTIONS =
# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
# initial value of a variable or macro / define can have for it to appear in the
@ -648,7 +648,7 @@ SHOW_NAMESPACES = YES
# by doxygen. Whatever the program writes to standard output is used as the file
# version. For an example see the documentation.
FILE_VERSION_FILTER =
FILE_VERSION_FILTER =
# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
# by doxygen. The layout file controls the global structure of the generated
@ -661,7 +661,7 @@ FILE_VERSION_FILTER =
# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
# tag is left empty.
LAYOUT_FILE = @abs_top_srcdir@/man4/DoxygenLayout.xml
LAYOUT_FILE = @abs_top_srcdir@/docs/DoxygenLayout.xml
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
@ -672,7 +672,7 @@ LAYOUT_FILE = @abs_top_srcdir@/man4/DoxygenLayout.xml
# search path. Do not use file names with spaces, bibtex cannot handle them. See
# also \cite for info how to create references.
CITE_BIB_FILES =
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
@ -731,7 +731,7 @@ WARN_FORMAT = "$file:$line: $text"
# messages should be written. If left blank the output is written to standard
# error (stderr).
WARN_LOGFILE =
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
@ -746,30 +746,31 @@ WARN_LOGFILE =
INPUT = \
@abs_top_srcdir@/RELEASE_NOTES.md \
@abs_top_srcdir@/COPYRIGHT \
@abs_top_srcdir@/man4/groups.dox \
@abs_top_srcdir@/man4/mainpage.dox \
@abs_top_srcdir@/man4/install.md \
@abs_top_srcdir@/man4/install-fortran.md \
@abs_top_srcdir@/man4/dispatch.dox \
@abs_top_srcdir@/man4/types.dox \
@abs_top_srcdir@/man4/notes.dox \
@abs_top_srcdir@/man4/cdl.dox \
@abs_top_srcdir@/man4/architecture.dox \
@abs_top_srcdir@/man4/internal.dox \
@abs_top_srcdir@/man4/windows-binaries.md \
@abs_top_srcdir@/man4/all-error-codes.md \
@abs_top_srcdir@/man4/cmake_faq.md \
@abs_top_srcdir@/docs/FAQ.md \
@abs_top_srcdir@/docs/architecture.dox \
@abs_top_srcdir@/docs/groups.dox \
@abs_top_srcdir@/docs/mainpage.dox \
@abs_top_srcdir@/docs/install.md \
@abs_top_srcdir@/docs/install-fortran.md \
@abs_top_srcdir@/docs/dispatch.dox \
@abs_top_srcdir@/docs/types.dox \
@abs_top_srcdir@/docs/notes.dox \
@abs_top_srcdir@/docs/internal.dox \
@abs_top_srcdir@/docs/windows-binaries.md \
@abs_top_srcdir@/docs/all-error-codes.md \
@abs_top_srcdir@/docs/cmake_faq.md \
@abs_top_srcdir@/include/netcdf.h \
@abs_top_srcdir@/include/netcdf_meta.h \
@abs_top_srcdir@/libdispatch/dfile.c \
@abs_top_srcdir@/libdispatch/ddim.c \
@abs_top_srcdir@/libdispatch/datt.c \
@abs_top_srcdir@/libdispatch/dattget.c \
@abs_top_srcdir@/libdispatch/dattinq.c \
@abs_top_srcdir@/libdispatch/dattput.c \
@abs_top_srcdir@/libdispatch/dvar.c \
@abs_top_srcdir@/libdispatch/dvarget.c \
@abs_top_srcdir@/libdispatch/dvarinq.c \
@abs_top_srcdir@/libdispatch/dvarput.c \
@abs_top_srcdir@/libdispatch/datt.c \
@abs_top_srcdir@/libdispatch/dattget.c \
@abs_top_srcdir@/libdispatch/dattinq.c \
@abs_top_srcdir@/libdispatch/dattput.c \
@abs_top_srcdir@/libdispatch/dgroup.c \
@abs_top_srcdir@/libdispatch/dtype.c \
@abs_top_srcdir@/libdispatch/dcompound.c \
@ -781,7 +782,20 @@ INPUT = \
@abs_top_srcdir@/libdispatch/dv2i.c \
@abs_top_srcdir@/libsrc4/nc4file.c \
@abs_top_srcdir@/ncdump/ncdump.c \
@abs_top_srcdir@/ncdump/nccopy.c
@abs_top_srcdir@/ncdump/nccopy.c \
@abs_top_srcdir@/docs/guide.dox \
@abs_top_srcdir@/docs/tutorial.dox \
@abs_top_srcdir@/examples/C/simple_xy_wr.c \
@abs_top_srcdir@/examples/C/simple_xy_rd.c \
@abs_top_srcdir@/examples/C/sfc_pres_temp_wr.c \
@abs_top_srcdir@/examples/C/sfc_pres_temp_rd.c \
@abs_top_srcdir@/examples/C/pres_temp_4D_wr.c \
@abs_top_srcdir@/examples/C/pres_temp_4D_rd.c \
@abs_top_srcdir@/examples/C/simple_nc4_wr.c \
@abs_top_srcdir@/examples/C/simple_nc4_rd.c \
@abs_top_srcdir@/examples/C/simple_xy_nc4_wr.c \
@abs_top_srcdir@/examples/C/simple_xy_nc4_rd.c
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@ -801,13 +815,13 @@ INPUT_ENCODING = UTF-8
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS =
FILE_PATTERNS =
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
RECURSIVE = NO
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
@ -816,7 +830,7 @@ RECURSIVE = YES
# Note that relative paths are relative to the directory from which doxygen is
# run.
EXCLUDE =
EXCLUDE =
# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
# directories that are symbolic links (a Unix file system feature) are excluded
@ -832,7 +846,7 @@ EXCLUDE_SYMLINKS = NO
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories for example use the pattern */test/*
EXCLUDE_PATTERNS =
EXCLUDE_PATTERNS =
# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
# (namespaces, classes, functions, etc.) that should be excluded from the
@ -843,7 +857,7 @@ EXCLUDE_PATTERNS =
# Note that the wildcards are matched against the file with absolute path, so to
# exclude all test directories use the pattern */test/*
EXCLUDE_SYMBOLS =
EXCLUDE_SYMBOLS =
# The EXAMPLE_PATH tag can be used to specify one or more files or directories
# that contain example code fragments that are included (see the \include
@ -856,7 +870,7 @@ EXAMPLE_PATH = @abs_top_srcdir@/examples/C
# *.h) to filter out the source-files in the directories. If left blank all
# files are included.
EXAMPLE_PATTERNS =
EXAMPLE_PATTERNS =
# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
# searched for input files to be used with the \include or \dontinclude commands
@ -869,19 +883,19 @@ EXAMPLE_RECURSIVE = NO
# that contain images that are to be included in the documentation (see the
# \image command).
IMAGE_PATH = @abs_top_srcdir@/man4/images/chunking2.png \
@abs_top_srcdir@/man4/images/compatibility3.png \
@abs_top_srcdir@/man4/images/compression.png \
@abs_top_srcdir@/man4/images/groups.png \
@abs_top_srcdir@/man4/images/nc4-model.png \
@abs_top_srcdir@/man4/images/ncatts.png \
@abs_top_srcdir@/man4/images/nc-classic-uml.png \
@abs_top_srcdir@/man4/images/nccoords.png \
@abs_top_srcdir@/man4/images/ncfile.png \
@abs_top_srcdir@/man4/images/netcdf_architecture.png \
@abs_top_srcdir@/man4/images/pnetcdf.png \
@abs_top_srcdir@/man4/images/deptree.jpg \
@abs_top_srcdir@/man4/images/InstallTreeWindows.jpg
IMAGE_PATH = @abs_top_srcdir@/docs/images/chunking2.png \
@abs_top_srcdir@/docs/images/compatibility3.png \
@abs_top_srcdir@/docs/images/compression.png \
@abs_top_srcdir@/docs/images/groups.png \
@abs_top_srcdir@/docs/images/nc4-model.png \
@abs_top_srcdir@/docs/images/ncatts.png \
@abs_top_srcdir@/docs/images/nc-classic-uml.png \
@abs_top_srcdir@/docs/images/nccoords.png \
@abs_top_srcdir@/docs/images/ncfile.png \
@abs_top_srcdir@/docs/images/netcdf_architecture.png \
@abs_top_srcdir@/docs/images/pnetcdf.png \
@abs_top_srcdir@/docs/images/deptree.jpg \
@abs_top_srcdir@/docs/images/InstallTreeWindows.jpg
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
@ -898,7 +912,7 @@ IMAGE_PATH = @abs_top_srcdir@/man4/images/chunking2.png \
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
INPUT_FILTER =
INPUT_FILTER =
# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
# basis. Doxygen will compare the file name with each pattern and apply the
@ -907,7 +921,7 @@ INPUT_FILTER =
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
FILTER_PATTERNS =
FILTER_PATTERNS =
# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
# INPUT_FILTER ) will also be used to filter the input files that are used for
@ -922,14 +936,14 @@ FILTER_SOURCE_FILES = NO
# *.ext= (so without naming a filter).
# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
FILTER_SOURCE_PATTERNS =
FILTER_SOURCE_PATTERNS =
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
@ -1034,7 +1048,7 @@ VERBATIM_HEADERS = YES
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
# CLANG_OPTIONS =
# CLANG_OPTIONS =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
@ -1060,7 +1074,7 @@ COLS_IN_ALPHA_INDEX = 5
# while generating the index headers.
# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
IGNORE_PREFIX =
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
@ -1104,7 +1118,7 @@ HTML_FILE_EXTENSION = .html
# of the possible markers and block names see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_HEADER =
HTML_HEADER =
# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
# generated HTML page. If the tag is left blank doxygen will generate a standard
@ -1114,7 +1128,7 @@ HTML_HEADER =
# that doxygen normally uses.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_FOOTER = @abs_top_srcdir@/man4/footer.html
HTML_FOOTER = @abs_top_srcdir@/docs/footer.html
# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
# sheet that is used by each HTML page. It can be used to fine-tune the look of
@ -1126,7 +1140,7 @@ HTML_FOOTER = @abs_top_srcdir@/man4/footer.html
# obsolete.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_STYLESHEET =
HTML_STYLESHEET =
# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
# defined cascading style sheet that is included after the standard style sheets
@ -1137,7 +1151,7 @@ HTML_STYLESHEET =
# see the documentation.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_STYLESHEET =
HTML_EXTRA_STYLESHEET =
# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
# files or namespaces will be aligned in HTML using tables. If set to
@ -1153,7 +1167,7 @@ HTML_EXTRA_STYLESHEET =
# files will be copied as-is; there are no commands or markers available.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_EXTRA_FILES =
HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the stylesheet and background images according to
@ -1281,7 +1295,7 @@ GENERATE_HTMLHELP = NO
# written to the html output directory.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_FILE =
CHM_FILE =
# The HHC_LOCATION tag can be used to specify the location (absolute path
# including file name) of the HTML help compiler ( hhc.exe). If non-empty
@ -1289,7 +1303,7 @@ CHM_FILE =
# The file has to be specified with full path.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
HHC_LOCATION =
HHC_LOCATION =
# The GENERATE_CHI flag controls if a separate .chi index file is generated (
# YES) or that it should be included in the master .chm file ( NO).
@ -1302,7 +1316,7 @@ GENERATE_CHI = NO
# and project file content.
# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
CHM_INDEX_ENCODING =
CHM_INDEX_ENCODING =
# The BINARY_TOC flag controls whether a binary table of contents is generated (
# YES) or a normal table of contents ( NO) in the .chm file.
@ -1332,7 +1346,7 @@ GENERATE_QHP = NO
# the HTML output folder.
# This tag requires that the tag GENERATE_QHP is set to YES.
QCH_FILE =
QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
@ -1357,7 +1371,7 @@ QHP_VIRTUAL_FOLDER = doc
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_NAME =
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
@ -1365,21 +1379,21 @@ QHP_CUST_FILTER_NAME =
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_CUST_FILTER_ATTRS =
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
QHP_SECT_FILTER_ATTRS =
# The QHG_LOCATION tag can be used to specify the location of Qt's
# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
# generated .qhp file.
# This tag requires that the tag GENERATE_QHP is set to YES.
QHG_LOCATION =
QHG_LOCATION =
# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
# generated, together with the HTML files, they form an Eclipse help plugin. To
@ -1505,7 +1519,7 @@ MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_EXTENSIONS =
MATHJAX_EXTENSIONS =
# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
# of code that will be used on startup of the MathJax code. See the MathJax site
@ -1513,7 +1527,7 @@ MATHJAX_EXTENSIONS =
# example see the documentation.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_CODEFILE =
MATHJAX_CODEFILE =
# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
# the HTML output. The underlying search engine uses javascript and DHTML and
@ -1546,7 +1560,7 @@ SEARCHENGINE = YES
# The default value is: NO.
# This tag requires that the tag SEARCHENGINE is set to YES.
SERVER_BASED_SEARCH = NO
SERVER_BASED_SEARCH = @SERVER_SIDE_SEARCH@
# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
# script for searching. Instead the search results are written to an XML file
@ -1573,7 +1587,7 @@ EXTERNAL_SEARCH = NO
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
SEARCHENGINE_URL =
SEARCHENGINE_URL =
# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
# search data is written to a file for indexing by an external tool. With the
@ -1589,7 +1603,7 @@ SEARCHDATA_FILE = searchdata.xml
# projects and redirect the results back to the right project.
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTERNAL_SEARCH_ID =
EXTERNAL_SEARCH_ID =
# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
# projects other than the one defined by this configuration file, but that are
@ -1599,7 +1613,7 @@ EXTERNAL_SEARCH_ID =
# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
# This tag requires that the tag SEARCHENGINE is set to YES.
EXTRA_SEARCH_MAPPINGS =
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
@ -1608,7 +1622,7 @@ EXTRA_SEARCH_MAPPINGS =
# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output.
# The default value is: YES.
GENERATE_LATEX = NO
GENERATE_LATEX = @NC_ENABLE_DOXYGEN_PDF_OUTPUT@
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
@ -1616,7 +1630,7 @@ GENERATE_LATEX = NO
# The default directory is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_OUTPUT = latex
LATEX_OUTPUT = latex_main
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
@ -1660,7 +1674,7 @@ PAPER_TYPE = a4wide
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
EXTRA_PACKAGES =
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
# generated LaTeX document. The header should contain everything until the first
@ -1676,7 +1690,7 @@ EXTRA_PACKAGES =
# PROJECT_NAME), or the project number (see PROJECT_NUMBER).
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_HEADER =
LATEX_HEADER =
# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
# generated LaTeX document. The footer should contain everything after the last
@ -1685,7 +1699,7 @@ LATEX_HEADER =
# Note: Only use a user-defined footer if you know what you are doing!
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_FOOTER =
LATEX_FOOTER =
# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
# other source files which should be copied to the LATEX_OUTPUT output
@ -1693,7 +1707,7 @@ LATEX_FOOTER =
# markers available.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EXTRA_FILES =
LATEX_EXTRA_FILES =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
@ -1793,14 +1807,14 @@ RTF_HYPERLINKS = NO
# default style sheet that doxygen normally uses.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_STYLESHEET_FILE =
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's config file. A template extensions file can be generated
# using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# Configuration options related to the man page output
@ -1857,18 +1871,6 @@ GENERATE_XML = NO
XML_OUTPUT = xml
# The XML_SCHEMA tag can be used to specify a XML schema, which can be used by a
# validating XML parser to check the syntax of the XML files.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_SCHEMA =
# The XML_DTD tag can be used to specify a XML DTD, which can be used by a
# validating XML parser to check the syntax of the XML files.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_DTD =
# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program
# listings (including syntax highlighting and cross-referencing information) to
# the XML output. Note that enabling this will significantly increase the size
@ -1944,7 +1946,7 @@ PERLMOD_PRETTY = YES
# overwrite each other's variables.
# This tag requires that the tag GENERATE_PERLMOD is set to YES.
PERLMOD_MAKEVAR_PREFIX =
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
@ -1985,7 +1987,7 @@ SEARCH_INCLUDES = YES
# preprocessor.
# This tag requires that the tag SEARCH_INCLUDES is set to YES.
INCLUDE_PATH =
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
@ -1993,7 +1995,7 @@ INCLUDE_PATH =
# used.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
INCLUDE_FILE_PATTERNS =
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that are
# defined before the preprocessor is started (similar to the -D option of e.g.
@ -2012,7 +2014,7 @@ PREDEFINED = USE_NETCDF4
# definition found in the source code.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_AS_DEFINED =
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
# remove all refrences to function-like macros that are alone on a line, have an
@ -2041,13 +2043,13 @@ SKIP_FUNCTION_MACROS = YES
# the path). If a tag file is not located in the directory in which doxygen is
# run, you must also specify the path to the tagfile here.
TAGFILES = @CMAKE_CURRENT_BINARY_DIR@/guide.tag=html_guide/ "@CMAKE_CURRENT_BINARY_DIR@/tutorial.tag = html_tutorial/"
# TAGFILES = html/guide.tag=./html_guide "html/tutorial.tag = ./html_tutorial"
# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
# tag file that is based on the input files it reads. See section "Linking to
# external documentation" for more information about the usage of tag files.
GENERATE_TAGFILE = @CMAKE_CURRENT_BINARY_DIR@/main.tag
# GENERATE_TAGFILE = html/main.tag
# If the ALLEXTERNALS tag is set to YES all external class will be listed in the
# class index. If set to NO only the inherited external classes will be listed.
@ -2095,14 +2097,14 @@ CLASS_DIAGRAMS = YES
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
MSCGEN_PATH =
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
# If left empty dia is assumed to be found in the default search path.
DIA_PATH =
DIA_PATH =
# If set to YES, the inheritance and collaboration graphs will hide inheritance
# and usage relations if the target is undocumented or is not a class.
@ -2151,7 +2153,7 @@ DOT_FONTSIZE = 10
# the path where dot can find it using this tag.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_FONTPATH =
DOT_FONTPATH =
# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
# each documented class showing the direct and indirect inheritance relations.
@ -2289,26 +2291,26 @@ INTERACTIVE_SVG = NO
# found. If left blank, it is assumed the dot tool can be found in the path.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_PATH =
DOT_PATH =
# The DOTFILE_DIRS tag can be used to specify one or more directories that
# contain dot files that are included in the documentation (see the \dotfile
# command).
# This tag requires that the tag HAVE_DOT is set to YES.
DOTFILE_DIRS =
DOTFILE_DIRS =
# The MSCFILE_DIRS tag can be used to specify one or more directories that
# contain msc files that are included in the documentation (see the \mscfile
# command).
MSCFILE_DIRS =
MSCFILE_DIRS =
# The DIAFILE_DIRS tag can be used to specify one or more directories that
# contain dia files that are included in the documentation (see the \diafile
# command).
DIAFILE_DIRS =
DIAFILE_DIRS =
# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
# that will be shown in the graph. If the number of nodes in a graph becomes

2029
docs/FAQ.md Normal file

File diff suppressed because it is too large Load Diff

View File

@ -8,7 +8,9 @@ EXTRA_DIST = netcdf.m4 DoxygenLayout.xml Doxyfile.in footer.html \
mainpage.dox tutorial.dox install.dox dispatch.dox \
guide.dox types.dox notes.dox cdl.dox \
architecture.dox internal.dox windows-binaries.md \
building-with-cmake.md CMakeLists.txt
building-with-cmake.md CMakeLists.txt \
groups.dox install.md install-fortran.md \
all-error-codes.md cmake_faq.md
# Turn off parallel builds in this directory.
.NOTPARALLEL:
@ -22,7 +24,7 @@ if BUILD_DOCS
# Copy man pages.
#directory = $(top_srcdir)/docs/man/man3/
#dist_man_MANS = $(man4directory)/man_page_1.3 $(directory)/man_page_2.3
#dist_man_MANS = $(docsdirectory)/man_page_1.3 $(directory)/man_page_2.3
# $(directory)/man_page_1.3: doxyfile.stamp
# $(directory)/man_page_2.3: doxyfile.stamp
@ -36,12 +38,16 @@ if BUILD_DOCS
all-local: doxyfile.stamp
clean-local:
rm -rf $(top_builddir)/man4/man
rm -rf $(top_builddir)/man4/html
rm -rf $(top_builddir)/man4/latex
endif
rm -rf $(top_builddir)/docs/man
rm -rf $(top_builddir)/docs/html
rm -rf $(top_builddir)/docs/latex
endif
# Timestamp to prevent rebuilds.
# We must do this twice. The first time
# builds the tag files. The second time
# includes them in the documentation.
doxyfile.stamp:
$(DOXYGEN) Doxyfile
echo Timestamp > doxyfile.stamp

View File

@ -3,49 +3,47 @@ NetCDF Error Code Listing {#nc-error-codes}
\ingroup error
\todo Review list for completeness.
\tableofcontents
# NetCDF-3 Error Codes {#nc3-error-codes}
~~~~
#define NC_NOERR 0 // No Error
#define NC_EBADID (-33) // Not a netcdf id
#define NC_ENFILE (-34) // Too many netcdfs open
#define NC_EEXIST (-35) // netcdf file exists && NC_NOCLOBBER
#define NC_EINVAL (-36) // Invalid Argument
#define NC_EPERM (-37) // Write to read only
#define NC_ENOTINDEFINE (-38) // Operation not allowed in data mode
#define NC_EINDEFINE (-39) // Operation not allowed in define mode
#define NC_EINVALCOORDS (-40) // Index exceeds dimension bound
#define NC_EMAXDIMS (-41) // NC_MAX_DIMS exceeded
#define NC_ENAMEINUSE (-42) // String match to name in use
#define NC_ENOTATT (-43) // Attribute not found
#define NC_EMAXATTS (-44) // NC_MAX_ATTRS exceeded
#define NC_EBADTYPE (-45) // Not a netcdf data type
#define NC_EBADDIM (-46) // Invalid dimension id or name
#define NC_EUNLIMPOS (-47) // NC_UNLIMITED in the wrong index
#define NC_EMAXVARS (-48) // NC_MAX_VARS exceeded
#define NC_ENOTVAR (-49) // Variable not found
#define NC_EGLOBAL (-50) // Action prohibited on NC_GLOBAL varid
#define NC_ENOTNC (-51) // Not a netcdf file
#define NC_ESTS (-52) // In Fortran, string too short
#define NC_EMAXNAME (-53) // NC_MAX_NAME exceeded
#define NC_EUNLIMIT (-54) // NC_UNLIMITED size already in use
#define NC_ENORECVARS (-55) // nc_rec op when there are no record vars
#define NC_ECHAR (-56) // Attempt to convert between text & numbers
#define NC_EEDGE (-57) // Edge+start exceeds dimension bound
#define NC_ESTRIDE (-58) // Illegal stride
#define NC_EBADNAME (-59) // Attribute or variable name contains illegal characters
#define NC_NOERR 0 // No Error
#define NC_EBADID (-33) // Not a netcdf id
#define NC_ENFILE (-34) // Too many netcdfs open
#define NC_EEXIST (-35) // netcdf file exists && NC_NOCLOBBER
#define NC_EINVAL (-36) // Invalid Argument
#define NC_EPERM (-37) // Write to read only
#define NC_ENOTINDEFINE (-38) // Operation not allowed in data mode
#define NC_EINDEFINE (-39) // Operation not allowed in define mode
#define NC_EINVALCOORDS (-40) // Index exceeds dimension bound
#define NC_EMAXDIMS (-41) // NC_MAX_DIMS exceeded
#define NC_ENAMEINUSE (-42) // String match to name in use
#define NC_ENOTATT (-43) // Attribute not found
#define NC_EMAXATTS (-44) // NC_MAX_ATTRS exceeded
#define NC_EBADTYPE (-45) // Not a netcdf data type
#define NC_EBADDIM (-46) // Invalid dimension id or name
#define NC_EUNLIMPOS (-47) // NC_UNLIMITED in the wrong index
#define NC_EMAXVARS (-48) // NC_MAX_VARS exceeded
#define NC_ENOTVAR (-49) // Variable not found
#define NC_EGLOBAL (-50) // Action prohibited on NC_GLOBAL varid
#define NC_ENOTNC (-51) // Not a netcdf file
#define NC_ESTS (-52) // In Fortran, string too short
#define NC_EMAXNAME (-53) // NC_MAX_NAME exceeded
#define NC_EUNLIMIT (-54) // NC_UNLIMITED size already in use
#define NC_ENORECVARS (-55) // nc_rec op when there are no record vars
#define NC_ECHAR (-56) // Attempt to convert between text & numbers
#define NC_EEDGE (-57) // Edge+start exceeds dimension bound
#define NC_ESTRIDE (-58) // Illegal stride
#define NC_EBADNAME (-59) // Attribute or variable name contains illegal characters
// N.B. following must match value in ncx.h
// N.B. following must match value in ncx.h
#define NC_ERANGE (-60) // Math result not representable
#define NC_ENOMEM (-61) // Memory allocation (malloc) failure
#define NC_EVARSIZE (-62) // One or more variable sizes violate format constraints
#define NC_EDIMSIZE (-63) // Invalid dimension size
#define NC_ETRUNC (-64) // File likely truncated or possibly corrupted
#define NC_ERANGE (-60) // Math result not representable
#define NC_ENOMEM (-61) // Memory allocation (malloc) failure
#define NC_EVARSIZE (-62) // One or more variable sizes violate format constraints
#define NC_EDIMSIZE (-63) // Invalid dimension size
#define NC_ETRUNC (-64) // File likely truncated or possibly corrupted
~~~~
# NetCDF-4 Error Codes {#nc4-error-codes}
@ -66,10 +64,10 @@ were added for new errors unique to netCDF-4.
#define NC_ENOCOMPOUND (-109)
#define NC_EATTEXISTS (-110)
#define NC_ENOTNC4 (-111) // Attempting netcdf-4 operation on netcdf-3 file.
#define NC_ESTRICTNC3 (-112) // Attempting netcdf-4 operation on strict nc3 netcdf-4 file.
#define NC_EBADGRPID (-113) // Bad group id. Bad!
#define NC_EBADTYPEID (-114) // Bad type id.
#define NC_EBADFIELDID (-115) // Bad field id.
#define NC_ESTRICTNC3 (-112) // Attempting netcdf-4 operation on strict nc3 netcdf-4 file.
#define NC_EBADGRPID (-113) // Bad group id. Bad!
#define NC_EBADTYPEID (-114) // Bad type id.
#define NC_EBADFIELDID (-115) // Bad field id.
#define NC_EUNKNAME (-116)
~~~~
@ -79,25 +77,24 @@ If the DAP client is enabled, then the following additional error codes
may occur.
~~~~
#define NC_EDAP (-66) // Generic DAP error
#define NC_ECURL (-67) // Generic libcurl error
#define NC_EIO (-68) // Generic IO error
#define NC_ENODATA (-69) // Attempt to access variable with no data
#define NC_EDAPSVC (-70) // DAP Server side error
#define NC_EDAS (-71) // Malformed or inaccessible DAS
#define NC_EDDS (-72) // Malformed or inaccessible DDS
#define NC_EDATADDS (-73) // Malformed or inaccessible DATADDS
#define NC_EDAPURL (-74) // Malformed DAP URL
#define NC_EDAP (-66) // Generic DAP error
#define NC_ECURL (-67) // Generic libcurl error
#define NC_EIO (-68) // Generic IO error
#define NC_ENODATA (-69) // Attempt to access variable with no data
#define NC_EDAPSVC (-70) // DAP Server side error
#define NC_EDAS (-71) // Malformed or inaccessible DAS
#define NC_EDDS (-72) // Malformed or inaccessible DDS
#define NC_EDATADDS (-73) // Malformed or inaccessible DATADDS
#define NC_EDAPURL (-74) // Malformed DAP URL
#define NC_EDAPCONSTRAINT (-75) // Malformed DAP Constraint
#define NC_EDAP (-66) // Generic DAP error
#define NC_ECURL (-67) // Generic libcurl error
#define NC_EIO (-68) // Generic IO error
#define NC_ENODATA (-69) // Attempt to access variable with no data
#define NC_EDAPSVC (-70) // DAP Server side error
#define NC_EDAS (-71) // Malformed or inaccessible DAS
#define NC_EDDS (-72) // Malformed or inaccessible DDS
#define NC_EDATADDS (-73) // Malformed or inaccessible DATADDS
#define NC_EDAPURL (-74) // Malformed DAP URL
#define NC_EDAP (-66) // Generic DAP error
#define NC_ECURL (-67) // Generic libcurl error
#define NC_EIO (-68) // Generic IO error
#define NC_ENODATA (-69) // Attempt to access variable with no data
#define NC_EDAPSVC (-70) // DAP Server side error
#define NC_EDAS (-71) // Malformed or inaccessible DAS
#define NC_EDDS (-72) // Malformed or inaccessible DDS
#define NC_EDATADDS (-73) // Malformed or inaccessible DATADDS
#define NC_EDAPURL (-74) // Malformed DAP URL
#define NC_EDAPCONSTRAINT (-75) // Malformed DAP Constraint
~~~~

20
docs/architecture.dox Normal file
View File

@ -0,0 +1,20 @@
/** \file
Documentation of netCDF architecture.
\page architecture NetCDF Library Architecture
\image html netcdf_architecture.png "NetCDF Architecture"
\image latex netcdf_architecture.png "NetCDF Architecture"
\image rtf netcdf_architecture.png "NetCDF Architecture"
The netCDF C-based libraries depend on a core C library and some externally developed libraries.
- NetCDF-Java is an independent implementation, not shown here
- C-based 3rd-party netCDF APIs for other languages include Python, Ruby, Perl, Fortran-2003, MATLAB, IDL, and R
- Libraries that don't support netCDF-4 include Perl and old C++
- 3rd party libraries are optional (HDF5, HDF4, zlib, szlib, pnetcdf, libcurl), depending on what features are needed and how netCDF is configured
- "Apps" in the above means applications, not mobile apps!
*/

View File

@ -14,7 +14,7 @@ In addition to providing new build options for netCDF-C, we will also provide pr
The following packages are required to build netCDF-C using CMake.
* netCDF-C Source Code
* CMake version 2.8.9 or greater.
* CMake version 2.8.12 or greater.
* Optional Requirements:
* HDF5 Libraries for netCDF4/HDF5 support.
* libcurl for DAP support.

View File

@ -4,7 +4,7 @@ Documentation for Common Data Language
\page CDL
\section CDL Syntax
\section cdl_syntax CDL Syntax
Below is an example of CDL, describing a netCDF dataset with several
named dimensions (lat, lon, time), variables (z, t, p, rh, lat, lon,
@ -23,12 +23,12 @@ data.
double p(time,lat,lon);
int rh(time,lat,lon);
lat:units = "degrees_north";
char lat:units = "degrees_north";
lon:units = "degrees_east";
time:units = "seconds";
z:units = "meters";
z:valid_range = 0., 5000.;
p:_FillValue = -9999.;
float z:valid_range = 0., 5000.;
double p:_FillValue = -9999.;
rh:_FillValue = -1;
data:
@ -75,18 +75,26 @@ type, a length, and a value. In contrast to variables that are
intended for data, attributes are intended for ancillary data or
metadata (data about data).
In CDL, an attribute is designated by a variable and attribute name,
separated by a colon (':'). It is possible to assign global attributes
to the netCDF dataset as a whole by omitting the variable name and
beginning the attribute name with a colon (':'). The data type of an
attribute in CDL, if not explicitly specified, is derived from the
type of the value assigned to it. The length of an attribute is the
number of data values or the number of characters in the character
string assigned to it. Multiple values are assigned to non-character
attributes by separating the values with commas (','). All values
assigned to an attribute must be of the same type. In the netCDF-4
enhanced model, attributes may be declared to be of user-defined type,
like variables.
In CDL, an attribute is designated by a data type, a
variable, and an attribute name. The variable and the
attribute name are separated by a colon (':'). If present,
the data type precedes the variable name. It is possible to
assign global attributes to the netCDF dataset as a whole by
omitting the variable name and beginning the attribute name
with a colon (':'). The data type of an attribute in CDL, if
not explicitly specified, is derived from the type of the
value assigned to it, with one exception. If the value is a
string, then the inferred type is char, not string. If it
is desired to have a string typed attribute, this must be
stated explicitly.
The length of an attribute is the number of data values or
the number of characters in the character string assigned to
it if the type is char. Multiple values are assigned to
non-character attributes by separating the values with
commas (','). All values assigned to an attribute must be of
the same type. In the netCDF-4 enhanced model, attributes
may be declared to be of user-defined type, like variables.
In CDL, just as for netCDF, the names of dimensions, variables and
attributes (and, in netCDF-4 files, groups, user-defined types,
@ -136,7 +144,7 @@ conversions among primitive types are supported.
A special notation for fill values is supported: the _ character
designates a fill value for variables.
\section CDL Data Types
\section cdl_data_types CDL Data Types
The CDL primitive data types for the classic model are:
- char Characters.
@ -176,7 +184,7 @@ floating-point numbers. The double type can hold values between about
standard normalized double-precision, floating-point numbers. The
string type holds variable length strings.
\section CDL Notation for Data Constants
\section cdl_notations_for_data_constants CDL Notation for Data Constants
This section describes the CDL notation for constants.
@ -268,4 +276,4 @@ acceptable double constants:
1.d
\endcode
*/
*/

View File

@ -69,4 +69,4 @@ Below are a list of commonly-asked questions regarding NetCDF and CMake.
-DHDF5_INCLUDE_DIR=/usr/include/openmpi-x86_64 \
You will, of course, need to use the location of the libraries specific to your development environment.
You will, of course, need to use the location of the libraries specific to your development environment.

View File

@ -1,5 +1,5 @@
<html><!-- InstanceBegin template="/Templates/MyUnidata.dwt" codeOutsideHTMLIsLocked="true" -->
<!-- This document is kept in netcdf-c/man4;
<!-- This document is kept in netcdf-c/docs;
it is not normally included in the distribution.
-->
<head>

5
docs/groups.dox Normal file
View File

@ -0,0 +1,5 @@
/** \defgroup lib_version Library Version
Functions related to querying the library version.
*/

View File

@ -1,9 +1,7 @@
/*! \file guide.dox The NetCDF User's Guide
/*! \page guide.dox The NetCDF User's Guide
\brief The NetCDF User's Guide
\mainpage
\tableofcontents
\section user_guide The NetCDF User's Guide
@ -51,7 +49,7 @@ that makes the data useful.
This User's Guide presents the netCDF data model. It explains how the
netCDF data model uses dimensions, variables, and attributes to store
data.
data.
Reference documentation for UNIX systems, in the form of UNIX 'man'
pages for the C and FORTRAN interfaces is also available at the netCDF
@ -213,7 +211,7 @@ beginning of the file. In this documentation this format is called
“64-bit offset format.”
Since 64-bit offset format was introduced in version 3.6.0, earlier
versions of the netCDF library can't read 64-bit offset files.
versions of the netCDF library can't read 64-bit offset files.
\subsection netcdf_4_format NetCDF-4 Format
@ -734,7 +732,7 @@ enumeration symbols) consist of arbitrary sequences of alphanumeric
characters, underscore '_', period '.', plus '+', hyphen '-', or at
sign '@', but beginning with an alphanumeric character or
underscore. However names commencing with underscore are reserved for
system use.
system use.
Beginning with versions 3.6.3 and 4.0, names may also include UTF-8
encoded Unicode characters as well as other special characters, except
@ -742,11 +740,11 @@ for the character '/', which may not appear in a name.
Names that have trailing space characters are also not permitted.
Case is significant in netCDF names.
Case is significant in netCDF names.
\subsection Name Length
A zero-length name is not allowed.
A zero-length name is not allowed.
Names longer than ::NC_MAX_NAME will not be accepted by any netCDF define
function. An error of ::NC_EMAXNAME will be returned.
@ -759,7 +757,7 @@ characters.
\subsection NetCDF Conventions
Some widely used conventions restrict names to only alphanumeric
characters or underscores.
characters or underscores.
\section archival Is NetCDF a Good Archive Format?
@ -783,7 +781,7 @@ and written compressed.
Attribute conventions are assumed by some netCDF generic applications,
e.g., units as the name for a string attribute that gives the units
for a netCDF variable.
for a netCDF variable.
It is strongly recommended that applicable conventions be followed
unless there are good reasons for not doing so. Below we list the
@ -890,7 +888,7 @@ If valid values are specified using the valid_min, valid_max,
valid_range, or _FillValue attributes, those values should be
specified in the domain of the data in the file (the packed data), so
that they can be interpreted before the scale_factor and add_offset
are applied.
are applied.
\subsection add_offset Add Offset
@ -932,7 +930,7 @@ values should be treated as signed or unsigned. The attributes
valid_min and valid_max may be used for this purpose. For example, if
you intend that a byte variable store only non-negative values, you
can use valid_min = 0 and valid_max = 255. This attribute is ignored
by the netCDF library.
by the netCDF library.
\subsection C_format C Format
@ -945,7 +943,7 @@ be appropriate to define the C_format attribute as "%.3g". The ncdump
utility program uses this attribute for variables for which it is
defined. The format applies to the scaled (internal) type and value,
regardless of the presence of the scaling attributes scale_factor and
add_offset.
add_offset.
\subsection FORTRAN_format FORTRAN format
@ -957,7 +955,7 @@ be appropriate to define the FORTRAN_format attribute as "(G10.3)".
\subsection title Title
A global attribute that is a character array providing a succinct
description of what is in the dataset.
description of what is in the dataset.
\subsection history History
@ -965,7 +963,7 @@ A global attribute for an audit trail. This is a character array with
a line for each invocation of a program that has modified the
dataset. Well-behaved generic netCDF applications should append a line
containing: date, time of day, user name, program name and command
arguments.
arguments.
\subsection Conventions Conventions
@ -1349,9 +1347,9 @@ declared as:
...
float temp[TIMES*LEVELS*LATS*LONS];
\endcode
to keep the data in a one-dimensional array, or
\code
...
float temp[TIMES][LEVELS][LATS][LONS];
@ -1377,9 +1375,9 @@ lon, varying fastest:
temp[0][1][0][1]
temp[0][1][0][2]
temp[0][1][0][3]
...
temp[2][1][4][7]
temp[2][1][4][8]
temp[2][1][4][9]
@ -2011,12 +2009,12 @@ For illustrative purposes, the following example will be used.
Dataset {
Int32 f1;
Structure {
Int32 f11;
Int32 f11;
Structure {
Int32 f1[3];
Int32 f2;
} FS2[2];
} S1;
} FS2[2];
} S1;
Structure {
Grid {
Array:
@ -2054,7 +2052,7 @@ within grids are left out in order to mimic the behavior of libnc-dap.
S2.G1.temp
S2.G2.G2
lat
lon
lon
\endcode
\subsection var_dim_trans Variable Dimension Translation
@ -2090,7 +2088,7 @@ variables.
S2.G2.lat -> S2.G2.lat[lat=2]
S2.G2.lon -> S2.G2.lon[lon=2]
lat -> lat[lat=2]
lon -> lon[lon=2]
lon -> lon[lon=2]
\endcode
Collect all of the dimension specifications from the DDS, both named
@ -2105,7 +2103,7 @@ example, this would create the following dimensions.
S1.FS2.f1_1 = 3 ;
S1.FS2.f2_0 = 2 ;
S2.G2.lat_0 = 2 ;
S2.G2.lon_0 = 2 ;
S2.G2.lon_0 = 2 ;
\endcode
If however, the anonymous dimension is the single dimension of a MAP
@ -2114,7 +2112,7 @@ vector This leads to the following.
\code
S2.G2.lat_0 -> S2.G2.lat
S2.G2.lon_0 -> S2.G2.lon
S2.G2.lon_0 -> S2.G2.lon
\endcode
For each unique named dimension "<name>=NN", create a netCDF dimension
@ -2124,7 +2122,7 @@ duplicates are ignored. This produces the following.
\code
S2.G2.lat -> lat
S2.G2.lon -> lon
S2.G2.lon -> lon
\endcode
Note that this produces duplicates that will be ignored later.
@ -2166,16 +2164,16 @@ following fields would be collected.
S2.G1.temp
S2.G2.G2
lat
lon
lon
\endcode
All grid array variables are renamed to be the same as the containing
grid and the grid prefix is removed. In the above DDS, this results in
the following changes.
\code
\code
G1.temp -> G1
G2.G2 -> G2
G2.G2 -> G2
\endcode
It is important to note that this process could produce duplicate
@ -2254,7 +2252,7 @@ Dataset {
Int32 f1[3];
Int32 f2;
} SQ1;
} S1[2];
} S1[2];
Sequence {
Structure {
Int32 x1[7];
@ -2428,56 +2426,56 @@ follows.
HTTP.VERBOSE
Type: boolean ("1"/"0")
Description: Produce verbose output, especially using SSL.
Related CURL Flags: CURLOPT_VERBOSE
Related CURL Flags: CURLOPT_VERBOSE
HTTP.DEFLATE
Type: boolean ("1"/"0")
Description: Allow use of compression by the server.
Related CURL Flags: CURLOPT_ENCODING
Related CURL Flags: CURLOPT_ENCODING
HTTP.COOKIEJAR
Type: String representing file path
Description: Specify the name of file into which to store cookies. Defaults to in-memory storage.
Related CURL Flags:CURLOPT_COOKIEJAR
Related CURL Flags:CURLOPT_COOKIEJAR
HTTP.COOKIEFILE
Type: String representing file path
Description: Same as HTTP.COOKIEJAR.
Related CURL Flags: CURLOPT_COOKIEFILE
Related CURL Flags: CURLOPT_COOKIEFILE
HTTP.CREDENTIALS.USER
Type: String representing user name
Description: Specify the user name for Digest and Basic authentication.
Related CURL Flags:
Related CURL Flags:
HTTP.CREDENTIALS.PASSWORD
Type: String representing password
Type: boolean ("1"/"0")
Description: Specify the password for Digest and Basic authentication.
Related CURL Flags:
Related CURL Flags:
HTTP.SSL.CERTIFICATE
Type: String representing file path
Description: Path to a file containing a PEM certificate.
Related CURL Flags: CURLOPT_CERT
Related CURL Flags: CURLOPT_CERT
HTTP.SSL.KEY
Type: String representing file path
Description: Same as HTTP.SSL.CERTIFICATE, and should usually have the same value.
Related CURL Flags: CURLOPT_SSLKEY
Related CURL Flags: CURLOPT_SSLKEY
HTTP.SSL.KEYPASSWORD
Type: String representing password
Description: Password for accessing the HTTP.SSL.KEY/HTTP.SSL.CERTIFICATE
Related CURL Flags: CURLOPT_KEYPASSWORD
Related CURL Flags: CURLOPT_KEYPASSWORD
HTTP.SSL.CAPATH
Type: String representing directory
Description: Path to a directory containing trusted certificates for validating server certificates.
Related CURL Flags: CURLOPT_CAPATH
Related CURL Flags: CURLOPT_CAPATH
HTTP.SSL.VALIDATE
Type: boolean ("1"/"0")
Description: Cause the client to verify the server's presented certificate.
Related CURL Flags: CURLOPT_SSL_VERIFYPEER, CURLOPT_SSL_VERIFYHOST
Related CURL Flags: CURLOPT_SSL_VERIFYPEER, CURLOPT_SSL_VERIFYHOST
HTTP.TIMEOUT
Type: String ("dddddd")
Description: Specify the maximum time in seconds that you allow the http transfer operation to take.
Related CURL Flags: CURLOPT_TIMEOUT, CURLOPT_NOSIGNAL
Related CURL Flags: CURLOPT_TIMEOUT, CURLOPT_NOSIGNAL
HTTP.PROXY_SERVER
Type: String representing url to access the proxy: (e.g. http://[username:password@]host[:port])
Description: Specify the needed information for accessing a proxy.
Related CURL Flags: CURLOPT_PROXY, CURLOPT_PROXYHOST, CURLOPT_PROXYUSERPWD
Related CURL Flags: CURLOPT_PROXY, CURLOPT_PROXYHOST, CURLOPT_PROXYUSERPWD
</pre>
The related curl flags line indicates the curl flags modified by this
@ -2491,7 +2489,7 @@ For ESG client side key support, the following entries must be specified:
HTTP.COOKIEJAR
HTTP.SSL.CERTIFICATE
HTTP.SSL.KEY
HTTP.SSL.CAPATH
HTTP.SSL.CAPATH
\endcode
Additionally, for ESG, the HTTP.SSL.CERTIFICATE and HTTP.SSL.KEY
@ -2608,15 +2606,15 @@ in nc_test4/run_bm_elena.sh.
<pre>
#!/bin/sh
# This shell runs some benchmarks that Elena ran as described here:
# http://hdfeos.org/workshops/ws06/presentations/Pourmal/HDF5_IO_Perf.pdf
# $Id: netcdf.texi,v 1.82 2010/05/15 20:43:13 dmh Exp $
set -e
echo ""
echo "*** Testing the benchmarking program bm_file for simple float file, no compression..."
./bm_file -h -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:16:256 tst_elena_int_3D.nc
./bm_file -d -f 3 -o tst_elena_out.nc -c 0:-1:0:1024:256:256 tst_elena_int_3D.nc
@ -2625,7 +2623,7 @@ in nc_test4/run_bm_elena.sh.
./bm_file -d -f 3 -o tst_elena_out.nc -c 0:-1:0:256:64:256 tst_elena_int_3D.nc
./bm_file -d -f 3 -o tst_elena_out.nc -c 0:-1:0:256:256:256 tst_elena_int_3D.nc
echo '*** SUCCESS!!!'
exit 0
</pre>
@ -2669,16 +2667,16 @@ data.
\code
netcdf foo { // example netCDF specification in CDL
dimensions:
lat = 10, lon = 5, time = unlimited;
variables:
int lat(lat), lon(lon), time(time);
float z(time,lat,lon), t(time,lat,lon);
double p(time,lat,lon);
int rh(time,lat,lon);
lat:units = "degrees_north";
lon:units = "degrees_east";
time:units = "seconds";
@ -2686,7 +2684,7 @@ data.
z:valid_range = 0., 5000.;
p:_FillValue = -9999.;
rh:_FillValue = -1;
data:
lat = 0, 10, 20, 30, 40, 50, 60, 70, 80, 90;
lon = -140, -118, -96, -84, -52;
@ -2810,7 +2808,7 @@ The CDL primitive data types for the classic model are:
- long - (Deprecated, synonymous with int)
- float - IEEE single-precision floating point (32 bits).
- real - (Synonymous with float).
- double - IEEE double-precision floating point (64 bits).
- double - IEEE double-precision floating point (64 bits).
NetCDF-4 supports the additional primitive types:
- ubyte - Unsigned eight-bit integers.
@ -2818,7 +2816,7 @@ NetCDF-4 supports the additional primitive types:
- uint - Unsigned 32-bit integers.
- int64 - 64-bit signed integers.
- uint64 - Unsigned 64-bit signed integers.
- string - Variable-length string of characters
- string - Variable-length string of characters
Except for the added numeric data-types byte and ubyte, CDL supports
the same numeric primitive
@ -2984,7 +2982,7 @@ extended as necessary.
The ncgen man-page reference has more details about CDL representation
of constants of user-defined types.
\section guide_ncdump ncdump
\section guide_ncdump ncdump
Convert NetCDF file to text form (CDL)
@ -3047,7 +3045,7 @@ be appropriate to use the variable attribute
\subsection ncdump_OPTIONS ncdump options
@par -c
@par -c
Show the values of \e coordinate \e variables (1D variables with the
same names as dimensions) as well as the declarations of all
dimensions, variables, attribute values, groups, and user-defined
@ -3063,9 +3061,9 @@ for any variables. The output is identical to using the '-c' option
except that the values of coordinate variables are not included. (At
most one of '-c' or '-h' options may be present.)
@par -v \a var1,...
@par -v \a var1,...
@par
@par
The output will include data values for the specified variables, in
addition to the declarations of all dimensions, variables, and
attributes. One or more variables must be specified by name in the
@ -3156,7 +3154,7 @@ than the logical schema of the data. All the special virtual
attributes begin with '_' followed by an upper-case
letter. Currently they include the global attribute '_Format' and
the variable attributes '_ChunkSizes', '_DeflateLevel',
'_Endianness', '_Fletcher32', '_NoFill', '_Shuffle', and '_Storage'.
'_Endianness', '_Fletcher32', '_NoFill', '_Shuffle', and '_Storage'.
The \b ncgen utility recognizes these attributes and
supports them appropriately.
@ -3231,7 +3229,7 @@ omega:
Examine the translated DDS for the DAP source from the specified URL:
\code
ncdump -h http://test.opendap.org:8080/dods/dts/test.01
ncdump -h http://test.opendap.org:8080/dods/dts/test.01
\endcode
Without dumping all the data, show the special virtual attributes that indicate
@ -3372,7 +3370,7 @@ each specifying a dimension name, a '/' character, and optionally the
corresponding chunk length for that dimension. No blanks should
appear in the chunkspec string, except possibly escaped blanks that
are part of a dimension name. A chunkspec names dimensions along
which chunking is to take place, and omits dimensions which are
which chunking is to take place, and omits dimensions which are
not to be chunked or for
which the default chunk length is desired. If a dimension name is
followed by a '/' character but no subsequent chunk length, the actual
@ -3381,7 +3379,7 @@ netCDF-4 output file and not naming all dimensions in the chunkspec,
unnamed dimensions will also use the actual dimension length for the
chunk length. An example of a chunkspec for variables that use
'm' and 'n' dimensions might be 'm/100,n/200' to specify 100 by 200
chunks.
chunks.
\par
The chunkspec '/' that omits all dimension names and
@ -3402,9 +3400,9 @@ programming interface has no such restriction. If you need to
customize chunking for variables independently, you will need to use
the library API in a custom utility program.
\par -v \a var1,...
\par -v \a var1,...
\par
\par
The output will include data values for the specified variables, in
addition to the declarations of all dimensions, variables, and
attributes. One or more variables must be specified by name in the
@ -3418,9 +3416,9 @@ file may be specified with an absolute path name, such as
default, without this option, is to include data values for \e all variables
in the output.
\par -V \a var1,...
\par -V \a var1,...
\par
\par
The output will include the specified variables only but all dimensions and
global or group attributes. One or more variables must be specified by name in the
comma-delimited list following this option. The list must be a single argument
@ -3485,7 +3483,7 @@ better performance, if the output fits in memory.
\par
For netCDF-4 output, including netCDF-4 classic model, specifies
number of chunks that the chunk cache can hold. A suffix of K, M, G,
or T multiplies the number of chunks that can be held in the cache
or T multiplies the number of chunks that can be held in the cache
by one thousand, million, billion, or trillion, respectively. This is not a
property of the file, but merely a performance tuning parameter for
avoiding compressing or decompressing the same data multiple times
@ -3640,7 +3638,7 @@ arguments to the -k flag can be as follows.
1, classic Produce a netcdf classic file format file.
2, 64-bit-offset, '64-bit offset' Produce a netcdf 64 bit classic file format file.
3, hdf5, netCDF-4, enhanced Produce a netcdf-4 format file.
4, hdf5-nc3, 'netCDF-4 classic model', enhanced-nc3 Produce a netcdf-4 file format, but restricted to netcdf-3 classic CDL input.
4, hdf5-nc3, 'netCDF-4 classic model', enhanced-nc3 Produce a netcdf-4 file format, but restricted to netcdf-3 classic CDL input.
Note that the -v flag is a deprecated alias for -k.
@ -4094,7 +4092,7 @@ components and values.
C D F 001 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0
17220 17921 00000 00000 00000 00000 00000 00000
[magic number ] [ 0 records ] [ 0 dimensions (ABSENT) ]
0000 0000 0000 0000 0000 0000 0000 0000
\0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0 \0
00000 00000 00000 00000 00000 00000 00000 00000
@ -4121,27 +4119,27 @@ which corresponds to a 92-byte netCDF file. The following is an edited dump of t
C D F 001 \0 \0 \0 \0 \0 \0 \0 \n \0 \0 \0 001
17220 17921 00000 00000 00000 00010 00000 00001
[magic number ] [ 0 records ] [NC_DIMENSION ] [ 1 dimension ]
0000 0003 6469 6d00 0000 0005 0000 0000
\0 \0 \0 003 d i m \0 \0 \0 \0 005 \0 \0 \0 \0
00000 00003 25705 27904 00000 00005 00000 00000
[ 3 char name = "dim" ] [ size = 5 ] [ 0 global atts
0000 0000 0000 000b 0000 0001 0000 0002
\0 \0 \0 \0 \0 \0 \0 013 \0 \0 \0 001 \0 \0 \0 002
00000 00000 00000 00011 00000 00001 00000 00002
(ABSENT) ] [NC_VARIABLE ] [ 1 variable ] [ 2 char name =
7678 0000 0000 0001 0000 0000 0000 0000
v x \0 \0 \0 \0 \0 001 \0 \0 \0 \0 \0 \0 \0 \0
30328 00000 00000 00001 00000 00000 00000 00000
"vx" ] [1 dimension ] [ with ID 0 ] [ 0 attributes
0000 0000 0000 0003 0000 000c 0000 0050
\0 \0 \0 \0 \0 \0 \0 003 \0 \0 \0 \f \0 \0 \0 P
00000 00000 00000 00003 00000 00012 00000 00080
(ABSENT) ] [type NC_SHORT] [size 12 bytes] [offset: 80]
0003 0001 0004 0001 0005 8001
\0 003 \0 001 \0 004 \0 001 \0 005 200 001
00003 00001 00004 00001 00005 -32767
@ -4336,11 +4334,11 @@ However, these may be different. Consider the following code:
\code
/* Create a test file. */
if (nc_create(FILE_NAME, NC_CLASSIC_MODEL|NC_NETCDF4, &ncid)) ERR;
/* Define dimensions in order. */
if (nc_def_dim(ncid, DIM0, NC_UNLIMITED, &dimids[0])) ERR;
if (nc_def_dim(ncid, DIM1, 4, &dimids[1])) ERR;
/* Define coordinate variables in a different order. */
if (nc_def_var(ncid, DIM1, NC_DOUBLE, 1, &dimids[1], &varid[1])) ERR;
if (nc_def_var(ncid, DIM0, NC_DOUBLE, 1, &dimids[0], &varid[0])) ERR;
@ -4385,7 +4383,7 @@ little-endian has been specified for that variable.)
- NC_INT64 = H5T_NATIVE_LLONG
- NC_UINT64 = H5T_NATIVE_ULLONG
- NC_FLOAT = H5T_NATIVE_FLOAT
- NC_DOUBLE = H5T_NATIVE_DOUBLE
- NC_DOUBLE = H5T_NATIVE_DOUBLE
The NC_CHAR type represents a single character, and the NC_STRING an
array of characters. This can be confusing because a one-dimensional
@ -4417,7 +4415,7 @@ ignored by the netCDF-4 API.
dimension scale API.
- NAME This attribute is created and maintained by the HDF5 dimension
scale API.
- _Netcdf4Dimid Holds a scalar H5T_NATIVE_INT that is the (zero-based)
- _Netcdf4Dimid Holds a scalar H5T_NATIVE_INT that is the (zero-based)
dimension ID for this dimension, needed when dimensions and
coordinate variables are defined in different orders.
@ -4426,7 +4424,7 @@ ignored by the netCDF-4 API.
Each user-defined data type in an HDF5 file exactly corresponds to a
user-defined data type in the netCDF-4 file. Only base data types
which correspond to netCDF-4 data types may be used. (For example, no
HDF5 reference data types may be used.)
HDF5 reference data types may be used.)
\subsection compression_spec Compression
@ -4491,7 +4489,7 @@ straighforward manner.
- DFNT_INT32 = NC_INT
- DFNT_UINT32 = NC_UINT
- DFNT_FLOAT32 = NC_FLOAT
- DFNT_FLOAT64 = NC_DOUBLE
- DFNT_FLOAT64 = NC_DOUBLE
*/

View File

Before

Width:  |  Height:  |  Size: 60 KiB

After

Width:  |  Height:  |  Size: 60 KiB

View File

Before

Width:  |  Height:  |  Size: 285 KiB

After

Width:  |  Height:  |  Size: 285 KiB

View File

Before

Width:  |  Height:  |  Size: 128 KiB

After

Width:  |  Height:  |  Size: 128 KiB

View File

Before

Width:  |  Height:  |  Size: 17 KiB

After

Width:  |  Height:  |  Size: 17 KiB

View File

Before

Width:  |  Height:  |  Size: 565 KiB

After

Width:  |  Height:  |  Size: 565 KiB

View File

Before

Width:  |  Height:  |  Size: 20 KiB

After

Width:  |  Height:  |  Size: 20 KiB

View File

Before

Width:  |  Height:  |  Size: 98 KiB

After

Width:  |  Height:  |  Size: 98 KiB

View File

Before

Width:  |  Height:  |  Size: 24 KiB

After

Width:  |  Height:  |  Size: 24 KiB

View File

Before

Width:  |  Height:  |  Size: 61 KiB

After

Width:  |  Height:  |  Size: 61 KiB

View File

Before

Width:  |  Height:  |  Size: 85 KiB

After

Width:  |  Height:  |  Size: 85 KiB

View File

Before

Width:  |  Height:  |  Size: 13 KiB

After

Width:  |  Height:  |  Size: 13 KiB

View File

Before

Width:  |  Height:  |  Size: 7.2 KiB

After

Width:  |  Height:  |  Size: 7.2 KiB

View File

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 12 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 129 KiB

View File

Before

Width:  |  Height:  |  Size: 11 KiB

After

Width:  |  Height:  |  Size: 11 KiB

View File

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 12 KiB

View File

@ -23,7 +23,7 @@ syntax to set environment variables instead of the
ENV_VARIABLE=value
syntax used in the examples that use a POSIX-standard shell. In either
case, *\${DIR1}* is the value of the environment variable *DIR1*.
case, <I>${DIR1}</I> is the value of the environment variable <I>DIR1</I>.
It will be easier to build the netCDF Fortran library if the C (and if
needed, HDF5) libraries are built as shared libraries (the default), but
@ -33,8 +33,8 @@ Building with shared libraries {#building_fortran_shared_libraries}
==============================
1. First make sure the netCDF C library has been built, tested, and
installed under directory *\${DIR1}*, as specified by
--prefix=*\${DIR1}* to the C library configure script, or under
installed under directory <I>${DIR1}</I>, as specified by
--prefix=<I>${DIR1}</I> to the C library configure script, or under
directory /usr/local by default.
2. For the Fortran netCDF library, use the same C compiler as used to
create the netCDF C library, specified with the CC environment
@ -141,3 +141,50 @@ libraries is to use the "nf-config" utility installed in *${DIR1}*/bin:
or the more general "pkg-config" utility, if you have it:
fortran_compiler my_prog.f -o my_prog `pkg-config --cflags --libs netcdf-fortran`
Specifying The Environment for Building {#specify_build_env_fortran}
========================================
The netCDF configure script searches your path to find the compilers and tools it needs. To use compilers that can't be found in your path, set their environment variables.
The configure script will use gcc and associated GNU tools if they are found. Many users, especially those with performance concerns, will wish to use a vendor supplied compiler.
For example, on an AIX system, users may wish to use xlc (the AIX compiler) in one of its many flavors. Set environment variables before the build to achieve this.
For example, to change the C compiler, set CC to xlc (in sh: export CC=xlc). (But don't forget to also set CXX to xlC, or else configure will try to use g++, the GNU C++ compiler to build the netCDF C++ API. Similarly set FC to xlf90 so that the Fortran APIs are built properly.)
By default, the netCDF library is built with assertions turned on. If you wish to turn off assertions, set CPPFLAGS to -DNDEBUG (csh ex: setenv CPPFLAGS -DNDEBUG).
If GNU compilers are used, the configure script sets CPPFLAGS to “-g -O2”. If this is not desired, set CPPFLAGS to nothing, or to whatever other value you wish to use, before running configure.
For cross-compiles, the following environment variables can be used to override the default fortran/C type settings like this (in sh):
export NCBYTE_T=''integer(selected_int_kind(2))''
export NCSHORT_T=''integer*2''
export NF_INT1_T=''integer(selected_int_kind(2))''
export NF_INT2_T=''integer*2''
export NF_INT1_IS_C_SHORT=1
export NF_INT2_IS_C_SHORT=1
export NF_INT_IS_C_INT=1
export NF_REAL_IS_C_FLOAT=1
export NF_DOUBLEPRECISION_IS_C_DOUBLE=1
In this case you will need to run configure with disable-fortran-compiler-check and disable-fortran-type-check.
Variable Description Notes
--------------------------
Variable | Usage | Description
---|---|---
CC | C compiler | If you don't specify this, the configure script will try to find a suitable C compiler. The default choice is gcc. If you wish to use a vendor compiler you must set CC to that compiler, and set other environment variables (as described below) to appropriate settings.
FC | Fortran compiler (if any)| If you don't specify this, the configure script will try to find a suitable Fortran and Fortran 77 compiler. Set FC to "" explicitly, or provide the disable-f77 option to configure, if no Fortran interface (neither F90 nor F77) is desired. Use disable-f90 to disable the netCDF Fortran 90 API, but build the netCDF Fortran 77 API.
F77 | Fortran 77 compiler (if any) | Only specify this if your platform explicitly needs a different Fortran 77 compiler. Otherwise use FC to specify the Fortran compiler. If you don't specify this, the configure script will try to find a suitable Fortran compiler. For vendor compilers, make sure you're using the same vendor's Fortran 90 compiler. Using Fortran compilers from different vendors, or mixing vendor compilers with g77, the GNU F77 compiler, is not supported and may not work.
CXX | C++ compiler | If you don't specify this, the configure script will try to find a suitable C++ compiler. Set CXX to "" explicitly, or use the disable-cxx configure option, if no C++ interface is desired. If using a vendor C++ compiler, use that vendor's C compiler to compile the C interface. Using different vendor compilers for C and C++ may not work.
CFLAGS | C compiler flags | "-O" or "-g", for example.
CPPFLAGS | C preprocessor options | "-DNDEBUG" to omit assertion checks, for example.
FCFLAGS| Fortran 90 compiler flags | "-O" or "-g", for example. These flags will be used for FORTRAN 90. If setting these you may also need to set FFLAGS for the FORTRAN 77 test programs.
FFLAGS | Fortran 77 compiler flags | "-O" or "-g", for example. If you need to pass the same arguments to the FORTRAN 90 build, also set FCFLAGS.
CXXFLAGS | C++ compiler flags | "-O" or "-g", for example.
ARFLAGS, NMFLAGS, FPP, M4FLAGS, LIBS, FLIBS, FLDFLAGS | Miscellaneous | One or more of these were needed for some platforms, as specified below. Unless specified, you should not set these environment variables, because that may interfere with the configure script.

View File

@ -121,14 +121,14 @@ services</a>.
Build zlib like this:
\verbatim
./configure --prefix=/home/ed/local
./configure --prefix=/home/username/local
make check install
\endverbatim
Then you build HDF5, specifying the location of the zlib library:
\verbatim
./configure --with-zlib=/home/ed/local --prefix=/home/ed/local
./configure --with-zlib=/home/username/local --prefix=/home/username/local
make check install
\endverbatim
@ -149,7 +149,7 @@ HDF5, zlib, and (if built into HDF5) the szip header files and
libraries in the CPPFLAGS and LDFLAGS environment variables. For example:
\verbatim
CPPFLAGS=-I/home/ed/local/include LDFLAGS=-L/home/ed/local/lib ./configure --prefix=/home/ed/local
CPPFLAGS=-I/home/username/local/include LDFLAGS=-L/home/username/local/lib ./configure --prefix=/home/username/local
make check install
\endverbatim
@ -157,8 +157,8 @@ The configure script will try to find necessary tools in your
path. When you run configure you may optionally use the <CODE>--prefix</CODE>
argument to change the default installation directory. The above
examples install the zlib, HDF5, and netCDF-4 libraries in
/home/ed/local/lib, the header file in /home/ed/local/include, and the
utilities in /home/ed/local/bin. If you don't provide a <CODE>--prefix</CODE>
/home/username/local/lib, the header file in /home/username/local/include, and the
utilities in /home/username/local/bin. If you don't provide a <CODE>--prefix</CODE>
option, installation will be in /usr/local/, in subdirectories lib/,
include/, and bin/. The installation location specified with the
<CODE>--prefix</CODE> option must be different from the source directory where the
@ -178,11 +178,11 @@ To build without support for the netCDF-4 formats or the additional
netCDF-4 functions, but with remote access, use:
\verbatim
./configure --prefix=/home/ed/local --disable-netcdf-4
./configure --prefix=/home/username/local --disable-netcdf-4
make check install
\endverbatim
(Replace ``/home/ed/local'' with the name of the directory where
(Replace ``/home/username/local'' with the name of the directory where
netCDF is to be installed. The installation location specified with
the <CODE>--prefix</CODE> option must be different from the source directory where
the software is being built.)
@ -193,14 +193,14 @@ with full support for netCDF-4 APIs and format but without remote
client access, use:
\verbatim
./configure --prefix=/home/ed/local --disable-dap
./configure --prefix=/home/username/local --disable-dap
make check install
\endverbatim
To build without netCDF-4 support or remote client access, use:
\verbatim
./configure --prefix=/home/ed/local --disable-netcdf-4 --disable-dap
./configure --prefix=/home/username/local --disable-netcdf-4 --disable-dap
make check install
\endverbatim

View File

@ -32,7 +32,7 @@ Pre-release libraries for Windows may be found here: \ref winbin.
Getting the latest NetCDF-C Source Code {#sec_get_source}
----------------------------------------
Starting with netCDF-C version 4.3.1, the netCDF-C source code is hosted at the
Starting with netCDF-C version 4.3.1, the netCDF-C source code is hosted at the
Unidata GitHub repository, available at http://github.com/Unidata/netcdf-c.
Two options are available for building from source:
@ -75,6 +75,14 @@ full functionality. (See \ref architecture).
- \ref building_netcdf_fortran
- \ref configure_options
Requirements {#netcdf_requirements}
----------------------------------
* HDF5 1.8.9 (netcdf-4 support)
* zlib 1.2.5
* curl 7.18.0 (DAP support)
CMake and Windows support {#sub}
--------------------------------
@ -86,7 +94,7 @@ Building with NetCDF-4 and the Remote Data Client {#build_default}
The usual way of building netCDF requires the HDF5, zlib, and curl
libraries. (And, optionally, the szlib library). Versions required are
at least HDF5 1.8.8, zlib 1.2.5, and curl 7.18.0 or later.
at least HDF5 1.8.9, zlib 1.2.5, and curl 7.18.0 or later.
(Optionally, if building with szlib, get szip 2.0 or later.)
HDF5 1.8.9 and zlib 1.2.7 packages are available from the <a
@ -107,7 +115,7 @@ Fortran, C++, or Java API's. Only the HDF5 C library is used.
Optionally, you can also build netCDF-4 with the szip library
(a.k.a. szlib). NetCDF cannot create szipped data files, but can read
HDF5 data files that have used szip.
8
There are license restrictions on the use of szip, see the section on
licensing terms in the <a
href="http://www.hdfgroup.org/doc_resource/SZIP/">web page on szip
@ -127,14 +135,14 @@ services</a>.
Build zlib like this:
~~~
$ ./configure --prefix=/home/ed/local
$ ./configure --prefix=/home/username/local
$ make check install
~~~
Then you build HDF5, specifying the location of the zlib library:
~~~
$ ./configure --with-zlib=/home/ed/local --prefix=/home/ed/local
$ ./configure --with-zlib=/home/username/local --prefix=/home/username/local
$ make check install
~~~
@ -155,7 +163,7 @@ HDF5, zlib, and (if built into HDF5) the szip header files and
libraries in the CPPFLAGS and LDFLAGS environment variables. For example:
~~~
$ CPPFLAGS=-I/home/ed/local/include LDFLAGS=-L/home/ed/local/lib ./configure --prefix=/home/ed/local
$ CPPFLAGS=-I/home/username/local/include LDFLAGS=-L/home/username/local/lib ./configure --prefix=/home/username/local
$ make check install
~~~
@ -163,8 +171,8 @@ The configure script will try to find necessary tools in your
path. When you run configure you may optionally use the <CODE>--prefix</CODE>
argument to change the default installation directory. The above
examples install the zlib, HDF5, and netCDF-4 libraries in
/home/ed/local/lib, the header file in /home/ed/local/include, and the
utilities in /home/ed/local/bin. If you don't provide a <CODE>--prefix</CODE>
/home/username/local/lib, the header file in /home/username/local/include, and the
utilities in /home/username/local/bin. If you don't provide a <CODE>--prefix</CODE>
option, installation will be in /usr/local/, in subdirectories lib/,
include/, and bin/. The installation location specified with the
<CODE>--prefix</CODE> option must be different from the source directory where the
@ -185,29 +193,29 @@ To build without support for the netCDF-4 formats or the additional
netCDF-4 functions, but with remote access, use:
~~~
$ ./configure --prefix=/home/ed/local --disable-netcdf-4
$ ./configure --prefix=/home/username/local --disable-netcdf-4
$ make check install
~~~
(Replace `/home/ed/local` with the name of the directory where
(Replace `/home/username/local` with the name of the directory where
netCDF is to be installed. The installation location specified with
the <CODE>--prefix</CODE> option must be different from the source directory where
the software is being built.)
Starting with version 4.1.1 the netCDF C libraries and utilities have
supported remote data access, using the OPeNDAP protocols. To build
supported remote data access, using the OPeNDAP protocols. To build
with full support for netCDF-4 APIs and format but without remote
client access, use:
~~~
$ ./configure --prefix=/home/ed/local --disable-dap
$ ./configure --prefix=/home/username/local --disable-dap
$ make check install
~~~
To build without netCDF-4 support or remote client access, use:
~~~
$ ./configure --prefix=/home/ed/local --disable-netcdf-4 --disable-dap
$ ./configure --prefix=/home/username/local --disable-netcdf-4 --disable-dap
$ make check install
~~~
@ -218,7 +226,7 @@ Building with HDF4 Support {#build_hdf4}
---------------------
The netCDF-4 library can (since version 4.1) read HDF4 data files, if
they were created with the SD (Scientific Data) API.
they were created with the SD (Scientific Data) API.
For this to work, you must build the HDF4 library with the
configure option
@ -229,7 +237,7 @@ to prevent it from building an HDF4 version of the netCDF-2 library
that conflicts with the netCDF-2 functions that are built into the Unidata
netCDF library.
Then, when building netCDF-4, use the
Then, when building netCDF-4, use the
~~~
--enable-hdf4
~~~
@ -317,7 +325,7 @@ For example, one user reports that she can build other applications
with netCDF-4 by setting the LIBS environment variable:
~~~
LIBS='-L/X/netcdf-4.0/lib -lnetcdf -L/X/hdf5-1.8.6/lib -lhdf5_hl -lhdf5 -lz -lm -L/X/szip-2.1/lib -lsz'
LIBS='-L/X/netcdf-4.0/lib -lnetcdf -L/X/hdf5-1.8.9/lib -lhdf5_hl -lhdf5 -lz -lm -L/X/szip-2.1/lib -lsz'
~~~
For shared builds, only -lnetcdf is needed. All other libraries will
@ -336,7 +344,7 @@ cc -o myapp myapp.c `nc-config --cflags --libs`
configure options {#configure_options}
-----------------------------
These options are used for `autotools`-based builds. For `cmake` options, see \todo Finish this reference.
These options are used for `autotools`-based builds. For `cmake` options, see
Note: --disable prefix indicates that the option is normally enabled.
<table>
@ -388,14 +396,14 @@ Build Instructions for NetCDF-C using CMake {#netCDF-CMake}
Starting with netCDF-C 4.3.0, we are happy to announce the inclusion of CMake support. CMake will allow for building netCDF on a wider range of platforms, include Microsoft Windows with Visual Studio. CMake support also provides robust unit and regression testing tools. We will also maintain the standard autotools-based build system in parallel.
In addition to providing new build options for netCDF-C, we will also provide pre-built binary downloads for the shared versions of netCDF for use with Visual Studio.
In addition to providing new build options for netCDF-C, we will also provide pre-built binary downloads for the shared versions of netCDF for use with Visual Studio.
## Requirements {#cmake_requirements}
The following packages are required to build netCDF-C using CMake.
* netCDF-C Source Code
* CMake version 2.8.9 or greater.
* CMake version 2.8.12 or greater.
* Optional Requirements:
* HDF5 Libraries for netCDF4/HDF5 support.
* libcurl for DAP support.
@ -450,7 +458,7 @@ If you have libraries installed in a custom directory, you may need to specify t
## Building {#cmake_building}
The compiler can be executed directly with 'make' or the appropriate command for the configurator which was used.
The compiler can be executed directly with 'make' or the appropriate command for the configurator which was used.
> $ make
@ -478,12 +486,10 @@ Once netCDF has been built and tested, it may be installed using the following c
> $ make install
or
or
> $ cmake --build [Build Directory] --target install
## See Also {#cmake_see_also}
For further information regarding NetCDF and CMake, see \ref cmake_faq

View File

@ -1,4 +1,5 @@
/** \file
\internal
\page nc_dispatch Internal Dispatch Table Architecture

View File

@ -16,7 +16,7 @@ The NetCDF homepage may be found at <a href="http://www.unidata.ucar.edu/netcdf"
\subsection this_release Learn more about the current NetCDF-C Release
- \ref release_notes
- \ref RELEASE_NOTES
- \ref getting_and_building_netcdf
\subsubsection nightly_status The Latest NetCDF-C Build Status
@ -52,7 +52,7 @@ software tools for use in geoscience education and research.
\internal
Internal Documentation
Internal Documentation
The dispatch interface within the netCDF library allows the netCDF API
to be used for different types of back-ends, for example, the HDF5

View File

Before

Width:  |  Height:  |  Size: 2.0 KiB

After

Width:  |  Height:  |  Size: 2.0 KiB

View File

@ -1,33 +1,10 @@
/** \file
/** \file
Documentation of error handling.
\page programming_notes Programming Notes
\tableofcontents
\section error_handling Error Handling
Each netCDF function returns an integer status value. Non-zero values
indicate error.
The nc_strerror function is available to convert a returned integer
error status into an error message string.
If the returned status value indicates an error, you may handle it in
any way desired, from printing an associated error message and exiting
to ignoring the error indication and proceeding (not
recommended!). For simplicity, the examples in this guide check the
error status and call a separate function, handle_err(), to handle any
errors. One possible definition of handle_err() can be found within
the documentation of nc_strerror().
Occasionally, low-level I/O errors may occur in a layer below the
netCDF library. For example, if a write operation causes you to exceed
disk quotas or to attempt to write to a device that is no longer
available, you may get an error from a layer below the netCDF library,
but the resulting write error will still be reflected in the returned
status value.
\section ignored_if_null Ignored if NULL
Many of the arguments of netCDF functions are pointers. For example,
@ -41,7 +18,7 @@ A NULL may be passed for any of these pointers, and it will be
ignored. For example, interested in the number of dimensions only, the
following code will work:
\code
\code
int ndims;
...
if (nc_inq(ncid, &ndims, NULL, NULL, NULL))
@ -61,19 +38,19 @@ with vectors which specify the start, count, stride, and mapping.
\subsection start_vector A Vector Specifying Start Index for Each Dimension
A vector of size_t integers specifying the index in the
variable where the first of the data values will be read.
variable where the first of the data values will be read.
The indices are relative to 0, so for example, the first data value of
a variable would have index (0, 0, ... , 0).
a variable would have index (0, 0, ... , 0).
The length of start vector must be the same as the number of
dimensions of the specified variable. The elements of start
correspond, in order, to the variable's dimensions.
correspond, in order, to the variable's dimensions.
\subsection count_vector A Vector Specifying Count for Each Dimension
A vector of size_t integers specifying the edge lengths
along each dimension of the block of data values to be read.
along each dimension of the block of data values to be read.
To read a single value, for example, specify count as (1, 1, ... , 1).
@ -91,7 +68,7 @@ indices.
A value of 1 accesses adjacent values of the netCDF variable in the
corresponding dimension; a value of 2 accesses every other value of
the netCDF variable in the corresponding dimension; and so on.
the netCDF variable in the corresponding dimension; and so on.
The elements of the stride vector correspond, in order, to the
variable's dimensions.

View File

@ -1,8 +1,7 @@
/*! \file tutorial.dox The NetCDF-C Tutorial
/*! \page tutorial.dox The NetCDF-C Tutorial
\brief The NetCDF-C Tutorial
\mainpage
\tableofcontents
@ -16,7 +15,7 @@ This page contains references to various other NetCDF background and tutorial pages
- \ref netcdf_documentation
\subsection sub_sec_netcdf_data_model NetCDF Data Model:
- \ref netcdf_data_model
- \ref netcdf_data_model
- \ref classic_model
- \ref enhanced_model
- \ref unlimited_dims
@ -67,7 +66,7 @@ See also: \ref guide_ncdump
The ncgen utility can take an ASCII input file, in CDL format, and
generate a binary netCDF file. It is the opposite of ncdump.
See also:
See also:
- \ref guide_ncgen
- \ref guide_ncgen3
@ -92,11 +91,11 @@ many situations. Some of the tools on this list are developed at
Unidata. The others are developed elsewhere, and we can make no
guarantees about their continued availability or success. All of these
tools are open-source.
- <a href="http://www.unidata.ucar.edu/software/udunits">UDUNITS</a> - Unidata library to help with scientific units.
- <a href="http://www.unidata.ucar.edu/software/idv">IDV</a> - Unidata's Integrated Data Viewer, a 3D visualization and analysis package (Java based).
- <a href="http://www.ncl.ucar.edu">NCL</a> - NCAR Command Language, a graphics and data manipulation package.
- <a href="http://grads.iges.org/grads/grads.html">GrADS</a> - The Grid Analysis and Display System package.
- <a href="http://nco.sourceforge.net">NCO</a> - NetCDF Command line Operators, tools to manipulate netCDF files.
- <a href="http://www.unidata.ucar.edu/software/udunits">UDUNITS</a> - Unidata library to help with scientific units.
- <a href="http://www.unidata.ucar.edu/software/idv">IDV</a> - Unidata's Integrated Data Viewer, a 3D visualization and analysis package (Java based).
- <a href="http://www.ncl.ucar.edu">NCL</a> - NCAR Command Language, a graphics and data manipulation package.
- <a href="http://grads.iges.org/grads/grads.html">GrADS</a> - The Grid Analysis and Display System package.
- <a href="http://nco.sourceforge.net">NCO</a> - NetCDF Command line Operators, tools to manipulate netCDF files.
A <a href="http://www.unidata.ucar.edu/netcdf/software.html">list of
netCDF tools</a> that we know about can be found on the website. If
@ -118,7 +117,7 @@ utilities, ncgen/ncdump/nccopy.
The C++, Fortran 77 and Fortran 90 APIs are distributed separately
from the C library. The C library must be installed before any of
these APIs may be built. They depend on the C API.
these APIs may be built. They depend on the C API.
Due to the nature of C++ and Fortran 90, users of those languages can
also use the C and Fortran 77 APIs (respectively) directly.
@ -161,7 +160,7 @@ C, C++, Fortran 77, Fortran 90, and Java APIs:
- C++ - The NetCDF C++ Interface Guide.
- Fortran 77 - The NetCDF Fortran 77 Interface Guide.
- Fortran 90 - The NetCDF Fortran 90 Interface Guide.
- Java <a href="http://www.unidata.ucar.edu/software/netcdf-java/v2.1/NetcdfJavaUserManual.htm">The netCDF-Java Users Guide</a>.
- Java <a href="http://www.unidata.ucar.edu/software/netcdf-java/v2.1/NetcdfJavaUserManual.htm">The netCDF-Java Users Guide</a>.
Man pages for the C, F77, and F90 interfaces, and ncgen and ncdump,
are available on the documentation page of the netCDF web site
@ -190,7 +189,7 @@ created with the \ref classic_model.
<p>\image html nc4-model.png "The NetCDF Enhanced Data Model"
\section classic_model The Classic Model
\section classic_model The Classic Model
The classic netCDF data model consists of variables, dimensions, and
attributes. This way of thinking about data was introduced with the
@ -247,8 +246,8 @@ adds:
<li>groups - A way of hierarchically organizing data, similar to
directories in a Unix file system.
<li>user-defined types - The user can now define compound types
(like C structures), enumeration types, variable length arrays, and
<li>user-defined types - The user can now define compound types
(like C structures), enumeration types, variable length arrays, and
opaque types.
</ul>
@ -474,7 +473,7 @@ ncdump and ncgen see NetCDF Utilities.
variables:
int data(x, y) ;
data:
data =
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
@ -524,11 +523,11 @@ The CDL version of the data file, generated by ncdump, is shown below
float temperature(latitude, longitude) ;
temperature:units = "celsius" ;
data:
latitude = 25, 30, 35, 40, 45, 50 ;
longitude = -125, -120, -115, -110, -105, -100, -95, -90, -85, -80, -75, -70 ;
pressure =
900, 906, 912, 918, 924, 930, 936, 942, 948, 954, 960, 966,
901, 907, 913, 919, 925, 931, 937, 943, 949, 955, 961, 967,
@ -536,7 +535,7 @@ The CDL version of the data file, generated by ncdump, is shown below
903, 909, 915, 921, 927, 933, 939, 945, 951, 957, 963, 969,
904, 910, 916, 922, 928, 934, 940, 946, 952, 958, 964, 970,
905, 911, 917, 923, 929, 935, 941, 947, 953, 959, 965, 971 ;
temperature =
9, 10.5, 12, 13.5, 15, 16.5, 18, 19.5, 21, 22.5, 24, 25.5,
9.25, 10.75, 12.25, 13.75, 15.25, 16.75, 18.25, 19.75, 21.25, 22.75, 24.25,
@ -563,7 +562,7 @@ step at a time, as is typical in scientific applications that use the
unlimited dimension.
The sample data file created by pres_temp_4D_wr can be examined with
the utility ncdump (see \ref netcdf_utilities).
the utility ncdump (see \ref netcdf_utilities).
\code
netcdf pres_temp_4D {
@ -582,11 +581,11 @@ the utility ncdump (see \ref netcdf_utilities).
float temperature(time, level, latitude, longitude) ;
temperature:units = "celsius" ;
data:
latitude = 25, 30, 35, 40, 45, 50 ;
longitude = -125, -120, -115, -110, -105, -100, -95, -90, -85, -80, -75, -70 ;
pressure =
900, 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911,
912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923,
@ -612,7 +611,7 @@ the utility ncdump (see \ref netcdf_utilities).
1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 1019,
1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043 ;
temperature =
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
@ -649,7 +648,7 @@ This example is only available in C for this version of netCDF-4. The example cr
The simple_xy.nc data file contains two dimensions, “x” and “y”, two groups, “grp1” and “grp2”, and two data variables, one in each group, both named: “data.” One data variable is an unsigned 64-bit integer, the other a user-defined compound type.
The example program simple_nc4_wr.c creates the example data file simple_nc4.nc. The example program simple_nc4_rd.c reads the data file.
The example program simple_nc4_wr.c creates the example data file simple_nc4.nc. The example program simple_nc4_rd.c reads the data file.
- simple_nc4_wr.c
- simple_nc4_rd.c
@ -784,7 +783,7 @@ be opened or created for use with parallel I/O. (They may be opened
and created, but parallel I/O is not available.)
A few functions have been added to the netCDF C API to handle parallel
I/O.
I/O.
You must build netCDF-4 properly to take advantage of parallel
features (see \ref build_parallel).
@ -912,7 +911,7 @@ num attributes.
- nc_inq_dimid() Find dimension ID from its name.
- nc_inq_varid() Find variable ID from its name.
- nc_inq_format() Find file format: classic or 64-bit offset
- nc_inq_libvers() Find the netCDF library version.
- nc_inq_libvers() Find the netCDF library version.
\page accessing_subsets Reading and Writing Subsets of Data
@ -945,7 +944,7 @@ data, in both cases. They type of the file data is determined when the
netCDF variable is defined.
The type of the data may be automatically converted on read or
write.
write.
\example simple_xy_wr.c
\example simple_xy_rd.c

View File

@ -1,8 +1,10 @@
/** \file
/** \file types.dox Documentation related to NetCDF Types
Documentation of types.
\page data_type Data Types
\tableofcontents
Data in a netCDF file may be one of the \ref external_types, or may be
a user-defined data type (see \ref user_defined_types).
@ -125,7 +127,7 @@ attributes, or variables that refer to other attributes or variables,
provides a flexible mechanism for representing some kinds of complex
structures in netCDF datasets.
\section user_defined_types NetCDF-4 User Defined Data Types
\section nc4_user_defined_types NetCDF-4 User Defined Data Types
NetCDF supported six data types through version 3.6.0 (char, byte,
short, int, float, and double). Starting with version 4.0, many new
@ -158,12 +160,16 @@ user-defined types or the new atomic data types.
Once types are defined, use their ID like any other type ID when
defining variables or attributes. Use functions
nc_put_att()/nc_get_att() and the nc_put_var()/nc_get_var(),
nc_put_var1()/nc_get_var1(), nc_put_vara()/nc_get_vara(), or
nc_put_vars()/nc_get_vars() functons to access attribute and variable
data of user defined type.
\subsection Compound Types
- nc_put_att() / nc_get_att()
- nc_put_var() / nc_get_var()
- nc_put_var1() / nc_get_var1()
- nc_put_vara() / nc_get_vara()
- nc_put_vars() / nc_get_vars()
functions to access attribute and variable data of user defined type.
\subsection types_compound_types Compound Types
Compound types allow the user to combine atomic and user-defined types
into C-like structs. Since users defined types may be used within a
@ -172,7 +178,7 @@ compound type, they can contain nested compound types.
Users define a compound type, and (in their C code) a corresponding C
struct. They can then use nc_put_vara() and related functions to write
multi-dimensional arrays of these structs, and nc_get_vara() calls
to read them.
to read them.
While structs, in general, are not portable from platform to platform,
the HDF5 layer (when installed) performs the magic required to figure
@ -183,7 +189,7 @@ are portable.
For more information on creating and using compound types, see
Compound Types in The NetCDF C Interface Guide.
\subsection VLEN Types
\subsection vlen_types VLEN Types
Variable length arrays can be used to create a ragged array of data,
in which one of the dimensions varies in size from point to point.
@ -210,19 +216,19 @@ and pointers to the data, rather than the actual data.
For more information on creating and using variable length arrays, see
Variable Length Arrays in The NetCDF C Interface Guide.
\subsection Opaque Types
\subsection types_opaque_types Opaque Types
Opaque types allow the user to store arrays of data blobs of a fixed size.
For more information on creating and using opaque types, see Opaque
Type in The NetCDF C Interface Guide.
\subsection Enum Types
\subsection enum_types Enum Types
Enum types allow the user to specify an enumeration.
For more information on creating and using enum types, see Enum Type
in The NetCDF C Interface Guide.
in The NetCDF C Interface Guide.
\section type_conversion Type Conversion
@ -286,4 +292,14 @@ precision value you can write to a double variable is the largest
double-precision number representable on your system that is less than
2 to the 1024th power.
The _uchar and _schar functions were introduced in netCDF-3 to
eliminate an ambiguity, and support both signed and unsigned byte data.
In netCDF-2, whether the external NC_BYTE type represented signed or
unsigned values was left up to the user. In netcdf-3, we treat NC_BYTE
as signed for the purposes of conversion to short, int, long, float, or
double. (Of course, no conversion takes place when the internal type is
signed char.) In the _uchar functions, we treat NC_BYTE as if it were
unsigned. Thus, no NC_ERANGE error can occur converting between NC_BYTE
and unsigned char.
*/

View File

Before

Width:  |  Height:  |  Size: 5.9 KiB

After

Width:  |  Height:  |  Size: 5.9 KiB

View File

@ -34,16 +34,16 @@ netCDF4+DAP | [netCDF4.3.2-NC4-DAP-32.exe][r4] | [netCDF4.3.2-NC4-DAP-64.exe][r
## Latest Release Candidate (netCDF-C 4.3.2-rc2)
## Latest Release Candidate (netCDF-C 4.3.3-rc2)
*Note: The latest release candidate may actually* **pre-date** *the latest stable release. If you are interested in using the latest developer snapshot on Windows, it may be downloaded from http://github.com/Unidata/netcdf-c but be warned,* **you will need to compile it yourself!**
Configuration | 32-bit | 64-bit |
:-------------------|:-------- |:-------|
netCDF 3 | [netCDF4.3.2-rc2-NC3-32.exe][rc1] | [netCDF4.3.2-rc2-NC3-64.exe][rc6]
netCDF3+DAP | [netCDF4.3.2-rc2-NC3-DAP-32.exe][rc2] | [netCDF4.3.2-rc2-NC3-DAP-64.exe][rc6]
netCDF4 | [netCDF4.3.2-rc2-NC4-32.exe][rc3] | [netCDF4.3.2-rc2-NC4-64.exe][rc7]
netCDF4+DAP | [netCDF4.3.2-rc2-NC4-DAP-32.exe][rc4] | [netCDF4.3.2-rc2-NC4-DAP-64.exe][rc8]
netCDF 3 | [netCDF4.3.3-rc2-NC3-32.exe][rc1] | [netCDF4.3.3-rc2-NC3-64.exe][rc6]
netCDF3+DAP | [netCDF4.3.3-rc2-NC3-DAP-32.exe][rc2] | [netCDF4.3.3-rc2-NC3-DAP-64.exe][rc6]
netCDF4 | [netCDF4.3.3-rc2-NC4-32.exe][rc3] | [netCDF4.3.3-rc2-NC4-64.exe][rc7]
netCDF4+DAP | [netCDF4.3.3-rc2-NC4-DAP-32.exe][rc4] | [netCDF4.3.3-rc2-NC4-DAP-64.exe][rc8]
# Using the netCDF-C Libraries with Visual Studio
In order to use the netcdf libraries, you must ensure that the .dll files (along with any dependencies from deps/shared/bin) are on the system path. In order to compile a program using these libraries, you must first link your program against the appropriate 'import' (.lib) libraries.
@ -73,11 +73,11 @@ When installed, the netCDF libraries are placed in the specified locations, alon
[r8]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-NC4-DAP-64.exe
[rc1]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC3-32.exe
[rc2]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC3-DAP-32.exe
[rc3]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC4-32.exe
[rc4]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC4-DAP-32.exe
[rc6]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC3-64.exe
[rc6]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC3-DAP-64.exe
[rc7]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC4-64.exe
[rc8]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.2-rc2-NC4-DAP-64.exe
[rc1]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC3-32.exe
[rc2]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC3-DAP-32.exe
[rc3]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC4-32.exe
[rc4]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC4-DAP-32.exe
[rc6]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC3-64.exe
[rc6]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC3-DAP-64.exe
[rc7]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC4-64.exe
[rc8]: http://www.unidata.ucar.edu/netcdf/win_netcdf/netCDF4.3.3-rc2-NC4-DAP-64.exe

View File

@ -1,10 +1,6 @@
SET(CMAKE_INCLUDE_CURRENT_DIR ON)
INCLUDE_DIRECTORIES(".")
SET(exam_C_tests simple_xy_wr simple_xy_rd sfc_pres_temp_wr sfc_pres_temp_rd pres_temp_4D_wr pres_temp_4D_rd)
SET(exam_C_tests_source "")
FOREACH(F ${exam_C_tests})
FOREACH(F ${exam_C_tests})
set(exam_C_tests_source ${exam_C_test_source} ${F}.c)
ENDFOREACH()

View File

@ -1,8 +1,3 @@
SET(CMAKE_INCLUDE_CURRENT_DIR ON)
INCLUDE_DIRECTORIES(".")
SET(CDL_EXAMPLE_TESTS create_sample_files do_comps)
FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.sh ${CMAKE_CURRENT_SOURCE_DIR}/*.cdl)

View File

@ -1,7 +1,3 @@
SET(CMAKE_INCLUDE_CURRENT_DIR ON)
INCLUDE_DIRECTORIES(".")
ADD_SUBDIRECTORY(C)
IF(BUILD_UTILITIES)

View File

@ -1,7 +1,3 @@
SET(CMAKE_INCLUDE_CURRENT_DIR ON)
INCLUDE_DIRECTORIES(".")
SET(H5TESTS tst_h_files tst_h_files2 tst_h_files4 tst_h_atts tst_h_atts3 tst_h_atts4 tst_h_vars tst_h_vars2 tst_h_vars3 tst_h_grps tst_h_compounds tst_h_compounds2 tst_h_wrt_cmp tst_h_rd_cmp tst_h_vl tst_h_opaques tst_h_strings tst_h_strings1 tst_h_strings2 tst_h_ints tst_h_dimscales tst_h_dimscales1 tst_h_dimscales2 tst_h_dimscales3 tst_h_enums tst_h_dimscales4)
FILE(GLOB COPY_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.h5 ${CMAKE_CURRENT_SOURCE_DIR}/*.nc)
@ -9,7 +5,11 @@ FILE(COPY ${COPY_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)
FOREACH(CTEST ${H5TESTS})
ADD_EXECUTABLE(${CTEST} ${CTEST}.c)
TARGET_LINK_LIBRARIES(${CTEST} netcdf)
TARGET_LINK_LIBRARIES(${CTEST}
netcdf
${HDF5_C_LIBRARIES}
${HDF5_HL_LIBRARIES}
)
ADD_TEST(${CTEST} ${EXECUTABLE_OUTPUT_PATH}/${CTEST})
ENDFOREACH()

1
include/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
netcdf_meta.h

View File

@ -1,7 +1,3 @@
SET(CMAKE_INCLUDE_CURRENT_DIR ON)
INCLUDE_DIRECTORIES(".")
FILE(GLOB CUR_EXTRA_DIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*.h)
SET(CUR_EXTRA_DIST ${CUR_EXTRA_DIST} Makefile.am CMakeLists.txt)
ADD_EXTRA_DIST("${CUR_EXTRA_DIST}")

View File

@ -4,11 +4,11 @@
# This automake file generates the Makefile to build the include
# directory.
include_HEADERS = netcdf.h
include_HEADERS = netcdf.h netcdf_meta.h
if BUILD_PARALLEL
include_HEADERS += netcdf_par.h
endif
include_HEADERS += netcdf_par.h
endif
noinst_HEADERS = nc_logging.h nc_tests.h fbits.h nc.h \
nclist.h ncuri.h utf8proc.h ncdispatch.h ncdimscale.h \
@ -17,8 +17,7 @@ nclog.h ncconfigure.h nc4internal.h nctime.h nc3dispatch.h nc3internal.h \
onstack.h
if USE_NETCDF4
noinst_HEADERS += ncaux.h
noinst_HEADERS += ncaux.h
endif
EXTRA_DIST = CMakeLists.txt XGetopt.h
EXTRA_DIST = CMakeLists.txt XGetopt.h netcdf_meta.h.in

View File

@ -3,7 +3,7 @@
* Research/Unidata. See COPYRIGHT file for more info.
*
* This header file is for the parallel I/O functions of netCDF.
*
*
*/
/* "$Id: netcdf_par.h,v 1.1 2010/06/01 15:46:49 ed Exp $" */
@ -22,14 +22,14 @@ defined.
extern char* strdup(const char*);
#endif
//#if HAVE_BASETSD_H
//#ifndef ssize_t
//#ifndef SSIZE_T
//#include <BaseTsd.h>
//#endif
//#define ssize_t SSIZE_T
//#endif
//#endif
/* #if HAVE_BASETSD_H */
/* #ifndef ssize_t */
/* #ifndef SSIZE_T */
/* #include <BaseTsd.h> */
/* #endif */
/* #define ssize_t SSIZE_T */
/* #endif */
/* #endif */
@ -40,7 +40,7 @@ extern char* strdup(const char*);
#else
char *nulldup(const char* s);
#endif
#endif
#endif
#ifndef nulldup

View File

@ -348,9 +348,9 @@ extern const char* NCDAP_urllookup(void* dapurl, const char* param);
# else
# define MSC_NCDISPATCH_EXTRA __declspec(dllimport)
# endif
MSC_NCDISPATCH_EXTRA extern const char* NC_findtestserver(const char*);
MSC_NCDISPATCH_EXTRA extern char* NC_findtestserver(const char*, const char**);
#else
extern const char* NC_findtestserver(const char*);
extern char* NC_findtestserver(const char*,const char**);
#endif
/* Ping a specific server */

View File

@ -33,6 +33,8 @@ typedef struct bounds_node bounds_node_t;
#define CD_NULL_MONTH 1 /* Null month value */
#define CD_NULL_YEAR 0 /* Null year value, component time */
/* Why do we have same enum defined twice? */
typedef enum CdTimeUnit {
CdBadTimeUnit = 0,
CdMinute = 1,
@ -46,7 +48,7 @@ typedef enum CdTimeUnit {
} CdTimeUnit;
typedef enum cdUnitTime {
cdBadUnit = CdBadTimeUnit,
cdBadUnit = CdBadTimeUnit,
cdMinute = CdMinute,
cdHour = CdHour,
cdDay = CdDay,

File diff suppressed because it is too large Load Diff

52
include/netcdf_meta.h.in Normal file
View File

@ -0,0 +1,52 @@
/*! \file netcdf_meta.h
*
* Meta information for libnetcdf which can be used by other packages which
* depend on libnetcdf.
*
* This file is automatically generated by the build system
* at configure time, and contains information related to
* how libnetcdf was built. It will not be required to
* include this file unless you want to probe the capabilities
* of libnetcdf. This should ideally only happen when configuring
* a project which depends on libnetcdf. At configure time,
 * the dependent project can set its own macros which can be used
* in conditionals.
*
* Copyright 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
* 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014
* University Corporation for Atmospheric Research/Unidata.
* See \ref copyright file for more info.
*/
#ifndef NETCDF_META_H
#define NETCDF_META_H
#define NC_VERSION_MAJOR @NC_VERSION_MAJOR@ /*!< netcdf-c major version. */
#define NC_VERSION_MINOR @NC_VERSION_MINOR@ /*!< netcdf-c minor version. */
#define NC_VERSION_PATCH @NC_VERSION_PATCH@ /*!< netcdf-c patch version. */
#define NC_VERSION_NOTE "@NC_VERSION_NOTE@" /*!< netcdf-c note. May be blank. */
/*! netcdf-c version string.
*
* The format of the netcdf-c version string is as follows:
* \code
* NC_VERSION_MAJOR.NC_VERSION_MINOR.NC_VERSION_PATCH.NC_VERSION_NOTE
* \endcode
* Note that the trailing NC_VERSION_NOTE may be empty. It serves for
* identifiers such as '-rc1', etc.
*/
#define NC_VERSION "@NC_VERSION@"
#define NC_HAS_NC2 @NC_HAS_NC2@ /*!< API version 2 support. */
#define NC_HAS_NC4 @NC_HAS_NC4@ /*!< API version 4 support. */
#define NC_HAS_HDF4 @NC_HAS_HDF4@ /*!< hdf4 support. */
#define NC_HAS_HDF5 @NC_HAS_HDF5@ /*!< hdf5 support. */
#define NC_HAS_SZIP @NC_HAS_SZIP@ /*!< szip support (hdf5 only) */
#define NC_HAS_DAP @NC_HAS_DAP@ /*!< DAP support. */
#define NC_HAS_DISKLESS @NC_HAS_DISKLESS@ /*!< diskless support. */
#define NC_HAS_MMAP @NC_HAS_MMAP@ /*!< mmap support. */
#define NC_HAS_JNA @NC_HAS_JNA@ /*!< jna support. */
#define NC_HAS_PNETCDF @NC_HAS_PNETCDF@ /*!< pnetcdf support. */
#define NC_HAS_PARALLEL @NC_HAS_PARALLEL@ /*!< parallel IO support via hdf5. */
#endif

29
jna
View File

@ -1,29 +0,0 @@
#!/bin/bash
PREFIX=/home/dmh/opt/jna
CFLAGS=""
CPPFLAGS="-I/usr/local/include"
LDFLAGS="-L${PREFIX}/lib -lhdf5_hl -lhdf5 -lz $LDFLAGS"
LD_LIBRARY_PATH="${PREFIX}/lib:$LD_LIBRARY_PATH"
MAKE=make
FLAGS="--prefix ${PREFIX}"
FLAGS="$FLAGS --disable-cxx"
FLAGS="$FLAGS --disable-examples"
FLAGS="$FLAGS --disable-utilities"
export PATH
export CC
export CPPFLAGS
export CFLAGS
export LDFLAGS
export LD_LIBRARY_PATH
DISTCHECK_CONFIGURE_FLAGS="$FLAGS"
export DISTCHECK_CONFIGURE_FLAGS
if test -f Makefile ; then ${MAKE} distclean >/dev/null 2>&1 ; fi
sh ./configure ${FLAGS}

View File

@ -1,10 +1,6 @@
SET(CMAKE_INCLUDE_CURRENT_DIR ON)
INCLUDE_DIRECTORIES(".")
SET(dap2_SOURCES constraints.c dapcvt.c dapalign.c dapodom.c daputil.c ncdaperr.c cdf.c cache.c dapdump.c dapdebug.c dapattr.c ncd2dispatch.c getvara.c dceconstraints.c dcetab.c dceparse.c dcelex.c)
add_library(dap2 OBJECT ${dap2_SOURCES})
add_library(dap2 OBJECT ${dap2_SOURCES})
###
# Options related to the man page generation.

View File

@ -257,7 +257,7 @@ e::
cc -E -g -c ${E}.c ${INCL} -I../oc2 >${E}.txt
##################################################
T=t_auth
T=test_nstride_cached
v::
cc -g -c ${T}.c ${INCL}

View File

@ -6,6 +6,13 @@
#include "ncdap.h"
#include "dapdump.h"
/*
Grads servers always require a constraint,
which does not necessarily happen during prefetch.
So this flag controls this. By default, it is on.
*/
#define GRADS_PREFETCH
static int iscacheableconstraint(DCEconstraint* con);
/* Return 1 if we can reuse cached data to address
@ -97,7 +104,7 @@ prefetchdata(NCDAPCOMMON* nccomm)
if(FLAGSET(nccomm->controls,NCF_UNCONSTRAINABLE)) {
/* If we cannot constrain and caching is enabled,
then pull in everything */
if(FLAGSET(nccomm->controls,NCF_CACHE)) {
if(FLAGSET(nccomm->controls,NCF_CACHE)) {
for(i=0;i<nclistlength(allvars);i++) {
nclistpush(vars,nclistget(allvars,i));
}
@ -136,7 +143,7 @@ nclog(NCLOGDBG,"prefetch: %s",var->ncfullname);
/* Create a single constraint consisting of the projections for the variables;
each projection is whole variable. The selections are passed on as is.
The exception is if we are prefetching everything.
Conditionally, The exception is if we are prefetching everything.
*/
newconstraint = (DCEconstraint*)dcecreate(CES_CONSTRAINT);
@ -158,7 +165,9 @@ nullfree(s);
}
flags = NCF_PREFETCH;
#ifndef GRADS_PREFETCH
if(nclistlength(allvars) == nclistlength(vars)) flags |= NCF_PREFETCH_ALL;
#endif
ncstat = buildcachenode(nccomm,newconstraint,vars,&cache,flags);
newconstraint = NULL; /* buildcachenodetakes control of newconstraint */
if(ncstat != OC_NOERR) goto done;
@ -190,7 +199,7 @@ ncbytesfree(buf);
done:
nclistfree(vars);
dcefree((DCEnode*)newconstraint);
dcefree((DCEnode*)newconstraint);
if(ncstat && cache != NULL) freenccachenode(nccomm,cache);
return THROW(ncstat);
}
@ -212,9 +221,11 @@ buildcachenode(NCDAPCOMMON* nccomm,
int isprefetch = 0;
if((flags & NCF_PREFETCH) != 0)
isprefetch = 1;
isprefetch = 1;
#ifndef GRADS_PREFETCH
if((flags & NCF_PREFETCH_ALL) == 0)
#endif
ce = buildconstraintstring(constraint);
ncstat = dap_fetch(nccomm,conn,ce,OCDATADDS,&ocroot);
@ -359,7 +370,7 @@ iscacheableprojection(DCEprojection* proj)
cacheable = 1; /* assume so */
for(i=0;i<nclistlength(proj->var->segments);i++) {
DCEsegment* segment = (DCEsegment*)nclistget(proj->var->segments,i);
if(!iswholesegment(segment)) {cacheable = 0; break;}
if(!iswholesegment(segment)) {cacheable = 0; break;}
}
return cacheable;
}
@ -408,16 +419,18 @@ markprefetch(NCDAPCOMMON* nccomm)
CDFnode* dim = (CDFnode*)nclistget(var->array.dimsettrans,j);
nelems *= dim->dim.declsize;
}
if(nelems <= nccomm->cdf.smallsizelimit
&& FLAGSET(nccomm->controls,NCF_PREFETCH)) {
var->prefetchable = 1;
if(SHOWFETCH)
{
extern char* ocfqn(OCddsnode);
nclog(NCLOGDBG,"prefetchable: %s=%lu",
ocfqn(var->ocnode),(unsigned long)nelems);
}
}
if(nelems <= nccomm->cdf.smallsizelimit
&& FLAGSET(nccomm->controls,NCF_PREFETCH)) {
var->prefetchable = 1;
if(SHOWFETCH)
{
extern char* ocfqn(OCddsnode);
char *tmp = ocfqn(var->ocnode);
nclog(NCLOGDBG,"prefetchable: %s=%lu",
tmp,(unsigned long)nelems);
free(tmp);
}
}
}
return NC_NOERR;
}

View File

@ -41,7 +41,7 @@ computecdfnodesets(NCDAPCOMMON* nccomm, CDFtree* tree)
NClist* allnodes;
allnodes = tree->nodes;
varnodes = nclistnew();
varnodes = nclistnew();
if(tree->seqnodes == NULL) tree->seqnodes = nclistnew();
if(tree->gridnodes == NULL) tree->gridnodes = nclistnew();
@ -140,7 +140,7 @@ fixgrid(NCDAPCOMMON* nccomm, CDFnode* grid)
CDFnode* array;
glen = nclistlength(grid->subnodes);
array = (CDFnode*)nclistget(grid->subnodes,0);
array = (CDFnode*)nclistget(grid->subnodes,0);
if(nccomm->controls.flags & (NCF_NC3)) {
/* Rename grid Array: variable, but leave its oc base name alone */
nullfree(array->ncbasename);
@ -298,7 +298,7 @@ NCerror
sequencecheck(NCDAPCOMMON* nccomm)
{
(void)sequencecheckr(nccomm->cdf.ddsroot,
nccomm->cdf.ddsroot->tree->varnodes,NULL);
nccomm->cdf.ddsroot->tree->varnodes,NULL);
return NC_NOERR;
}
@ -556,7 +556,7 @@ findin(CDFnode* parent, CDFnode* child)
this occurs because some servers (that means you ferret and you thredds!)
do not adhere to the DAP2 protocol spec.
*/
static CDFnode*
makenewstruct(NCDAPCOMMON* ncc, CDFnode* node, CDFnode* templatenode)
{
@ -599,7 +599,7 @@ mapnodesr(CDFnode* connode, CDFnode* fullnode, int depth)
NCerror ncstat = NC_NOERR;
ASSERT((simplenodematch(connode,fullnode)));
#ifdef DEBUG
{
char* path1 = makecdfpathstring(fullnode,".");
@ -667,7 +667,7 @@ unmap(CDFnode* root)
}
}
/*
/*
Move dimension data from basenodes to nodes
*/
@ -700,7 +700,7 @@ fprintf(stderr,"dimimprint %s/%d -> %s/%d\n",
for(j=0;j<noderank;j++) {
CDFnode* dim = (CDFnode*)nclistget(node->array.dimset0,j);
CDFnode* basedim = (CDFnode*)nclistget(basenode->array.dimset0,j);
dim->dim.declsize0 = basedim->dim.declsize;
dim->dim.declsize0 = basedim->dim.declsize;
#ifdef DEBUG
fprintf(stderr,"dimimprint: %d: %lu -> %lu\n",i,basedim->dim.declsize,dim->dim.declsize0);
#endif
@ -944,6 +944,9 @@ buildcdftree(NCDAPCOMMON* nccomm, OCddsnode ocroot, OCdxd occlass, CDFnode** cdf
CDFnode* root = NULL;
CDFtree* tree = (CDFtree*)calloc(1,sizeof(CDFtree));
NCerror err = NC_NOERR;
if(!tree)
return OC_ENOMEM;
tree->ocroot = ocroot;
tree->nodes = nclistnew();
tree->occlass = occlass;
@ -956,7 +959,7 @@ buildcdftree(NCDAPCOMMON* nccomm, OCddsnode ocroot, OCdxd occlass, CDFnode** cdf
if(cdfrootp) *cdfrootp = root;
}
return err;
}
}
static NCerror
buildcdftreer(NCDAPCOMMON* nccomm, OCddsnode ocnode, CDFnode* container,
@ -1002,7 +1005,7 @@ buildcdftreer(NCDAPCOMMON* nccomm, OCddsnode ocnode, CDFnode* container,
if(tree->root == NULL) {
tree->root = cdfnode;
cdfnode->tree = tree;
}
}
#endif
break;
@ -1013,7 +1016,7 @@ buildcdftreer(NCDAPCOMMON* nccomm, OCddsnode ocnode, CDFnode* container,
if(tree->root == NULL) {
tree->root = cdfnode;
cdfnode->tree = tree;
}
}
#endif
break;
@ -1021,7 +1024,7 @@ buildcdftreer(NCDAPCOMMON* nccomm, OCddsnode ocnode, CDFnode* container,
default: PANIC1("buildcdftree: unexpect OC node type: %d",(int)octype);
}
/* Avoid a rare but perhaps possible null-dereference
/* Avoid a rare but perhaps possible null-dereference
of cdfnode. Not sure what error to throw, so using
NC_EDAP: generic DAP error. */
if(!cdfnode) {
@ -1040,7 +1043,10 @@ buildcdftreer(NCDAPCOMMON* nccomm, OCddsnode ocnode, CDFnode* container,
CDFnode* subnode;
oc_dds_ithfield(nccomm->oc.conn,ocnode,i,&ocsubnode);
ncerr = buildcdftreer(nccomm,ocsubnode,cdfnode,tree,&subnode);
if(ncerr) return ncerr;
if(ncerr) {
if(ocname) free(ocname);
return ncerr;
}
nclistpush(cdfnode->subnodes,(void*)subnode);
}
nullfree(ocname);
@ -1074,7 +1080,7 @@ freecdfroot(CDFnode* root)
/* Free up a single node, but not any
nodes it points to.
*/
*/
static void
free1cdfnode(CDFnode* node)
{
@ -1193,7 +1199,7 @@ static void
defdimensions(OCddsnode ocnode, CDFnode* cdfnode, NCDAPCOMMON* nccomm, CDFtree* tree)
{
size_t i,ocrank;
oc_dds_rank(nccomm->oc.conn,ocnode,&ocrank);
assert(ocrank > 0);
for(i=0;i<ocrank;i++) {
@ -1201,7 +1207,7 @@ defdimensions(OCddsnode ocnode, CDFnode* cdfnode, NCDAPCOMMON* nccomm, CDFtree*
OCddsnode ocdim;
char* ocname;
size_t declsize;
oc_dds_ithdimension(nccomm->oc.conn,ocnode,i,&ocdim);
oc_dimension_properties(nccomm->oc.conn,ocdim,&declsize,&ocname);
@ -1212,9 +1218,8 @@ defdimensions(OCddsnode ocnode, CDFnode* cdfnode, NCDAPCOMMON* nccomm, CDFtree*
/* Initially, constrained and unconstrained are same */
cdfdim->dim.declsize = declsize;
cdfdim->dim.array = cdfnode;
if(cdfnode->array.dimset0 == NULL)
if(cdfnode->array.dimset0 == NULL)
cdfnode->array.dimset0 = nclistnew();
nclistpush(cdfnode->array.dimset0,(void*)cdfdim);
}
}
}

View File

@ -62,7 +62,7 @@ mapconstraints(DCEconstraint* constraint,
for(i=0;i<nclistlength(dceprojections);i++) {
CDFnode* cdfmatch = NULL;
DCEprojection* proj = (DCEprojection*)nclistget(dceprojections,i);
if(proj->discrim != CES_VAR) continue; // ignore functions
if(proj->discrim != CES_VAR) continue; /* ignore functions */
ncstat = matchpartialname(nodes,proj->var->segments,&cdfmatch);
if(ncstat) goto done;
/* Cross links */

View File

@ -42,7 +42,7 @@ dcelex(YYSTYPE* lvalp, DCEparsestate* state)
int token;
int c;
int len;
char* p=lexstate->next;
char* p=NULL;
token = 0;
ncbytesclear(lexstate->yytext);
ncbytesnull(lexstate->yytext);
@ -109,7 +109,7 @@ dcelex(YYSTYPE* lvalp, DCEparsestate* state)
isnumber = 1; /* maybe */
}
/* A number followed by an id char is assumed to just be
a funny id */
a funny id */
if(isnumber && (*p == '\0' || strchr(wordcharsn,*p) == NULL)) {
token = SCAN_NUMBERCONST;
} else {
@ -191,9 +191,22 @@ void
dcelexinit(char* input, DCElexstate** lexstatep)
{
DCElexstate* lexstate = (DCElexstate*)malloc(sizeof(DCElexstate));
if(lexstatep) *lexstatep = lexstate;
/* If lexstatep is NULL,
we want to free lexstate and
return to avoid a memory leak. */
if(lexstatep) {
*lexstatep = lexstate;
} else {
if(lexstate) free(lexstate);
return;
}
if(lexstate == NULL) return;
memset((void*)lexstate,0,sizeof(DCElexstate));
#ifdef URLDECODE
lexstate->input = ncuridecode(input);
#else
@ -221,4 +234,3 @@ dcelexcleanup(DCElexstate** lexstatep)
free(lexstate);
*lexstatep = NULL;
}

View File

@ -203,7 +203,7 @@ array_indices(DCEparsestate* state, Object list0, Object indexno)
if(start < 0) {
dceerror(state,"Illegal array index");
start = 1;
}
}
slice = (DCEslice*)dcecreate(CES_SLICE);
slice->first = start;
slice->stride = 1;
@ -221,13 +221,13 @@ indexer(DCEparsestate* state, Object name, Object indices)
NClist* list = (NClist*)indices;
DCEsegment* seg = (DCEsegment*)dcecreate(CES_SEGMENT);
seg->name = strdup((char*)name);
for(i=0;i<nclistlength(list);i++) {
for(i=0;i<nclistlength(list);i++) {
DCEslice* slice = (DCEslice*)nclistget(list,i);
seg->slices[i] = *slice;
free(slice);
}
nclistfree(indices);
return seg;
return seg;
}
Object
@ -271,7 +271,7 @@ Object
var(DCEparsestate* state, Object indexpath)
{
DCEvar* v = (DCEvar*)dcecreate(CES_VAR);
v->segments = (NClist*)indexpath;
v->segments = (NClist*)indexpath;
return v;
}
@ -334,18 +334,18 @@ dce_parse_cleanup(DCEparsestate* state)
static DCEparsestate*
ce_parse_init(char* input, DCEconstraint* constraint)
{
DCEparsestate* state = NULL;
if(input==NULL) {
dceerror(state,"ce_parse_init: no input buffer");
} else {
state = (DCEparsestate*)calloc(1,sizeof(DCEparsestate));
if(state==NULL) return (DCEparsestate*)NULL;
state->errorbuf[0] = '\0';
state->errorcode = 0;
dcelexinit(input,&state->lexstate);
DCEparsestate* state = (DCEparsestate*)calloc(1,sizeof(DCEparsestate));;
if(state==NULL) return (DCEparsestate*)NULL;
if(input==NULL) {
dceerror(state,"ce_parse_init: no input buffer");
} else {
state->errorbuf[0] = '\0';
state->errorcode = 0;
dcelexinit(input,&state->lexstate);
state->constraint = constraint;
}
return state;
}
return state;
}
#ifdef PARSEDEBUG

View File

@ -1,15 +1,14 @@
#TOP="/home/dmh/mach/netcdf-c"
TOP="/cygdrive/f/git/netcdf-c"
alias xx="cd ..;make; cd libdap2"
PARMS=""; ARGS=""; CON="" ; CE=""; OCON="" ; VAR=""; SHARP='#'
alias q0=;alias qq=;alias qv=;alias q=;alias qh=;alias qqh=;alias qall=;alias qv=;alias qo=;
#TOP="/home/dmh/mach/netcdf-c"
TOP="/cygdrive/f/git/netcdf-c"
F="http://thredds.ucar.edu/thredds/dodsC/grib/NCEP/NAM/CONUS_12km/best"
#CON="OneD.amp,TwoD.amp,ThreeD.amp"
#VAR=SPEED
F="http://thredds.aodn.org.au/thredds/fileServer/IMOS/ANMN/NSW/PH100/Velocity/IMOS_ANMN-NSW_AETVZ_20131127T230000Z_PH100_FV01_PH100-1311-Workhorse-ADCP-109.5_END-20140306T010000Z_C-20140521T053527Z.nc"
#PROG=./ncd
PROG="$TOP/ncdump/ncdump"
@ -23,7 +22,7 @@ PARMS="log"
#PARMS="${PARMS}&nocache"
#PARMS="${PARMS}&wholevar"
PARMS="${PARMS}&show=fetch"
PARMS="${PARMS}&noprefetch"
#PARMS="${PARMS}&noprefetch"
#PARMS="${PARMS}&prefetch"
#PARMS="${PARMS}&prefetch=eager"

View File

@ -19,7 +19,7 @@ static DCEnode* save = NULL;
struct NCMEMORY {
void* memory;
char* next; /* where to store the next chunk of data*/
};
};
/* Forward:*/
static NCerror moveto(NCDAPCOMMON*, Getvara*, CDFnode* dataroot, void* memory);
@ -39,6 +39,7 @@ static int extract(NCDAPCOMMON*, Getvara*, CDFnode*, DCEsegment*, size_t diminde
static int extractstring(NCDAPCOMMON*, Getvara*, CDFnode*, DCEsegment*, size_t dimindex, OClink, OCdatanode, struct NCMEMORY*);
static void freegetvara(Getvara* vara);
static NCerror makegetvar(NCDAPCOMMON*, CDFnode*, void*, nc_type, Getvara**);
static NCerror attachsubset(CDFnode* target, CDFnode* template);
/**************************************************/
/**
@ -124,7 +125,7 @@ nc3d_getvarx(int ncid, int varid,
ncstat = NC_check_id(ncid, (NC**)&drno);
if(ncstat != NC_NOERR) goto fail;
dapcomm = (NCDAPCOMMON*)drno->dispatchdata;
ncstat = NC_check_id(drno->substrate, (NC**)&substrate);
if(ncstat != NC_NOERR) goto fail;
@ -151,7 +152,7 @@ nc3d_getvarx(int ncid, int varid,
if(ncstat != NC_NOERR) {THROWCHK(ncstat); goto fail;}
}
}
/* Get the dimension info */
ncdimsall = cdfvar->array.dimsetall;
ncrank = nclistlength(ncdimsall);
@ -190,27 +191,26 @@ fprintf(stderr,"\n");
for(i=0;i<ncrank;i++) {
CDFnode* dim = (CDFnode*)nclistget(ncdimsall,i);
/* countp and startp are unsigned, so will never be < 0 */
//if(startp[i] < 0 || countp[i] < 0 || stridep[i] < 1) {
if(stridep[i] < 1) {
ncstat = NC_EINVALCOORDS;
goto fail;
goto fail;
}
if(startp[i] >= dim->dim.declsize
|| startp[i]+(stridep[i]*(countp[i]-1)) >= dim->dim.declsize) {
ncstat = NC_EINVALCOORDS;
goto fail;
goto fail;
}
}
}
#ifdef DEBUG
{
NClist* dims = cdfvar->array.dimsetall;
fprintf(stderr,"getvarx: %s",cdfvar->ncfullname);
if(nclistlength(dims) > 0) {int i;
for(i=0;i<nclistlength(dims);i++)
for(i=0;i<nclistlength(dims);i++)
fprintf(stderr,"(%lu:%lu:%lu)",(unsigned long)startp[i],(unsigned long)countp[i],(unsigned long)stridep[i]);
fprintf(stderr," -> ");
for(i=0;i<nclistlength(dims);i++)
for(i=0;i<nclistlength(dims);i++)
if(stridep[i]==1)
fprintf(stderr,"[%lu:%lu]",(unsigned long)startp[i],(unsigned long)((startp[i]+countp[i])-1));
else {
@ -287,7 +287,7 @@ fprintf(stderr,"var is in cache\n");
else
state = FETCHPART;
}
ASSERT(state != 0);
ASSERT(state != 0);
switch (state) {
@ -305,7 +305,9 @@ fprintf(stderr,"getvarx: FETCHWHOLE: fetchconstraint: %s\n",dumpconstraint(fetch
#endif
ncstat = buildcachenode(dapcomm,fetchconstraint,vars,&cachenode,0);
fetchconstraint = NULL; /*buildcachenode34 takes control of fetchconstraint.*/
if(ncstat != NC_NOERR) {THROWCHK(ncstat); goto fail;}
if(ncstat != NC_NOERR) {THROWCHK(ncstat); nullfree(varainfo);
varainfo=NULL;
goto fail;}
} break;
case CACHED: {
@ -324,7 +326,7 @@ fprintf(stderr,"getvarx: FETCHWHOLE: fetchconstraint: %s\n",dumpconstraint(fetch
dcemakewholeprojection(fetchprojection);
#ifdef DEBUG
fprintf(stderr,"getvarx: FETCHVAR: fetchprojection: |%s|\n",dumpprojection(fetchprojection));
fprintf(stderr,"getvarx: FETCHVAR: fetchprojection: |%s|\n",dumpprojection(fetchprojection));
#endif
/* Build the complete constraint to use in the fetch */
@ -333,15 +335,15 @@ fprintf(stderr,"getvarx: FETCHVAR: fetchprojection: |%s|\n",dumpprojection(fetch
fetchconstraint->selections = dceclonelist(dapcomm->oc.dapconstraint->selections);
/* and the created fetch projection */
fetchconstraint->projections = nclistnew();
nclistpush(fetchconstraint->projections,(void*)fetchprojection);
nclistpush(fetchconstraint->projections,(void*)fetchprojection);
#ifdef DEBUG
fprintf(stderr,"getvarx: FETCHVAR: fetchconstraint: %s\n",dumpconstraint(fetchconstraint));
#endif
/* buildcachenode3 will create a new cachenode and
will also fetch the corresponding datadds.
*/
ncstat = buildcachenode(dapcomm,fetchconstraint,vars,&cachenode,0);
fetchconstraint = NULL; /*buildcachenode34 takes control of fetchconstraint.*/
ncstat = buildcachenode(dapcomm,fetchconstraint,vars,&cachenode,0);
fetchconstraint = NULL; /*buildcachenode34 takes control of fetchconstraint.*/
if(ncstat != NC_NOERR) {THROWCHK(ncstat); goto fail;}
} break;
@ -355,12 +357,12 @@ fprintf(stderr,"getvarx: FETCHVAR: fetchconstraint: %s\n",dumpconstraint(fetchco
if(ncstat != NC_NOERR) {THROWCHK(ncstat); goto fail;}
/* Shift the varaprojection for simple walk */
dcefree((DCEnode*)walkprojection) ; /* reclaim any existing walkprojection */
dcefree((DCEnode*)walkprojection) ; /* reclaim any existing walkprojection */
walkprojection = (DCEprojection*)dceclone((DCEnode*)varaprojection);
dapshiftprojection(walkprojection);
#ifdef DEBUG
fprintf(stderr,"getvarx: FETCHPART: fetchprojection: |%s|\n",dumpprojection(fetchprojection));
fprintf(stderr,"getvarx: FETCHPART: fetchprojection: |%s|\n",dumpprojection(fetchprojection));
#endif
/* Build the complete constraint to use in the fetch */
@ -369,9 +371,9 @@ fprintf(stderr,"getvarx: FETCHPART: fetchprojection: |%s|\n",dumpprojection(fetc
fetchconstraint->selections = dceclonelist(dapcomm->oc.dapconstraint->selections);
/* and the created fetch projection */
fetchconstraint->projections = nclistnew();
nclistpush(fetchconstraint->projections,(void*)fetchprojection);
nclistpush(fetchconstraint->projections,(void*)fetchprojection);
#ifdef DEBUG
fprintf(stderr,"getvarx: FETCHPART: fetchconstraint: %s\n",dumpconstraint(fetchconstraint));
fprintf(stderr,"getvarx: FETCHPART: fetchconstraint: %s\n",dumpconstraint(fetchconstraint));
#endif
/* buildcachenode3 will create a new cachenode and
will also fetch the corresponding datadds.
@ -394,7 +396,7 @@ fprintf(stderr,"cache.datadds=%s\n",dumptree(cachenode->datadds));
/* attach DATADDS to (constrained) DDS */
unattach(dapcomm->cdf.ddsroot);
ncstat = attachsubset(cachenode->datadds,dapcomm->cdf.ddsroot);
if(ncstat) goto fail;
if(ncstat) goto fail;
/* Fix up varainfo to use the cache */
varainfo->cache = cachenode;
@ -405,7 +407,7 @@ fprintf(stderr,"cache.datadds=%s\n",dumptree(cachenode->datadds));
/* Get the var correlate from the datadds */
target = varainfo->target;
xtarget = target->attachment;
if(xtarget == NULL)
if(xtarget == NULL)
{THROWCHK(ncstat=NC_ENODATA); goto fail;}
/* Switch to datadds tree space*/
@ -414,11 +416,11 @@ save = (DCEnode*)varaprojection;
ncstat = moveto(dapcomm,varainfo,varainfo->cache->datadds,data);
if(ncstat != NC_NOERR) {THROWCHK(ncstat); goto fail;}
nclistfree(vars);
dcefree((DCEnode*)varaprojection);
dcefree((DCEnode*)fetchconstraint);
freegetvara(varainfo);
fail:
if(vars != NULL) nclistfree(vars);
if(varaprojection != NULL) dcefree((DCEnode*)varaprojection);
if(fetchconstraint != NULL) dcefree((DCEnode*)fetchconstraint);
if(varainfo != NULL) freegetvara(varainfo);
if(ocstat != OC_NOERR) ncstat = ocerrtoncerr(ocstat);
return THROW(ncstat);
}
@ -550,6 +552,7 @@ fprintf(stderr," segment=%s hasstringdim=%d\n",
dapodom_next(odom);
}
dapodom_free(odom);
odom = NULL;
} else {/* scalar instance */
ncstat = movetofield(nccomm,currentcontent,path,depth,xgetvar,dimindex,memory,segments);
if(ocstat != OC_NOERR) {THROWCHK(ocstat); goto done;}
@ -565,7 +568,7 @@ fprintf(stderr," segment=%s hasstringdim=%d\n",
for the case when the user set a limit and that limit
is not actually reached in this request.
*/
/* By construction, this sequence represents the first
/* By construction, this sequence represents the first
(and only) dimension of this segment */
odom = dapodom_fromsegment(segment,0,1);
while(dapodom_more(odom)) {
@ -596,7 +599,7 @@ fprintf(stderr," segment=%s hasstringdim=%d\n",
if(hasstringdim)
ncstat = extractstring(nccomm, xgetvar, xnode, segment, dimindex, conn, currentcontent, memory);
else
else
ncstat = extract(nccomm, xgetvar, xnode, segment, dimindex, conn, currentcontent, memory);
break;
@ -607,6 +610,7 @@ done:
oc_data_free(conn,fieldcontent);
oc_data_free(conn,reccontent);
if(ocstat != OC_NOERR) ncstat = ocerrtoncerr(ocstat);
if(odom) dapodom_free(odom);
return THROW(ncstat);
}
@ -630,11 +634,25 @@ movetofield(NCDAPCOMMON* nccomm,
OCdatanode fieldcontent = NULL;
CDFnode* xnext;
int newdepth;
int ffield;
/* currentcontent points to the grid/dataset/structure/record instance */
xnext = (CDFnode*)nclistget(path,depth+1);
ASSERT((xnext != NULL));
fieldindex = findfield(xnode,xnext);
/* If findfield is less than 0,
and passes through this stanza,
an undefined value will be passed to
oc_data_ithfield. See coverity
issue 712596. */
ffield = findfield(xnode, xnext);
if(ffield < 0) {
ncstat = NC_EBADFIELD;
goto done;
} else {
fieldindex = findfield(xnode,xnext);
}
/* If the next node is a nc_virtual node, then
we need to effectively
ignore it and use the appropriate subnode.
@ -672,7 +690,7 @@ done:
the odometer will be walking the whole subslice
This will allow us to optimize.
*/
static int
static int
wholeslicepoint(Dapodometer* odom)
{
unsigned int i;
@ -685,7 +703,7 @@ wholeslicepoint(Dapodometer* odom)
}
if(point == -1)
point = 0; /* wholevariable */
else if(point == (odom->rank - 1))
else if(point == (odom->rank - 1))
point = -1; /* no whole point */
else
point += 1; /* intermediate point */
@ -750,7 +768,7 @@ extract(
size_t externtypesize;
size_t interntypesize;
int requireconversion;
char value[16];
char value[16];
ASSERT((segment != NULL));
@ -880,7 +898,7 @@ slicestring(OClink conn, char* stringmem, DCEslice* slice, struct NCMEMORY* memo
/* libnc-dap chooses to convert string escapes to the corresponding
character; so we do likewise.
*/
dapexpandescapes(stringmem);
dapexpandescapes(stringmem);
stringlen = strlen(stringmem);
#ifdef DEBUG2
@ -932,7 +950,7 @@ extractstring(
ASSERT(xnode->etype == NC_STRING || xnode->etype == NC_URL);
/* Compute rank minus string dimension */
/* Compute rank minus string dimension */
rank0 = nclistlength(xnode->array.dimset0);
/* keep whole extracted strings stored in an NClist */
@ -942,7 +960,7 @@ extractstring(
char* value = NULL;
ocstat = oc_data_readscalar(conn,currentcontent,sizeof(value),&value);
if(ocstat != OC_NOERR) goto done;
nclistpush(strings,(void*)value);
nclistpush(strings,(void*)value);
} else {
/* Use the odometer to walk to the appropriate fields*/
odom = dapodom_fromsegment(segment,0,rank0);
@ -951,10 +969,11 @@ extractstring(
ocstat = oc_data_readn(conn,currentcontent,odom->index,1,sizeof(value),&value);
if(ocstat != OC_NOERR)
goto done;
nclistpush(strings,(void*)value);
nclistpush(strings,(void*)value);
dapodom_next(odom);
}
dapodom_free(odom);
odom = NULL;
}
/* Get each string in turn, slice it by applying the string dimm
and store in user supplied memory
@ -962,10 +981,10 @@ extractstring(
for(i=0;i<nclistlength(strings);i++) {
char* s = (char*)nclistget(strings,i);
slicestring(conn,s,&segment->slices[rank0],memory);
free(s);
}
nclistfree(strings);
free(s);
}
done:
if(strings != NULL) nclistfree(strings);
if(ocstat != OC_NOERR) ncstat = ocerrtoncerr(ocstat);
return THROW(ncstat);
}
@ -1039,7 +1058,7 @@ fprintf(stderr,"attachdim: %s->%s\n",xdim->ocname,tdim->ocname);
return NC_NOERR;
}
/*
/*
Match a DATADDS node to a DDS node.
It is assumed that both trees have been re-struct'ed if necessary.
*/
@ -1058,7 +1077,7 @@ attachr(CDFnode* xnode, NClist* templatepath, int depth)
lastnode = (depth == (plen-1));
templatepathnode = (CDFnode*)nclistget(templatepath,depth);
ASSERT((simplenodematch(xnode,templatepathnode)));
setattach(xnode,templatepathnode);
setattach(xnode,templatepathnode);
#ifdef DEBUG2
fprintf(stderr,"attachnode: %s->%s\n",xnode->ocname,templatepathnode->ocname);
#endif
@ -1148,13 +1167,13 @@ done:
}
/*
/*
Match nodes in template tree to nodes in target tree;
template tree is typically a structural superset of target tree.
WARNING: Dimensions are not attached
WARNING: Dimensions are not attached
*/
NCerror
static NCerror
attachsubset(CDFnode* target, CDFnode* template)
{
NCerror ncstat = NC_NOERR;
@ -1242,7 +1261,7 @@ nc3d_getvarmx(int ncid, int varid,
NULL,NULL,NULL,
data,dsttype0));
}
dsttype = (dsttype0);
/* Default to using the inquiry type for this var*/
@ -1333,7 +1352,7 @@ fprintf(stderr,"new: %lu -> %lu %f\n",
*(float*)localpos);
*/
dapodom_next(odom);
}
}
#else
odom = dapodom_new(ncrank,start,edges,stride,NULL);
while(dapodom_more(odom)) {
@ -1357,11 +1376,10 @@ fprintf(stderr,"old: %lu -> %lu %f\n",
*(float*)externalmem);
*/
dapodom_next(odom);
}
}
#endif
done:
return ncstat;
}
#endif /*EXTERN_UNUSED*/

View File

@ -5,6 +5,7 @@
#include "ncdap.h"
#include "ncd2dispatch.h"
#include "dapalign.h"
#ifdef HAVE_GETRLIMIT
# ifdef HAVE_SYS_RESOURCE_H
@ -44,7 +45,7 @@ static NCerror makeseqdim(NCDAPCOMMON*, CDFnode* seq, size_t count, CDFnode** sq
static NCerror countsequence(NCDAPCOMMON*, CDFnode* xseq, size_t* sizep);
static NCerror freeNCDAPCOMMON(NCDAPCOMMON*);
static NCerror fetchtemplatemetadata(NCDAPCOMMON*);
static int fieldindex(CDFnode* parent, CDFnode* child);
static size_t fieldindex(CDFnode* parent, CDFnode* child);
static NCerror computeseqcountconstraints(NCDAPCOMMON*, CDFnode*, NCbytes*);
static void computeseqcountconstraintsr(NCDAPCOMMON*, CDFnode*, CDFnode**);
static void estimatevarsizes(NCDAPCOMMON*);
@ -187,7 +188,7 @@ NCD2_initialize(void)
int i;
/* Create our dispatch table as the merge of NCD2 table and NCSUBSTRATE */
/* watch the order because we want NCD2 to overwrite NCSUBSTRATE */
NC_dispatch_overlay(&NCD2_dispatch_base, NCSUBSTRATE_dispatch_table, &NCD2_dispatcher);
NC_dispatch_overlay(&NCD2_dispatch_base, NCSUBSTRATE_dispatch_table, &NCD2_dispatcher);
NCD2_dispatch_table = &NCD2_dispatcher;
/* Local Initialization */
compute_nccalignments();
@ -315,7 +316,7 @@ NCD2_open(const char * path, int mode,
/* set the compile flag by default */
dapcomm->oc.rawurltext = (char*)emalloc(strlen(path)+strlen("[compile]")+1);
strcpy(dapcomm->oc.rawurltext,"[compile]");
strcat(dapcomm->oc.rawurltext, path);
strcat(dapcomm->oc.rawurltext, path);
#else
dapcomm->oc.rawurltext = strdup(path);
#endif
@ -338,7 +339,7 @@ NCD2_open(const char * path, int mode,
SETFLAG(dapcomm->controls,NCF_COLUMBIA);
}
}
}
}
#endif
/* fail if we are unconstrainable but have constraints */
@ -371,7 +372,7 @@ NCD2_open(const char * path, int mode,
dapcomm->oc.dapconstraint = (DCEconstraint*)dcecreate(CES_CONSTRAINT);
dapcomm->oc.dapconstraint->projections = nclistnew();
dapcomm->oc.dapconstraint->selections = nclistnew();
/* Parse constraints to make sure they are syntactically correct */
ncstat = parsedapconstraints(dapcomm,dapcomm->oc.url->constraint,dapcomm->oc.dapconstraint);
if(ncstat != NC_NOERR) {THROWCHK(ncstat); goto done;}
@ -399,9 +400,9 @@ NCD2_open(const char * path, int mode,
ncloginit();
if(nclogopen(value))
ncsetlogging(1);
ocloginit();
if(oclogopen(value))
ocsetlogging(1);
ncloginit();
if(nclogopen(value))
ncsetlogging(1);
}
/* fetch and build the unconstrained DDS for use as
@ -483,7 +484,7 @@ fprintf(stderr,"constrained dds: %s\n",dumptree(dapcomm->cdf.ddsroot));
if(dapcomm->cdf.recorddimname != NULL
&& nclistlength(dapcomm->cdf.ddsroot->tree->seqnodes) > 0) {
/*nclog(NCLOGWARN,"unlimited dimension specified, but sequences exist in DDS");*/
PANIC("unlimited dimension specified, but sequences exist in DDS");
PANIC("unlimited dimension specified, but sequences exist in DDS");
}
/* Re-compute the var names*/
@ -543,7 +544,7 @@ fprintf(stderr,"ncdap3: final constraint: %s\n",dapcomm->oc.url->constraint);
if(ncstat != NC_NOERR && ncstat != NC_EVARSIZE)
{THROWCHK(ncstat); goto done;}
#endif
{
NC* ncsub;
NC* drno = dapcomm->controller;
@ -563,7 +564,7 @@ fprintf(stderr,"ncdap3: final constraint: %s\n",dapcomm->oc.url->constraint);
/* Pretend the substrate is read-only */
NC_set_readonly(nc3i);
}
/* Do any necessary data prefetch */
@ -591,7 +592,7 @@ NCD2_close(int ncid)
NCDAPCOMMON* dapcomm;
int ncstatus = NC_NOERR;
ncstatus = NC_check_id(ncid, (NC**)&drno);
ncstatus = NC_check_id(ncid, (NC**)&drno);
if(ncstatus != NC_NOERR) return THROW(ncstatus);
dapcomm = (NCDAPCOMMON*)drno->dispatchdata;
@ -662,7 +663,7 @@ builddims(NCDAPCOMMON* dapcomm)
if(!swap) break;
}
/* Define unlimited only if needed */
/* Define unlimited only if needed */
if(dapcomm->cdf.recorddim != NULL) {
CDFnode* unlimited = dapcomm->cdf.recorddim;
definename = getdefinename(unlimited);
@ -695,7 +696,7 @@ fprintf(stderr,"define: dim: %s=%ld\n",dim->ncfullname,(long)dim->dim.declsize);
definename = getdefinename(dim);
ncstat = nc_def_dim(drno->substrate,definename,dim->dim.declsize,&dimid);
if(ncstat != NC_NOERR) {
THROWCHK(ncstat); goto done;
THROWCHK(ncstat); nullfree(definename); goto done;
}
nullfree(definename);
dim->ncid = dimid;
@ -747,7 +748,7 @@ fprintf(stderr,"buildvars.candidate=|%s|\n",var->ncfullname);
CDFnode* dim = (CDFnode*)nclistget(vardims,j);
dimids[j] = dim->ncid;
}
}
}
@ -787,7 +788,7 @@ fprintf(stderr,"\n");
/* Tag the variable with its DAP path */
if(dapparamcheck(dapcomm,"show","projection"))
showprojection(dapcomm,var);
}
}
done:
return THROW(ncstat);
}
@ -888,14 +889,16 @@ buildattribute(NCDAPCOMMON* dapcomm, NCattribute* att, nc_type vartype, int vari
modified: 10/28/09 to interpret escapes
*/
if(att->etype == NC_STRING || att->etype == NC_URL) {
char* newstring;
char* newstring = NULL;
size_t newlen = 0;
for(i=0;i<nvalues;i++) {
char* s = (char*)nclistget(att->values,i);
newlen += (1+strlen(s));
}
newstring = (char*)malloc(newlen);
MEMCHECK(newstring,NC_ENOMEM);
if(newlen > 0)
newstring = (char*)malloc(newlen);
MEMCHECK(newstring,NC_ENOMEM);
newstring[0] = '\0';
for(i=0;i<nvalues;i++) {
char* s = (char*)nclistget(att->values,i);
@ -911,7 +914,7 @@ buildattribute(NCDAPCOMMON* dapcomm, NCattribute* att, nc_type vartype, int vari
} else {
nc_type atype;
unsigned int typesize;
void* mem;
void* mem = NULL;
/* It turns out that some servers upgrade the type
of _FillValue in order to correctly preserve the
original value. However, since the type of the
@ -924,9 +927,10 @@ buildattribute(NCDAPCOMMON* dapcomm, NCattribute* att, nc_type vartype, int vari
else
atype = nctypeconvert(dapcomm,att->etype);
typesize = nctypesizeof(atype);
mem = malloc(typesize * nvalues);
ncstat = dapcvtattrval(atype,mem,att->values);
ncstat = nc_put_att(drno->substrate,varid,att->name,atype,nvalues,mem);
if(nvalues > 0)
mem = malloc(typesize * nvalues);
ncstat = dapcvtattrval(atype,mem,att->values);
ncstat = nc_put_att(drno->substrate,varid,att->name,atype,nvalues,mem);
nullfree(mem);
}
return THROW(ncstat);
@ -949,7 +953,7 @@ getdefinename(CDFnode* node)
case NC_Dimension:
/* Return just the node's ncname */
spath = nulldup(node->ncbasename);
spath = nulldup(node->ncbasename);
break;
default:
@ -993,10 +997,10 @@ computecdfdimnames(NCDAPCOMMON* nccomm)
NClist* varnodes = nccomm->cdf.ddsroot->tree->varnodes;
NClist* alldims;
NClist* basedims;
/* Collect all dimension nodes from dimsetall lists */
alldims = getalldims(nccomm,0);
alldims = getalldims(nccomm,0);
/* Assign an index to all anonymous dimensions
vis-a-vis its containing variable
@ -1237,7 +1241,7 @@ applyclientparams(NCDAPCOMMON* nccomm)
pathstr = makeocpathstring(conn,var->ocnode,".");
strncat(tmpname,pathstr,NC_MAX_NAME);
nullfree(pathstr);
value = oc_clientparam_get(conn,tmpname);
value = oc_clientparam_get(conn,tmpname);
if(value != NULL && strlen(value) != 0) {
if(sscanf(value,"%d",&len) && len > 0) var->maxstringlength = len;
}
@ -1316,7 +1320,7 @@ replacedims(NClist* dims)
Two dimensions are equivalent if
1. they have the same size
2. neither are anonymous
3. they ave the same names.
3. they ave the same names.
*/
static int
equivalentdim(CDFnode* basedim, CDFnode* dupdim)
@ -1408,7 +1412,7 @@ addstringdims(NCDAPCOMMON* dapcomm)
dimsize = var->maxstringlength;
/* check is a variable-specific string length was specified */
if(dimsize == 0)
if(dimsize == 0)
sdim = dapcomm->cdf.globalstringdim; /* use default */
else {
/* create a psuedo dimension for the charification of the string*/
@ -1487,7 +1491,7 @@ defseqdims(NCDAPCOMMON* dapcomm)
if(container->nctype != NC_Structure
|| nclistlength(container->array.dimset0) > 0)
{seq->usesequence = 0; break;}/* no good */
}
}
/* Does the user want us to compute the actual sequence dim size? */
if(seq->usesequence && seqdims) {
ncstat = getseqdimsize(dapcomm,seq,&seqsize);
@ -1496,7 +1500,7 @@ defseqdims(NCDAPCOMMON* dapcomm)
seq->usesequence = 0;
}
} else { /* !seqdims default to size = 1 */
seqsize = 1;
seqsize = 1;
}
if(seq->usesequence) {
/* Note: we are making the dimension in the dds root tree */
@ -1527,6 +1531,7 @@ showprojection(NCDAPCOMMON* dapcomm, CDFnode* var)
if(i > 0) ncbytescat(projection,".");
ncbytescat(projection,node->ocname);
}
nclistfree(path);
/* Now, add the dimension info */
rank = nclistlength(var->array.dimset0);
for(i=0;i<rank;i++) {
@ -1536,12 +1541,13 @@ showprojection(NCDAPCOMMON* dapcomm, CDFnode* var)
snprintf(tmp,sizeof(tmp),"%lu",(unsigned long)dim->dim.declsize);
ncbytescat(projection,tmp);
ncbytescat(projection,"]");
}
}
/* Define the attribute */
ncstat = nc_put_att_text(getncid(drno),var->ncid,
"_projection",
ncbyteslength(projection),
ncbytescontents(projection));
ncbytesfree(projection);
return ncstat;
}
@ -1573,15 +1579,15 @@ fprintf(stderr,"seqcountconstraints: %s\n",ncbytescontents(seqcountconstraints))
if(ncstat) goto fail;
ncstat = buildcdftree(dapcomm,ocroot,OCDATA,&dxdroot);
if(ncstat) goto fail;
if(ncstat) goto fail;
/* attach DATADDS to DDS */
ncstat = attach(dxdroot,seq);
if(ncstat) goto fail;
if(ncstat) goto fail;
/* WARNING: we are now switching to datadds tree */
xseq = seq->attachment;
ncstat = countsequence(dapcomm,xseq,&seqsize);
if(ncstat) goto fail;
if(ncstat != NC_NOERR) goto fail;
#ifdef DEBUG
fprintf(stderr,"sequencesize: %s = %lu\n",seq->ocname,(unsigned long)seqsize);
@ -1590,6 +1596,8 @@ fprintf(stderr,"sequencesize: %s = %lu\n",seq->ocname,(unsigned long)seqsize);
/* throw away the fetch'd trees */
unattach(dapcomm->cdf.ddsroot);
freecdfroot(dxdroot);
#if 1
/*Note sure what this is doing?*/
if(ncstat != NC_NOERR) {
/* Cannot get DATADDDS*/
char* code;
@ -1601,13 +1609,15 @@ fprintf(stderr,"sequencesize: %s = %lu\n",seq->ocname,(unsigned long)seqsize);
code,msg,httperr);
}
ocstat = OC_NOERR;
}
}
#endif
if(sizep) *sizep = seqsize;
fail:
ncbytesfree(seqcountconstraints);
oc_data_free(conn,rootcontent);
if(ocstat) ncstat = ocerrtoncerr(ocstat);
if(ocstat != OC_NOERR) ncstat = ocerrtoncerr(ocstat);
return ncstat;
}
@ -1664,7 +1674,7 @@ countsequence(NCDAPCOMMON* dapcomm, CDFnode* xseq, size_t* sizep)
CDFnode* current = (CDFnode*)nclistget(path,i);
OCdatanode nextdata = NULL;
CDFnode* next = NULL;
/* invariant: current = ith node in path; data = corresponding
datanode
*/
@ -1705,7 +1715,7 @@ countsequence(NCDAPCOMMON* dapcomm, CDFnode* xseq, size_t* sizep)
done:
nclistfree(path);
if(ocstat) ncstat = ocerrtoncerr(ocstat);
if(ocstat != OC_NOERR) ncstat = ocerrtoncerr(ocstat);
return THROW(ncstat);
}
@ -1737,7 +1747,7 @@ freeNCDAPCOMMON(NCDAPCOMMON* dapcomm)
return NC_NOERR;
}
static int
static size_t
fieldindex(CDFnode* parent, CDFnode* child)
{
unsigned int i;
@ -1804,7 +1814,7 @@ computeseqcountconstraints(NCDAPCOMMON* dapcomm, CDFnode* seq, NCbytes* seqcount
if(dapcomm->oc.url->selection != NULL)
ncbytescat(seqcountconstraints,dapcomm->oc.url->selection);
nclistfree(path);
return NC_NOERR;
return NC_NOERR;
}
@ -1833,7 +1843,7 @@ prefer(CDFnode* candidate, CDFnode* newchoice)
canisscalar = (nclistlength(candidate->array.dimset0) == 0);
ASSERT(candidate->nctype == NC_Atomic && newchoice->nctype == NC_Atomic);
/* choose non-string over string */
if(canisstring && !newisstring)
return newchoice;
@ -1974,7 +1984,7 @@ fetchtemplatemetadata(NCDAPCOMMON* dapcomm)
if(ncstat != NC_NOERR) {
/* Ignore but complain */
nclog(NCLOGWARN,"Could not read DAS; ignored");
dapcomm->oc.ocdasroot = NULL;
dapcomm->oc.ocdasroot = NULL;
ncstat = NC_NOERR;
}
@ -2028,7 +2038,7 @@ fetchconstrainedmetadata(NCDAPCOMMON* dapcomm)
if(!FLAGSET(dapcomm->controls,NCF_UNCONSTRAINABLE)) {
/* fix DAP server problem by adding back any inserting needed structure nodes */
ncstat = restruct(dapcomm, dapcomm->cdf.ddsroot,dapcomm->cdf.fullddsroot,dapcomm->oc.dapconstraint->projections);
ncstat = restruct(dapcomm, dapcomm->cdf.ddsroot,dapcomm->cdf.fullddsroot,dapcomm->oc.dapconstraint->projections);
if(ncstat) goto fail;
}
@ -2127,7 +2137,7 @@ applyclientparamcontrols(NCDAPCOMMON* dapcomm)
CLRFLAG(dapcomm->controls,NCF_PREFETCH_EAGER);
/* Turn on any default on flags */
SETFLAG(dapcomm->controls,DFALT_ON_FLAGS);
SETFLAG(dapcomm->controls,DFALT_ON_FLAGS);
SETFLAG(dapcomm->controls,(NCF_NC3|NCF_NCDAP));
/* enable/disable caching */

View File

@ -8,7 +8,7 @@
NCerror
ocerrtoncerr(OCerror ocerr)
{
if(ocerr >= 0) return ocerr; /* really a system error*/
if(ocerr > 0) return ocerr; /* really a system error*/
switch (ocerr) {
case OC_NOERR: return NC_NOERR;
case OC_EBADID: return NC_EBADID;

View File

@ -1,7 +1,3 @@
SET(CMAKE_INCLUDE_CURRENT_DIR ON)
INCLUDE_DIRECTORIES(".")
SET(libdispatch_SOURCES dparallel.c dcopy.c dfile.c ddim.c datt.c dattinq.c dattput.c dattget.c derror.c dvar.c dvarget.c dvarput.c dvarinq.c ddispatch.c nclog.c dstring.c dutf8proc.c ncuri.c nclist.c ncbytes.c nchashmap.c nctime.c dsubstrate.c nc.c nclistmgr.c)
IF(USE_NETCDF4)

View File

@ -1,44 +1,64 @@
## This is a automake file, part of Unidata's netCDF package.
# Copyright 2005, see the COPYRIGHT file for more information.
# This Makefile controls the building of the dispatch layer of the
# netCDF library. The dispatch layer decides whether to call the
# netcdf-classic code, netcdf-4 code, nc3 dap code, or nc4 dap
# code. It also contains code that sit above the dispatch layer, like
# the v2 API.
# Put together AM_CPPFLAGS and AM_LDFLAGS.
include $(top_srcdir)/lib_flags.am
# This is our output, the dispatch convenience library.
noinst_LTLIBRARIES = libdispatch.la
libdispatch_la_CPPFLAGS = ${AM_CPPFLAGS}
# The source files.
libdispatch_la_SOURCES = dparallel.c dcopy.c dfile.c ddim.c datt.c \
dattinq.c dattput.c dattget.c derror.c dvar.c dvarget.c dvarput.c \
dvarinq.c ddispatch.c \
nclog.c dstring.c dutf8proc.c utf8proc_data.h \
ncuri.c nclist.c ncbytes.c nchashmap.c nctime.c \
dsubstrate.c \
nc.c nclistmgr.c
# Add functions only found in netCDF-4.
if USE_NETCDF4
libdispatch_la_SOURCES += dgroup.c dvlen.c dcompound.c dtype.c denum.c \
dopaque.c ncaux.c
endif # USE_NETCDF4
# Turn on pre-processor flag when building a DLL for windows.
if BUILD_DLL
libdispatch_la_CPPFLAGS += -DDLL_EXPORT
endif # BUILD_DLL
# Add V2 API convenience library if needed.
if BUILD_V2
noinst_LTLIBRARIES += libnetcdf2.la
libnetcdf2_la_SOURCES = dv2i.c
libnetcdf2_la_CPPFLAGS = ${AM_CPPFLAGS} -DDLL_EXPORT
endif # BUILD_V2
EXTRA_DIST=CMakeLists.txt
## This is a automake file, part of Unidata's netCDF package.
# Copyright 2005, see the COPYRIGHT file for more information.
# This Makefile controls the building of the dispatch layer of the
# netCDF library. The dispatch layer decides whether to call the
# netcdf-classic code, netcdf-4 code, nc3 dap code, or nc4 dap
# code. It also contains code that sit above the dispatch layer, like
# the v2 API.
# Put together AM_CPPFLAGS and AM_LDFLAGS.
include $(top_srcdir)/lib_flags.am
# This is our output, the dispatch convenience library.
noinst_LTLIBRARIES = libdispatch.la
libdispatch_la_CPPFLAGS = ${AM_CPPFLAGS}
# The source files.
libdispatch_la_SOURCES = dparallel.c dcopy.c dfile.c ddim.c datt.c \
dattinq.c dattput.c dattget.c derror.c dvar.c dvarget.c dvarput.c \
dvarinq.c ddispatch.c \
nclog.c dstring.c dutf8proc.c utf8proc_data.h \
ncuri.c nclist.c ncbytes.c nchashmap.c nctime.c \
dsubstrate.c \
nc.c nclistmgr.c
# Add functions only found in netCDF-4.
if USE_NETCDF4
libdispatch_la_SOURCES += dgroup.c dvlen.c dcompound.c dtype.c denum.c \
dopaque.c ncaux.c
endif # USE_NETCDF4
# Turn on pre-processor flag when building a DLL for windows.
if BUILD_DLL
libdispatch_la_CPPFLAGS += -DDLL_EXPORT
endif # BUILD_DLL
# Add V2 API convenience library if needed.
if BUILD_V2
noinst_LTLIBRARIES += libnetcdf2.la
libnetcdf2_la_SOURCES = dv2i.c
libnetcdf2_la_CPPFLAGS = ${AM_CPPFLAGS} -DDLL_EXPORT
endif # BUILD_V2
EXTRA_DIST=CMakeLists.txt ncsettings.hdr
# Build ncsettings.c as follows:
# 1. copy ncsettings.hdr to ncsettings.c
# 2. append libnetcdf.settings to ncsettings.c after
# processing it as follows:
# 1. convert tabs and cr to blanks
# 2. convert embedded double quote (") to escaped form (\").
# 3. append newline (\n) to each line
# 4. surround each line with double quotes.
# 3. finally, add a semicolon to the end of ncsettings.c
# to complete the string constant.
ncsettings.c: $(top_srcdir)/libnetcdf.settings ncsettings.hdr
rm -f ncsettings.c
cat ncsettings.hdr > ncsettings.c
tr '\t\r' ' ' <${top_srcdir}/libnetcdf.settings | \
sed -e 's/"/\\"/g' | \
sed -e 's/\(.*\)/\"\1\\n\"/' | \
cat >> ncsettings.c
echo ';' >> ncsettings.c

View File

@ -371,9 +371,13 @@ nc_copy_var(int ncid_in, int varid_in, int ncid_out)
goto exit;
/* Allocate memory for one record. */
if (!(data = malloc(reclen * type_size)))
return NC_ENOMEM;
if (!(data = malloc(reclen * type_size))) {
if(count) free(count);
if(dimlen) free(dimlen);
if(start) free(start);
return NC_ENOMEM;
}
/* Copy the var data one record at a time. */
for (start[0]=0; !retval && start[0]<(size_t)dimlen[0]; start[0]++)
{

View File

@ -1,4 +1,4 @@
/** \file
/** \file
Dimension functions
These functions define and inquire about dimensions.
@ -9,9 +9,66 @@ Research/Unidata. See COPYRIGHT file for more info.
#include "ncdispatch.h"
/**@{*/
/*! \defgroup dimensions Dimensions
/*!
Dimensions are used to define the shape of data in netCDF.
Dimensions for a netCDF dataset are defined when it is created, while
the netCDF dataset is in define mode. Additional dimensions may be
added later by reentering define mode. A netCDF dimension has a name
and a length. In a netCDF classic or 64-bit offset file, at most one
dimension can have the unlimited length, which means variables using
this dimension can grow along this dimension. In a netCDF-4 file
multiple unlimited dimensions are supported.
There is a suggested limit (1024) to the number of dimensions that can
be defined in a single netCDF dataset. The limit is the value of the
predefined macro NC_MAX_DIMS. The purpose of the limit is to make
writing generic applications simpler. They need only provide an array
of NC_MAX_DIMS dimensions to handle any netCDF dataset. The
implementation of the netCDF library does not enforce this advisory
maximum, so it is possible to use more dimensions, if necessary, but
netCDF utilities that assume the advisory maximums may not be able to
handle the resulting netCDF datasets.
NC_MAX_VAR_DIMS, which must not exceed NC_MAX_DIMS, is the maximum
number of dimensions that can be used to specify the shape of a single
variable. It is also intended to simplify writing generic
applications.
Ordinarily, the name and length of a dimension are fixed when the
dimension is first defined. The name may be changed later, but the
length of a dimension (other than the unlimited dimension) cannot be
changed without copying all the data to a new netCDF dataset with a
redefined dimension length.
Dimension lengths in the C interface are type size_t rather than type
int to make it possible to access all the data in a netCDF dataset on
a platform that only supports a 16-bit int data type, for example
MSDOS. If dimension lengths were type int instead, it would not be
possible to access data from variables with a dimension length greater
than a 16-bit int can accommodate.
A netCDF dimension in an open netCDF dataset is referred to by a small
integer called a dimension ID. In the C interface, dimension IDs are
0, 1, 2, ..., in the order in which the dimensions were defined.
Operations supported on dimensions are:
- Create a dimension, given its name and length.
- Get a dimension ID from its name.
- Get a dimension's name and length from its ID.
- Rename a dimension.
*/
/*! \{*/ /* All these functions are part of the above defgroup... */
/** \name Deleting and Renaming Dimensions
Functions to delete or rename an dimension. */
/*! \{ */ /* All these functions are part of this named group... */
/*!
Define a new dimension. The function nc_def_dim adds a new
dimension to an open netCDF dataset in define mode. It returns (as an
@ -22,7 +79,7 @@ netCDF dataset. NetCDF-4 datasets may have multiple unlimited
dimensions.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param name Name of the dimension to be created.
@ -63,8 +120,7 @@ named foo.nc:
\endcode
*/
int
nc_def_dim(int ncid, const char *name, size_t len, int *idp)
int nc_def_dim(int ncid, const char *name, size_t len, int *idp)
{
NC* ncp;
int stat = NC_check_id(ncid, &ncp);
@ -81,7 +137,7 @@ dimensions defined for a netCDF dataset, each dimension has an ID
between 0 and ndims-1.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param name Name of the dimension.
@ -108,7 +164,7 @@ The length for the unlimited dimension, if any, is the number of
records written so far.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param dimid Dimension ID, from a previous call to nc_inq_dimid() or
@ -141,7 +197,7 @@ unlimited dimension for an existing netCDF dataset named foo.nc:
size_t latlength, recs;
char recname[NC_MAX_NAME+1];
...
status = nc_open("foo.nc", NC_NOWRITE, &ncid);
status = nc_open("foo.nc", NC_NOWRITE, &ncid);
if (status != NC_NOERR) handle_error(status);
status = nc_inq_unlimdim(ncid, &recid);
if (status != NC_NOERR) handle_error(status);
@ -178,7 +234,7 @@ For netCDF-4 files the dataset is switched to define more for the
rename, regardless of the name length.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param dimid Dimension ID, from a previous call to nc_inq_dimid() or
@ -203,10 +259,10 @@ latitude in an existing netCDF dataset named foo.nc:
...
int status, ncid, latid;
...
status = nc_open("foo.nc", NC_WRITE, &ncid);
status = nc_open("foo.nc", NC_WRITE, &ncid);
if (status != NC_NOERR) handle_error(status);
...
status = nc_redef(ncid);
status = nc_redef(ncid);
if (status != NC_NOERR) handle_error(status);
status = nc_inq_dimid(ncid, "lat", &latid);
if (status != NC_NOERR) handle_error(status);
@ -236,7 +292,7 @@ netCDF-4/HDF5 file, dimensions are in all sub-groups, sub-sub-groups,
etc.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param ndimsp Pointer where number of dimensions will be
@ -265,7 +321,7 @@ dimension), the ID of the first unlimited dimesnion is returned. For
these files, nc_inq_unlimdims() will return all the unlimited dimension IDs.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param unlimdimidp Pointer where unlimited dimension ID will be
@ -289,7 +345,7 @@ nc_inq_unlimdim(int ncid, int *unlimdimidp)
Find out the name of a dimension.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param dimid Dimension ID, from a previous call to nc_inq_dimid() or
@ -321,7 +377,7 @@ unlimited dimension for an existing netCDF dataset named foo.nc:
...
status = nc_open("foo.nc", NC_NOWRITE, &ncid);
if (status != NC_NOERR) handle_error(status);
status = nc_inq_unlimdim(ncid, &recid);
status = nc_inq_unlimdim(ncid, &recid);
if (status != NC_NOERR) handle_error(status);
...
status = nc_inq_dimid(ncid, "lat", &latid);
@ -351,7 +407,7 @@ The length for the unlimited dimension, if any, is the number of
records written so far.
\param ncid NetCDF or group ID, from a previous call to nc_open(),
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_create(), nc_def_grp(), or associated inquiry functions such as
nc_inq_ncid().
\param dimid Dimension ID, from a previous call to nc_inq_dimid() or
@ -378,12 +434,12 @@ unlimited dimension for an existing netCDF dataset named foo.nc:
...
status = nc_open("foo.nc", NC_NOWRITE, &ncid);
if (status != NC_NOERR) handle_error(status);
status = nc_inq_unlimdim(ncid, &recid);
status = nc_inq_unlimdim(ncid, &recid);
if (status != NC_NOERR) handle_error(status);
...
status = nc_inq_dimid(ncid, "lat", &latid);
status = nc_inq_dimid(ncid, "lat", &latid);
if (status != NC_NOERR) handle_error(status);
status = nc_inq_dimlen(ncid, latid, &latlength);
status = nc_inq_dimlen(ncid, latid, &latlength);
if (status != NC_NOERR) handle_error(status);
status = nc_inq_dim(ncid, recid, recname, &recs);
@ -399,4 +455,7 @@ nc_inq_dimlen(int ncid, int dimid, size_t *lenp)
if(lenp == NULL) return NC_NOERR;
return ncp->dispatch->inq_dim(ncid,dimid,NULL,lenp);
}
/**@}*/
/*! \} */ /* End of named group ...*/
/*! \} */ /* End of defgroup. */

View File

@ -1,6 +1,8 @@
#include "ncdispatch.h"
#include "ncuri.h"
#define MAXSERVERURL 4096
extern int NCSUBSTRATE_intialize(void);
/* Define vectors of zeros and ones for use with various nc_get_varX function*/
@ -26,11 +28,11 @@ static struct NCPROTOCOLLIST {
{NULL,NULL,0} /* Terminate search */
};
/* Define the server to ping in order;
/* Define the default servers to ping in order;
make the order attempt to optimize
against future changes.
*/
static const char* servers[] = {
static const char* default_servers[] = {
"http://remotetest.unidata.ucar.edu",
NULL
};
@ -56,27 +58,39 @@ NCDISPATCH_initialize(void)
}
/* search list of servers and return first that succeeds when
concatenated with the specified path part
concatenated with the specified path part.
Search list can be prefixed by the second argument.
*/
const char*
NC_findtestserver(const char* path)
char*
NC_findtestserver(const char* path, const char** servers)
{
#ifdef USE_DAP
#ifdef ENABLE_DAP_REMOTE_TESTS
/* NCDAP_ping is defined in libdap2/ncdap.c */
const char** svc;
int stat;
char* url = (char*)malloc(MAXSERVERURL);
if(path == NULL) path = "";
for(svc=servers;*svc != NULL;svc++) {
int stat;
char url[4096];
snprintf(url,sizeof(url),"%s%s%s",
*svc,
(path[0] == '/' ? "" : "/"),
path);
stat = NCDAP_ping(url);
if(stat == NC_NOERR)
return *svc;
if(strlen(path) > 0 && path[0] == '/')
path++;
if(servers != NULL) {
for(svc=servers;*svc != NULL;svc++) {
snprintf(url,MAXSERVERURL,"%s/%s",*svc,path);
stat = NCDAP_ping(url);
if(stat == NC_NOERR)
return url;
}
}
/* not found in user supplied list; try defaults */
for(svc=default_servers;*svc != NULL;svc++) {
snprintf(url,MAXSERVERURL,"%s/%s",*svc,path);
stat = NCDAP_ping(url);
if(stat == NC_NOERR)
return url;
}
if(url) free(url);
#endif
#endif
return NULL;
@ -108,7 +122,7 @@ NC_testurl(const char* path)
if(strcmp(tmpurl->protocol,protolist->protocol) == 0) {
isurl=1;
break;
}
}
}
ncurifree(tmpurl);
return isurl;
@ -143,18 +157,17 @@ NC_urlmodel(const char* path)
}
if(model == 0) {
/* Now look at the protocol */
for(protolist=ncprotolist;protolist->protocol;protolist++) {
if(strcmp(tmpurl->protocol,protolist->protocol) == 0) {
model |= protolist->modelflags;
if(protolist->substitute) {
if(tmpurl->protocol) free(tmpurl->protocol);
free(tmpurl->protocol);
tmpurl->protocol = strdup(protolist->substitute);
}
break;
break;
}
}
}
}
/* Force NC_DISPATCH_NC3 if necessary */
if((model & NC_DISPATCH_NC4) == 0)

View File

@ -4,7 +4,7 @@
#include <string.h>
#include <netcdf.h>
#define URL "http://remotetest.unidata.ucar.edu/dts/test.02"
#define URL "http://%s/dts/test.02"
#define VAR "i32"
#define ERRCODE 2
@ -21,8 +21,16 @@ main()
size_t start[1];
size_t count[1];
int ok = 1;
char url[1024];
if ((retval = nc_open(URL, 0, &ncid)))
{
char* evv = getenv("REMOTETESTSERVER");
if(evv == NULL)
evv = "remotetest.unidata.ucar.edu";
snprintf(url,sizeof(url),URL,evv);
}
if ((retval = nc_open(url, 0, &ncid)))
ERR(retval);
if ((retval = nc_inq_varid(ncid, VAR, &varid)))
ERR(retval);

View File

@ -25,9 +25,11 @@ nc_inq_libvers(void)
return nc_libvers;
}
/** \addtogroup error NetCDF Error Handling
/*! NetCDF Error Handling
NetCDF functions non-zero status codes on error.
\addtogroup error NetCDF Error Handling
NetCDF functions return a non-zero status codes on error.
Each netCDF function returns an integer status value. If the returned
status value indicates an error, you may handle it in any way desired,
@ -48,10 +50,11 @@ available, you may get an error from a layer below the netCDF library,
but the resulting write error will still be reflected in the returned
status value.
*/
*/
/*! Given an error number, return an error message.
\addtogroup error
/** \{ */
/*! Given an error number, return an error message.
This function returns a static reference to an error message string
corresponding to an integer netCDF error status or to a system error
@ -62,10 +65,8 @@ function. The error codes are defined in netcdf.h.
\returns short string containing error message.
\section handle_error_example nc_strerror Example
Here is an example of a simple error handling function that uses
nc_strerror to print the error message corresponding to the netCDF
nc_strerror() to print the error message corresponding to the netCDF
error status returned from any netCDF function call and then exit:
\code
@ -79,8 +80,7 @@ error status returned from any netCDF function call and then exit:
}
\endcode
*/
const char *
nc_strerror(int ncerr1)
const char *nc_strerror(int ncerr1)
{
/* System error? */
if(NC_ISSYSERR(ncerr1))
@ -190,6 +190,8 @@ nc_strerror(int ncerr1)
return "NetCDF: Authorization failure";
case NC_ENOTFOUND:
return "NetCDF: file not found";
case NC_ECANTEXTEND:
return "NetCDF: Attempt to extend dataset during NC_INDEPENDENT I/O operation. Use nc_var_par_access to set mode NC_COLLECTIVE before extending variable.";
case NC_ECANTREMOVE:
return "NetCDF: cannot delete file";
case NC_EHDFERR:
@ -202,7 +204,7 @@ nc_strerror(int ncerr1)
return "NetCDF: Can't create file";
case NC_EFILEMETA:
return "NetCDF: Can't add HDF5 file metadata";
case NC_EDIMMETA:
case NC_EDIMMETA:
return "NetCDF: Can't define dimensional metadata";
case NC_EATTMETA:
return "NetCDF: Can't open HDF5 attribute";
@ -256,4 +258,4 @@ nc_strerror(int ncerr1)
}
}
/** \} */

View File

@ -24,17 +24,6 @@ Research/Unidata. See COPYRIGHT file for more info.
#endif
#include "ncdispatch.h"
/* Define an enum over the possible set of
File Types
*/
enum FileType {
FT_UNKNOWN,
FT_HDF,
FT_NC,
FT_PNETCDF
};
static int nc_initialized = 0;
/** \defgroup datasets NetCDF Files
@ -90,11 +79,12 @@ nc_local_initialize(void)
static int
NC_check_file_type(const char *path, int use_parallel, void *mpi_info,
enum FileType* filetype, int* version)
int* model, int* version)
{
char magic[MAGIC_NUMBER_LEN];
int status = NC_NOERR;
*filetype = FT_UNKNOWN;
*model = 0;
/* Get the 4-byte magic from the beginning of the file. Don't use posix
* for parallel, use the MPI functions instead. */
@ -102,7 +92,7 @@ NC_check_file_type(const char *path, int use_parallel, void *mpi_info,
if (use_parallel)
{
MPI_File fh;
MPI_Status status;
MPI_Status mstatus;
int retval;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
@ -113,12 +103,12 @@ NC_check_file_type(const char *path, int use_parallel, void *mpi_info,
}
if((retval = MPI_File_open(comm, (char *)path, MPI_MODE_RDONLY,info,
&fh)) != MPI_SUCCESS)
return NC_EPARINIT;
{status = NC_EPARINIT; goto done;}
if((retval = MPI_File_read(fh, magic, MAGIC_NUMBER_LEN, MPI_CHAR,
&status)) != MPI_SUCCESS)
return NC_EPARINIT;
&mstatus)) != MPI_SUCCESS)
{status = NC_EPARINIT; goto done;}
if((retval = MPI_File_close(&fh)) != MPI_SUCCESS)
return NC_EPARINIT;
{status = NC_EPARINIT; goto done;}
} else
#endif /* USE_PARALLEL */
{
@ -129,10 +119,10 @@ NC_check_file_type(const char *path, int use_parallel, void *mpi_info,
#endif
if(path == NULL || strlen(path)==0)
return NC_EINVAL;
{status = NC_EINVAL; goto done;}
if (!(fp = fopen(path, "r")))
return errno;
{status = errno; goto done;}
#ifdef HAVE_SYS_STAT_H
/* The file must be at least MAGIC_NUMBER_LEN in size,
@ -140,47 +130,56 @@ NC_check_file_type(const char *path, int use_parallel, void *mpi_info,
behavior. */
if(!(fstat(fileno(fp),&st) == 0)) {
fclose(fp);
return errno;
status = errno;
goto done;
}
if(st.st_size < MAGIC_NUMBER_LEN) {
fclose(fp);
return NC_ENOTNC;
status = NC_ENOTNC;
goto done;
}
#endif
i = fread(magic, MAGIC_NUMBER_LEN, 1, fp);
fclose(fp);
if(i == 0)
return NC_ENOTNC;
if(i == 0)
{status = NC_ENOTNC; goto done;}
if(i != 1)
return errno;
{status = errno; goto done;}
}
/* Look at the magic number */
/* Ignore the first byte for HDF */
#ifdef USE_NETCDF4
if(magic[1] == 'H' && magic[2] == 'D' && magic[3] == 'F') {
*filetype = FT_HDF;
*model = NC_DISPATCH_NC4;
*version = 5;
#ifdef USE_HDF4
} else if(magic[0] == '\016' && magic[1] == '\003'
&& magic[2] == '\023' && magic[3] == '\001') {
*filetype = FT_HDF;
*model = NC_DISPATCH_NC4;
*version = 4;
} else if(magic[0] == 'C' && magic[1] == 'D' && magic[2] == 'F') {
*filetype = FT_NC;
if(magic[3] == '\001')
#endif
} else
#endif
if(magic[0] == 'C' && magic[1] == 'D' && magic[2] == 'F') {
if(magic[3] == '\001')
*version = 1; /* netcdf classic version 1 */
else if(magic[3] == '\002')
else if(magic[3] == '\002')
*version = 2; /* netcdf classic version 2 */
else if(magic[3] == '\005') {
*filetype = FT_PNETCDF;
#ifdef USE_PNETCDF
else if(magic[3] == '\005')
*version = 5; /* pnetcdf file */
} else
return NC_ENOTNC;
#endif
else
{status = NC_ENOTNC; goto done;}
*model = (use_parallel || *version == 5)?NC_DISPATCH_NC5:NC_DISPATCH_NC3;
} else
return NC_ENOTNC;
return NC_NOERR;
{status = NC_ENOTNC; goto done;}
done:
return status;
}
/** \ingroup datasets
@ -1534,11 +1533,17 @@ NC_create(const char *path, int cmode, size_t initialsz,
/* Look to the incoming cmode for hints */
if(model == 0) {
#ifdef USE_NETCDF4
if(cmode & NC_NETCDF4)
model = NC_DISPATCH_NC4;
else if(cmode & NC_PNETCDF)
else
#endif
#ifdef USE_PNETCDF
if(cmode & NC_PNETCDF)
model = NC_DISPATCH_NC5;
else if(cmode & NC_CLASSIC_MODEL)
else
#endif
if(cmode & NC_CLASSIC_MODEL)
model = NC_DISPATCH_NC3;
}
@ -1655,7 +1660,6 @@ NC_open(const char *path, int cmode,
int model = 0;
int isurl = 0;
int version = 0;
enum FileType filetype = FT_UNKNOWN;
if(!nc_initialized) {
stat = NC_initialize();
@ -1679,38 +1683,32 @@ NC_open(const char *path, int cmode,
if(isurl)
model = NC_urlmodel(path);
else {
filetype = FT_UNKNOWN;
version = 0;
model = 0;
/* Look at the file if it exists */
stat = NC_check_file_type(path,useparallel,mpi_info,
&filetype,&version);
&model,&version);
if(stat == NC_NOERR) {
switch (filetype) {
case FT_NC:
if(version == 1 || version == 2)
model = NC_DISPATCH_NC3;
break;
case FT_HDF:
model = NC_DISPATCH_NC4;
break;
case FT_PNETCDF:
model = NC_DISPATCH_NC5;
break;
default:
if(model == 0)
return NC_ENOTNC;
}
} else /* presumably not a netcdf file */
return stat;
}
#if 1
if(model == 0) {
fprintf(stderr,"Model != 0\n");
return NC_ENOTNC;
}
#else
Not longer needed
/* Look to the incoming cmode for hints */
if(model == 0) {
if(cmode & NC_PNETCDF) model |= NC_DISPATCH_NC5;
else if(cmode & NC_NETCDF4) model |= NC_DISPATCH_NC4;
}
if(cmode & NC_PNETCDF) model = NC_DISPATCH_NC5;
else if(cmode & NC_NETCDF4) model = NC_DISPATCH_NC4;
}
if(model == 0) model = NC_DISPATCH_NC3; /* final default */
#endif
/* Force flag consistentcy */
if(model & NC_DISPATCH_NC4)
@ -1719,7 +1717,12 @@ NC_open(const char *path, int cmode,
cmode &= ~NC_NETCDF4; /* must be netcdf-3 */
if(version == 2) cmode |= NC_64BIT_OFFSET;
} else if(model & NC_DISPATCH_NC5) {
#if 0
It appears that pnetcdf can read NC_64_BIT_OFFSET
cmode &= ~(NC_NETCDF4 | NC_64BIT_OFFSET); /* must be pnetcdf */
#else
cmode &= ~(NC_NETCDF4);
#endif
cmode |= NC_PNETCDF;
}

Some files were not shown because too many files have changed in this diff Show More